Documentation
¶
Overview ¶
Package analyze provides the analysis engine and analyzer interface for CI performance analysis.
Index ¶
- Constants
- type AnalysisContext
- type AnalysisResult
- type Analyzer
- type ChangePointAnalyzer
- type ChangePointDetail
- type CostAnalyzer
- type CostDetail
- type Duration
- type Engine
- type FailingStep
- type FailureAnalyzer
- type FailureDetail
- type Finding
- type FindingDetail
- type JobCostBreakdown
- type JobSummary
- type OutlierAnalyzer
- type OutlierDetail
- type OutlierGroupDetail
- type PipelineAnalyzer
- type PipelineDetail
- type PipelineStage
- type QueueStats
- type ResultMeta
- type RunnerAnalyzer
- type RunnerDetail
- type StepAnalyzer
- type StepSummary
- type StepTimingDetail
- type SummaryAnalyzer
- type SummaryDetail
- type SummaryStats
Constants ¶
const ( SeverityInfo = "info" SeverityWarning = "warning" SeverityCritical = "critical" )
Severity levels for findings.
const ( DirectionSlowdown = "slowdown" DirectionSpeedup = "speedup" )
Change point directions.
const ( TypeSummary = "summary" TypeSteps = "steps" TypeOutlier = "outlier" TypeChangepoint = "changepoint" TypeFailure = "failure" TypeCost = "cost" )
Finding type identifiers.
const ( PersistencePersistent = "persistent" PersistenceTransient = "transient" PersistenceInconclusive = "inconclusive" )
Change point persistence classifications.
const ( CategoryRegression = "regression" // actionable slowdown (deduplicated, latest per job) CategoryOscillating = "oscillating" // volatile job with 3+ shifts (noise) CategoryMinor = "minor" // severity=info, hidden by default CategorySpeedup = "speedup" // improvement )
Change point categories (set by post-processing).
const ( FailureTrendImproving = "improving" FailureTrendWorsening = "worsening" FailureTrendStable = "stable" )
Failure trend directions.
const ( FailureKindSystematic = "systematic" // >90% of failures hit the same root-cause step FailureKindFlaky = "flaky" // failures spread across multiple steps )
Failure kind classifications.
const ( FailureCategoryInfra = "infra" // setup, runner, environment steps FailureCategoryBuild = "build" // compile, lint, build steps FailureCategoryTest = "test" // test, e2e, integration steps FailureCategoryOther = "other" )
Failure category classifications based on step name heuristics.
const TypePipeline = "pipeline"
TypePipeline is the finding type for pipeline analysis.
const TypeRunner = "runner"
TypeRunner is the finding type for runner sizing analysis.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AnalysisContext ¶
type AnalysisContext struct {
Details []model.RunDetail // filtered (success-only by default)
AllDetails []model.RunDetail // unfiltered — includes failures, for reliability analysis
RerunStats map[int64]preprocess.RerunStats // per-workflow retry stats (computed before dedup)
WorkflowNames map[int64]string // WorkflowID → canonical name from ListWorkflows
}
AnalysisContext carries run data and lazily-computed derived views shared across analyzers.
func (*AnalysisContext) WorkflowName ¶
func (ac *AnalysisContext) WorkflowName(id int64) string
WorkflowName resolves the canonical workflow name for a given ID.
type AnalysisResult ¶
type AnalysisResult struct {
Findings []Finding `json:"findings"`
Diagnostics []diag.Diagnostic `json:"diagnostics"`
Meta ResultMeta `json:"meta"`
}
AnalysisResult is the output of the analysis engine.
type Analyzer ¶
type Analyzer interface {
Name() string
Analyze(ctx context.Context, ac *AnalysisContext) ([]Finding, error)
}
Analyzer examines workflow run data and produces findings.
func DefaultAnalyzers ¶
func DefaultAnalyzers() []Analyzer
DefaultAnalyzers returns the standard set of analyzers in their canonical order.
type ChangePointAnalyzer ¶
type ChangePointAnalyzer struct {
// ThresholdMultiplier controls CUSUM sensitivity (default: 4.0)
ThresholdMultiplier float64
// MinSegment is the minimum runs between change points (default: 5)
MinSegment int
}
ChangePointAnalyzer detects when CI performance shifted using CUSUM.
func (ChangePointAnalyzer) Analyze ¶
func (c ChangePointAnalyzer) Analyze(_ context.Context, ac *AnalysisContext) ([]Finding, error)
func (ChangePointAnalyzer) Name ¶
func (ChangePointAnalyzer) Name() string
Name implements Analyzer.
type ChangePointDetail ¶
type ChangePointDetail struct {
WorkflowName string `json:"workflow_name"`
JobName string `json:"job_name"`
ChangeIdx int `json:"change_idx"`
BeforeMean Duration `json:"before_mean"`
AfterMean Duration `json:"after_mean"`
PctChange float64 `json:"pct_change"`
Direction string `json:"direction"`
PValue float64 `json:"p_value"`
CommitSHA string `json:"commit_sha"`
Date time.Time `json:"date"`
PostChangeRuns int `json:"post_change_runs"`
PostChangeCV float64 `json:"post_change_cv"`
Persistence string `json:"persistence"`
OverlapRatio float64 `json:"overlap_ratio"` // fraction of after-points within before-segment's IQR (0-1)
Category string `json:"category,omitempty"`
}
ChangePointDetail contains information about a detected performance shift.
func (ChangePointDetail) DetailType ¶
func (ChangePointDetail) DetailType() string
DetailType implements FindingDetail.
type CostAnalyzer ¶
type CostAnalyzer struct{}
CostAnalyzer estimates CI cost per workflow based on job durations and runner types.
func (CostAnalyzer) Analyze ¶
func (CostAnalyzer) Analyze(_ context.Context, ac *AnalysisContext) ([]Finding, error)
Analyze implements Analyzer.
type CostDetail ¶
type CostDetail struct {
Workflow string `json:"workflow"`
TotalRuns int `json:"total_runs"`
BillableMinutes float64 `json:"billable_minutes"`
SelfHostedMinutes float64 `json:"self_hosted_minutes"` // minutes on self-hosted runners (free)
DailyRate float64 `json:"daily_rate"` // billable minutes per day
PriorityScore float64 `json:"priority_score"` // higher = more optimization value
DailySavingsEstimate float64 `json:"daily_savings_estimate"`
Jobs []JobCostBreakdown `json:"jobs"`
}
CostDetail contains cost estimation for a workflow.
func (CostDetail) DetailType ¶
func (CostDetail) DetailType() string
DetailType implements FindingDetail.
type Duration ¶
type Duration time.Duration //nolint:recvcheck // MarshalJSON uses value receiver, UnmarshalJSON requires pointer
Duration wraps time.Duration with human-readable JSON marshaling. Serializes as "5m30s" instead of nanoseconds.
func (Duration) MarshalJSON ¶
func (d Duration) MarshalJSON() ([]byte, error)
MarshalJSON outputs the duration as a human-readable string.
func (*Duration) UnmarshalJSON ¶
func (d *Duration) UnmarshalJSON(data []byte) error
UnmarshalJSON parses a duration from a JSON string like "5m30s".
type Engine ¶
type Engine struct {
// contains filtered or unexported fields
}
Engine orchestrates running analyzers over a set of run details.
func (*Engine) Run ¶
func (e *Engine) Run(ctx context.Context, details, allDetails []model.RunDetail, rerunStats map[int64]preprocess.RerunStats, workflowNames map[int64]string) AnalysisResult
Run executes all analyzers sequentially and collects results. allDetails is optional unfiltered data for analyzers that need it (e.g. failure analysis). rerunStats is optional per-workflow retry stats (computed before dedup). workflowNames maps WorkflowID → canonical name from ListWorkflows.
type FailingStep ¶
type FailingStep struct {
JobName string `json:"job_name"`
StepName string `json:"step_name"`
Count int `json:"count"`
Category string `json:"category"`
}
FailingStep identifies a step that frequently causes job failures.
type FailureAnalyzer ¶
type FailureAnalyzer struct{}
FailureAnalyzer analyzes failure rates across workflows. Uses AllDetails (unfiltered) from AnalysisContext.
func (FailureAnalyzer) Analyze ¶
func (FailureAnalyzer) Analyze(_ context.Context, ac *AnalysisContext) ([]Finding, error)
Analyze implements Analyzer.
type FailureDetail ¶
type FailureDetail struct {
Workflow string `json:"workflow"`
TotalRuns int `json:"total_runs"`
FailureCount int `json:"failure_count"`
FailureRate float64 `json:"failure_rate"`
FailureKind string `json:"failure_kind"`
Trend string `json:"trend"` // improving, worsening, stable
RecentFailureRate float64 `json:"recent_failure_rate"` // failure rate in last 7 days
CancellationCount int `json:"cancellation_count"`
CancellationRate float64 `json:"cancellation_rate"`
ByConclusion map[string]int `json:"by_conclusion"`
ByCategory map[string]int `json:"by_category,omitempty"`
FailingSteps []FailingStep `json:"failing_steps,omitempty"`
RetriedRuns int `json:"retried_runs"`
ExtraAttempts int `json:"extra_attempts"`
RerunRate float64 `json:"rerun_rate"`
}
FailureDetail contains reliability information for a workflow.
func (FailureDetail) DetailType ¶
func (FailureDetail) DetailType() string
DetailType implements FindingDetail.
type Finding ¶
type Finding struct {
Type string `json:"type"`
Severity string `json:"severity"`
Title string `json:"title"`
Description string `json:"description"`
Detail FindingDetail `json:"detail"`
}
Finding represents a single analysis result.
type FindingDetail ¶
type FindingDetail interface {
DetailType() string
}
FindingDetail is implemented by typed detail structs for each analyzer.
type JobCostBreakdown ¶
type JobCostBreakdown struct {
Name string `json:"name"`
BillableMinutes float64 `json:"billable_minutes"`
Multiplier float64 `json:"multiplier"`
Runs int `json:"runs"`
}
JobCostBreakdown holds cost info for a single job within a workflow.
type JobSummary ¶
type JobSummary struct {
Name string `json:"name"`
Stats SummaryStats `json:"stats"`
}
JobSummary holds stats for a single job within a workflow.
type OutlierAnalyzer ¶
type OutlierAnalyzer struct {
// Method selects the outlier detection method: "log-iqr" (default) or "mad"
Method string
// MinPercentile is the minimum percentile to report (default: 95).
// Outliers below this threshold are detected but not emitted as findings.
MinPercentile float64
}
OutlierAnalyzer detects runs or jobs with abnormally long durations.
func (OutlierAnalyzer) Analyze ¶
func (o OutlierAnalyzer) Analyze(_ context.Context, ac *AnalysisContext) ([]Finding, error)
type OutlierDetail ¶
type OutlierDetail struct {
RunID int64 `json:"run_id"`
CommitSHA string `json:"commit_sha"`
Duration Duration `json:"duration"`
Percentile float64 `json:"percentile"`
WorkflowName string `json:"workflow_name"`
JobName string `json:"job_name,omitempty"`
}
OutlierDetail contains information about an outlier run or job.
func (OutlierDetail) DetailType ¶
func (OutlierDetail) DetailType() string
DetailType implements FindingDetail.
type OutlierGroupDetail ¶
type OutlierGroupDetail struct {
WorkflowName string `json:"workflow_name"`
JobName string `json:"job_name,omitempty"`
Count int `json:"count"`
WorstDuration Duration `json:"worst_duration"`
WorstPercentile float64 `json:"worst_percentile"`
WorstCommitSHA string `json:"worst_commit_sha"`
MaxSeverity string `json:"max_severity"`
}
OutlierGroupDetail is a post-processed grouped view of outliers for a (workflow, job).
func (OutlierGroupDetail) DetailType ¶
func (OutlierGroupDetail) DetailType() string
DetailType implements FindingDetail.
type PipelineAnalyzer ¶
type PipelineAnalyzer struct{}
PipelineAnalyzer detects sequential dependency chains and parallelism efficiency.
func (PipelineAnalyzer) Analyze ¶
func (PipelineAnalyzer) Analyze(_ context.Context, ac *AnalysisContext) ([]Finding, error)
Analyze implements Analyzer.
type PipelineDetail ¶
type PipelineDetail struct {
Workflow string `json:"workflow"`
TotalRuns int `json:"total_runs"`
MedianWallClock Duration `json:"median_wall_clock"`
MedianJobSum Duration `json:"median_job_sum"`
Parallelism float64 `json:"parallelism"` // 0-1: fraction of job time that runs in parallel
Stages []PipelineStage `json:"stages"`
CriticalPath string `json:"critical_path"` // name of the slowest stage
}
PipelineDetail contains pipeline structure analysis for a workflow.
func (PipelineDetail) DetailType ¶
func (PipelineDetail) DetailType() string
DetailType implements FindingDetail.
type PipelineStage ¶
type PipelineStage struct {
Name string `json:"name"`
Jobs []string `json:"jobs"`
Duration Duration `json:"duration"` // median wall-clock duration of this stage
PctOfPipeline float64 `json:"pct_of_pipeline"`
Sequential bool `json:"sequential"` // true if this stage waits for the previous to finish
}
PipelineStage represents a group of jobs that run concurrently.
type QueueStats ¶
QueueStats holds queue/wait time statistics (CreatedAt to StartedAt gap).
type ResultMeta ¶
type ResultMeta struct {
Repo string `json:"repo"`
TotalRuns int `json:"total_runs"`
TimeRange [2]time.Time `json:"time_range"`
WorkflowIDs []int64 `json:"workflow_ids"`
}
ResultMeta contains metadata about the analysis run.
type RunnerAnalyzer ¶
type RunnerAnalyzer struct{}
RunnerAnalyzer flags jobs with mismatched runner sizes.
func (RunnerAnalyzer) Analyze ¶
func (RunnerAnalyzer) Analyze(_ context.Context, ac *AnalysisContext) ([]Finding, error)
Analyze implements Analyzer.
type RunnerDetail ¶
type RunnerDetail struct {
WorkflowName string `json:"workflow_name"`
JobName string `json:"job_name"`
RunnerLabel string `json:"runner_label"`
Cores int `json:"cores"`
MedianDur Duration `json:"median_duration"`
Runs int `json:"runs"`
Multiplier float64 `json:"multiplier"`
Issue string `json:"issue"` // "oversized" or "undersized"
Suggestion string `json:"suggestion"`
}
RunnerDetail contains runner sizing analysis for a job.
func (RunnerDetail) DetailType ¶
func (RunnerDetail) DetailType() string
DetailType implements FindingDetail.
type StepAnalyzer ¶
type StepAnalyzer struct {
// TopN is the max number of steps to report per job (default: 3).
TopN int
}
StepAnalyzer identifies the slowest and most variable steps per job.
func (StepAnalyzer) Analyze ¶
func (s StepAnalyzer) Analyze(_ context.Context, ac *AnalysisContext) ([]Finding, error)
Analyze implements Analyzer.
type StepSummary ¶
type StepSummary struct {
Name string `json:"name"`
Runs int `json:"runs"`
Median Duration `json:"median"`
P95 Duration `json:"p95"`
PctOfJob float64 `json:"pct_of_job"`
Volatility float64 `json:"volatility"`
}
StepSummary holds timing stats for a single step.
type StepTimingDetail ¶
type StepTimingDetail struct {
WorkflowName string `json:"workflow_name"`
JobName string `json:"job_name"`
TotalRuns int `json:"total_runs"`
Steps []StepSummary `json:"steps"`
}
StepTimingDetail contains step-level timing for a job.
func (StepTimingDetail) DetailType ¶
func (StepTimingDetail) DetailType() string
DetailType implements FindingDetail.
type SummaryAnalyzer ¶
type SummaryAnalyzer struct {
// GroupMatrix groups matrix job variants (e.g. "test (ubuntu, 20)" and "test (macos, 20)")
// under a single "test" entry with aggregate stats. Default: true.
GroupMatrix *bool
}
SummaryAnalyzer computes per-workflow and per-job summary statistics.
func (SummaryAnalyzer) Analyze ¶
func (s SummaryAnalyzer) Analyze(_ context.Context, ac *AnalysisContext) ([]Finding, error)
Analyze implements Analyzer.
type SummaryDetail ¶
type SummaryDetail struct {
Workflow string `json:"workflow"`
Stats SummaryStats `json:"stats"`
Queue QueueStats `json:"queue"`
Jobs []JobSummary `json:"jobs"`
}
SummaryDetail contains summary statistics for a workflow and its jobs.
func (SummaryDetail) DetailType ¶
func (SummaryDetail) DetailType() string
DetailType implements FindingDetail.
type SummaryStats ¶
type SummaryStats struct {
TotalRuns int `json:"total_runs"`
Mean Duration `json:"mean"`
Median Duration `json:"median"`
P95 Duration `json:"p95"`
P99 Duration `json:"p99"`
Min Duration `json:"min"`
Max Duration `json:"max"`
TotalTime Duration `json:"total_time"`
Volatility float64 `json:"volatility"`
VolatilityLabel string `json:"volatility_label"`
}
SummaryStats holds statistical measures for a duration series.