Documentation
Index ¶
- Constants
- func CalculateConcentration(contributors []Contributor) float64
- func CalculateConfidence(m FileMetrics) float32
- func CalculateCouplingStrength(cochanges, commitsA, commitsB int) float64
- func CalculateEntropy(linesPerFile map[string]int) float64
- func CalculateHotspotScore(churnNorm, complexityNorm float64) float64
- func CalculateJITRisk(features CommitFeatures, weights JITWeights, norm NormalizationStats) float64
- func CalculateProbability(m FileMetrics, w DefectWeights) float32
- func EscapeMermaidLabel(s string) string
- func GenerateJITRecommendations(features CommitFeatures, score float64, factors map[string]float64) []string
- func NormalizeChurnCDF(commits int) float64
- func NormalizeComplexityCDF(avgCognitive float64) float64
- func SanitizeMermaidID(id string) string
- func SetConfidenceThresholds(thresholds ConfidenceThresholds)
- type ArchitecturalSmell
- type CallGraph
- type ChurnAnalysis
- type ChurnSummary
- type ClassMetrics
- type CloneAnalysis
- type CloneGroup
- type CloneInstance
- type CloneReport
- type CloneReportSummary
- type CloneSummary
- type CloneType
- type CodeClone
- type CohesionAnalysis
- type CohesionSummary
- type CommitFeatures
- type CommitRisk
- type ComplexityAnalysis
- type ComplexityHotspot
- type ComplexityMetrics
- func (m *ComplexityMetrics) ComplexityScore() float64
- func (m *ComplexityMetrics) IsSimple(t ComplexityThresholds) bool
- func (m *ComplexityMetrics) IsSimpleDefault() bool
- func (m *ComplexityMetrics) NeedsRefactoring(t ComplexityThresholds) bool
- func (m *ComplexityMetrics) NeedsRefactoringDefault() bool
- type ComplexityReport
- type ComplexitySummary
- type ComplexityThresholds
- type ComponentMetrics
- type ConfidenceLevel
- type ConfidenceThresholds
- type Contributor
- type DeadClass
- type DeadCodeAnalysis
- type DeadCodeAnalysisConfig
- type DeadCodeItem
- type DeadCodeKind
- type DeadCodeRankingSummary
- type DeadCodeResult
- type DeadCodeSummary
- type DeadCodeType
- type DeadFunction
- type DeadVariable
- type DebtCategory
- type DefectAnalysis
- type DefectPredictionReport
- type DefectScore
- type DefectSummary
- type DefectWeights
- type DependencyGraph
- type DuplicationHotspot
- type EdgeType
- type ExtendedComplexitySummary
- type ExtendedComplexityThresholds
- type FileChurnMetrics
- type FileComplexity
- type FileCoupling
- type FileDeadCodeMetrics
- type FileHotspot
- type FileMetrics
- type FileOwnership
- type FilePrediction
- type FunctionComplexity
- type Grade
- type GraphEdge
- type GraphMetrics
- type GraphNode
- type GraphSummary
- type HotspotAnalysis
- type HotspotSeverity
- type HotspotSummary
- type JITAnalysis
- type JITRiskLevel
- type JITSummary
- type JITWeights
- type Language
- type LanguageOverride
- type MermaidDirection
- type MermaidOptions
- type MetricCategory
- type MinHashSignature
- type NodeMetric
- type NodeType
- type NormalizationStats
- type OwnershipAnalysis
- type OwnershipSummary
- type PenaltyAttribution
- type PenaltyConfig
- type PenaltyCurve
- type PenaltyTracker
- type ProjectScore
- type ReferenceEdge
- type ReferenceNode
- type ReferenceType
- type RepoMap
- type RepoMapSummary
- type RiskLevel
- type SATDAnalysis
- type SATDSummary
- type Severity
- type SmellAnalysis
- type SmellMetrics
- type SmellSeverity
- type SmellSummary
- type SmellThresholds
- type SmellType
- type Symbol
- type TDGHotspot
- type TDGReport
- type TDGSeverity
- type TDGSummary
- type TdgComparison
- type TdgConfig
- type TdgScore
- type TechnicalDebt
- type TemporalCouplingAnalysis
- type TemporalCouplingSummary
- type ThresholdConfig
- type UnreachableBlock
- type Violation
- type ViolationSeverity
- type WeightConfig
Constants ¶
const (
	HotspotThreshold = 0.5
	StableThreshold  = 0.1
)
Thresholds for hotspot and stable file detection.
const (
	// CriticalHotspotThreshold indicates a critical hotspot requiring immediate attention.
	// Files scoring >= 0.6 have both high churn AND high complexity.
	CriticalHotspotThreshold = 0.6
	// HighHotspotThreshold indicates a significant hotspot that should be reviewed.
	HighHotspotThreshold = 0.4
	// ModerateHotspotThreshold indicates a file worth monitoring.
	ModerateHotspotThreshold = 0.25
	// DefaultHotspotScoreThreshold is the default threshold for counting hotspots in summary.
	// Uses the "High" threshold as the default.
	DefaultHotspotScoreThreshold = HighHotspotThreshold
)
Hotspot severity thresholds based on geometric mean of CDF-normalized scores.
const DefaultMinCochanges = 3
DefaultMinCochanges is the minimum co-change count to consider files coupled.
const StrongCouplingThreshold = 0.5
StrongCouplingThreshold is the threshold for considering coupling "strong".
Variables ¶
This section is empty.
Functions ¶
func CalculateConcentration ¶
func CalculateConcentration(contributors []Contributor) float64
CalculateConcentration computes ownership concentration (0-1). Uses a simplified Gini-like coefficient: the top owner's percentage / 100.
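A minimal usage sketch (values are hypothetical; identifiers are written as if in-package, and "fmt" is assumed to be imported):

	contributors := []Contributor{
		{Name: "alice", Percentage: 60, LinesOwned: 600},
		{Name: "bob", Percentage: 40, LinesOwned: 400},
	}
	// Top owner's percentage / 100: 60 / 100 = 0.6.
	fmt.Println(CalculateConcentration(contributors))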
func CalculateConfidence ¶
func CalculateConfidence(m FileMetrics) float32
CalculateConfidence computes confidence based on data availability.
func CalculateCouplingStrength ¶
func CalculateCouplingStrength(cochanges, commitsA, commitsB int) float64
CalculateCouplingStrength computes the coupling strength between two files: strength = cochanges / max(commitsA, commitsB).
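A worked example of the documented formula (commit counts are hypothetical):

	// Files A and B changed together in 5 commits; A has 10 commits, B has 8.
	CalculateCouplingStrength(5, 10, 8) // 5 / max(10, 8) = 0.5, meeting StrongCouplingThreshold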
func CalculateEntropy ¶
func CalculateEntropy(linesPerFile map[string]int) float64
CalculateEntropy computes Shannon entropy of changes across files: entropy = -sum(p_i * log2(p_i)), where p_i = lines_in_file_i / total_lines.
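A worked example (hypothetical line counts): a change spread evenly over two files yields 1 bit of entropy, while a single-file change yields 0:

	CalculateEntropy(map[string]int{"a.go": 50, "b.go": 50}) // -2*(0.5*log2(0.5)) = 1.0
	CalculateEntropy(map[string]int{"a.go": 100})            // -(1.0*log2(1.0)) = 0.0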
func CalculateHotspotScore ¶
func CalculateHotspotScore(churnNorm, complexityNorm float64) float64
CalculateHotspotScore computes the hotspot score using the geometric mean. This preserves the "intersection" semantics: both churn AND complexity must be elevated for a high score.
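For intuition, assuming the geometric mean is sqrt(churnNorm * complexityNorm) as the doc comment implies (inputs are illustrative):

	CalculateHotspotScore(0.9, 0.4)  // sqrt(0.9*0.4) = 0.6 -> at CriticalHotspotThreshold
	CalculateHotspotScore(0.9, 0.04) // sqrt(0.9*0.04) ≈ 0.19 -> high churn alone stays low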
func CalculateJITRisk ¶
func CalculateJITRisk(features CommitFeatures, weights JITWeights, norm NormalizationStats) float64
CalculateJITRisk computes the risk score for a commit using JIT features.
func CalculateProbability ¶
func CalculateProbability(m FileMetrics, w DefectWeights) float32
CalculateProbability computes defect probability from metrics. PMAT-compatible: uses CDF normalization and sigmoid transformation.
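A minimal usage sketch with the default weights (field values are hypothetical, and CalculateRiskLevel is assumed to take the resulting float32 probability):

	m := FileMetrics{
		FilePath:             "pkg/parser.go",
		ChurnScore:           0.8,
		DuplicateRatio:       0.1,
		LinesOfCode:          420,
		CyclomaticComplexity: 18,
		CognitiveComplexity:  25,
	}
	p := CalculateProbability(m, DefaultDefectWeights())
	fmt.Println(CalculateRiskLevel(p)) // Low (<0.3), Medium (0.3-0.7), or High (>=0.7)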
func EscapeMermaidLabel ¶
func EscapeMermaidLabel(s string) string
EscapeMermaidLabel escapes special characters in labels for Mermaid.
func GenerateJITRecommendations ¶
func GenerateJITRecommendations(features CommitFeatures, score float64, factors map[string]float64) []string
GenerateJITRecommendations suggests actions based on risk factors.
func NormalizeChurnCDF ¶
func NormalizeChurnCDF(commits int) float64
NormalizeChurnCDF normalizes commit count using an empirical CDF. Returns a value between 0 and 1 representing the percentile.
func NormalizeComplexityCDF ¶
func NormalizeComplexityCDF(avgCognitive float64) float64
NormalizeComplexityCDF normalizes average cognitive complexity using an empirical CDF. Returns a value between 0 and 1 representing the percentile.
func SanitizeMermaidID ¶
func SanitizeMermaidID(id string) string
SanitizeMermaidID makes an ID safe for Mermaid diagrams.
func SetConfidenceThresholds ¶
func SetConfidenceThresholds(thresholds ConfidenceThresholds)
SetConfidenceThresholds allows customizing the confidence level thresholds. This function is thread-safe and typically called once at startup.
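A typical startup call, tightening the documented defaults (the chosen values are illustrative):

	SetConfidenceThresholds(ConfidenceThresholds{
		HighThreshold:   0.9, // stricter than the 0.8 default
		MediumThreshold: 0.6, // stricter than the 0.5 default
	})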
Types ¶
type ArchitecturalSmell ¶
type ArchitecturalSmell struct {
Type SmellType `json:"type"`
Severity SmellSeverity `json:"severity"`
Components []string `json:"components"`
Description string `json:"description"`
Suggestion string `json:"suggestion"`
Metrics SmellMetrics `json:"metrics,omitempty"`
}
ArchitecturalSmell represents a detected architectural smell.
type CallGraph ¶
type CallGraph struct {
Nodes map[uint32]*ReferenceNode `json:"nodes" toon:"-"`
Edges []ReferenceEdge `json:"edges" toon:"edges"`
EntryPoints []uint32 `json:"entry_points" toon:"entry_points"`
EdgeIndex map[uint32][]int `json:"-" toon:"-"` // node -> edge indices (outgoing)
}
CallGraph represents the reference graph for reachability analysis.
func (*CallGraph) AddEdge ¶
func (g *CallGraph) AddEdge(edge ReferenceEdge)
AddEdge adds an edge to the call graph with indexing.
func (*CallGraph) AddNode ¶
func (g *CallGraph) AddNode(node *ReferenceNode)
AddNode adds a node to the call graph.
func (*CallGraph) GetOutgoingEdges ¶
func (g *CallGraph) GetOutgoingEdges(nodeID uint32) []ReferenceEdge
GetOutgoingEdges returns all edges originating from a node.
type ChurnAnalysis ¶
type ChurnAnalysis struct {
GeneratedAt time.Time `json:"generated_at"`
PeriodDays int `json:"period_days"`
RepositoryRoot string `json:"repository_root"`
Files []FileChurnMetrics `json:"files"`
Summary ChurnSummary `json:"summary"`
}
ChurnAnalysis represents the full churn analysis result.
type ChurnSummary ¶
type ChurnSummary struct {
// Required fields matching pmat
TotalCommits int `json:"total_commits"`
TotalFilesChanged int `json:"total_files_changed"`
HotspotFiles []string `json:"hotspot_files"`
StableFiles []string `json:"stable_files"`
AuthorContributions map[string]int `json:"author_contributions"`
MeanChurnScore float64 `json:"mean_churn_score"`
VarianceChurnScore float64 `json:"variance_churn_score"`
StdDevChurnScore float64 `json:"stddev_churn_score"`
// Additional metrics not in pmat
TotalAdditions int `json:"total_additions,omitempty"`
TotalDeletions int `json:"total_deletions,omitempty"`
AvgCommitsPerFile float64 `json:"avg_commits_per_file,omitempty"`
MaxChurnScore float64 `json:"max_churn_score,omitempty"`
P50ChurnScore float64 `json:"p50_churn_score,omitempty"`
P95ChurnScore float64 `json:"p95_churn_score,omitempty"`
}
ChurnSummary provides aggregate statistics.
func NewChurnSummary ¶
func NewChurnSummary() ChurnSummary
NewChurnSummary creates an initialized summary.
func (*ChurnSummary) CalculateStatistics ¶
func (s *ChurnSummary) CalculateStatistics(files []FileChurnMetrics)
CalculateStatistics computes mean, variance, standard deviation, and percentiles.
func (*ChurnSummary) IdentifyHotspotAndStableFiles ¶
func (s *ChurnSummary) IdentifyHotspotAndStableFiles(files []FileChurnMetrics)
IdentifyHotspotAndStableFiles populates HotspotFiles and StableFiles. Files must be sorted by ChurnScore descending before calling. Hotspots: the top 10 files with churn_score > 0.5. Stable: the bottom 10 files with churn_score < 0.1 and commit_count > 0.
type ClassMetrics ¶
type ClassMetrics struct {
Path string `json:"path"`
ClassName string `json:"class_name"`
Language string `json:"language"`
StartLine int `json:"start_line"`
EndLine int `json:"end_line"`
// Weighted Methods per Class - sum of cyclomatic complexity of all methods
WMC int `json:"wmc"`
// Coupling Between Objects - number of other classes referenced
CBO int `json:"cbo"`
// Response For Class - number of methods that can be executed in response to a message
RFC int `json:"rfc"`
// Lack of Cohesion in Methods (LCOM4) - number of connected components in method-field graph
// Lower is better; 1 = fully cohesive, >1 = could be split
LCOM int `json:"lcom"`
// Depth of Inheritance Tree
DIT int `json:"dit"`
// Number of Children (direct subclasses)
NOC int `json:"noc"`
// Number of methods
NOM int `json:"nom"`
// Number of fields/attributes
NOF int `json:"nof"`
// Lines of code in the class
LOC int `json:"loc"`
// Method names for reference
Methods []string `json:"methods,omitempty"`
// Field names for reference
Fields []string `json:"fields,omitempty"`
// Classes this class couples to
CoupledClasses []string `json:"coupled_classes,omitempty"`
}
ClassMetrics represents CK metrics for a single class.
type CloneAnalysis ¶
type CloneAnalysis struct {
Clones []CodeClone `json:"clones"`
Groups []CloneGroup `json:"groups,omitempty"`
Summary CloneSummary `json:"summary"`
TotalFilesScanned int `json:"total_files_scanned"`
MinLines int `json:"min_lines"`
Threshold float64 `json:"threshold"`
}
CloneAnalysis represents the full duplicate detection result.
func (*CloneAnalysis) ToCloneReport ¶
func (a *CloneAnalysis) ToCloneReport() *CloneReport
ToCloneReport converts CloneAnalysis to pmat-compatible format.
type CloneGroup ¶
type CloneGroup struct {
ID uint64 `json:"id"`
Type CloneType `json:"type"`
Instances []CloneInstance `json:"instances"`
TotalLines int `json:"total_lines"`
TotalTokens int `json:"total_tokens"`
AverageSimilarity float64 `json:"average_similarity"`
}
CloneGroup represents a group of similar code fragments.
type CloneInstance ¶
type CloneInstance struct {
File string `json:"file"`
StartLine uint32 `json:"start_line"`
EndLine uint32 `json:"end_line"`
Lines int `json:"lines"`
NormalizedHash uint64 `json:"normalized_hash"`
Similarity float64 `json:"similarity"`
}
CloneInstance represents a single occurrence within a clone group.
type CloneReport ¶
type CloneReport struct {
Summary CloneReportSummary `json:"summary"`
Groups []CloneGroup `json:"groups"`
Hotspots []DuplicationHotspot `json:"hotspots"`
}
CloneReport is the pmat-compatible output format.
type CloneReportSummary ¶
type CloneReportSummary struct {
TotalFiles int `json:"total_files"`
TotalFragments int `json:"total_fragments"`
DuplicateLines int `json:"duplicate_lines"`
TotalLines int `json:"total_lines"`
DuplicationRatio float64 `json:"duplication_ratio"`
CloneGroups int `json:"clone_groups"`
LargestGroupSize int `json:"largest_group_size"`
}
CloneReportSummary is the pmat-compatible summary format.
type CloneSummary ¶
type CloneSummary struct {
TotalClones int `json:"total_clones"`
TotalGroups int `json:"total_groups"`
Type1Count int `json:"type1_count"`
Type2Count int `json:"type2_count"`
Type3Count int `json:"type3_count"`
DuplicatedLines int `json:"duplicated_lines"`
TotalLines int `json:"total_lines"`
DuplicationRatio float64 `json:"duplication_ratio"`
FileOccurrences map[string]int `json:"file_occurrences"`
AvgSimilarity float64 `json:"avg_similarity"`
P50Similarity float64 `json:"p50_similarity"`
P95Similarity float64 `json:"p95_similarity"`
Hotspots []DuplicationHotspot `json:"hotspots,omitempty"`
}
CloneSummary provides aggregate statistics.
func NewCloneSummary ¶
func NewCloneSummary() CloneSummary
NewCloneSummary creates an initialized summary.
func (*CloneSummary) AddClone ¶
func (s *CloneSummary) AddClone(c CodeClone)
AddClone updates the summary with a new clone.
type CodeClone ¶
type CodeClone struct {
Type CloneType `json:"type"`
Similarity float64 `json:"similarity"`
FileA string `json:"file_a"`
FileB string `json:"file_b"`
StartLineA uint32 `json:"start_line_a"`
EndLineA uint32 `json:"end_line_a"`
StartLineB uint32 `json:"start_line_b"`
EndLineB uint32 `json:"end_line_b"`
LinesA int `json:"lines_a"`
LinesB int `json:"lines_b"`
TokenCount int `json:"token_count,omitempty"`
GroupID uint64 `json:"group_id,omitempty"`
}
CodeClone represents a detected duplicate code fragment.
type CohesionAnalysis ¶
type CohesionAnalysis struct {
GeneratedAt time.Time `json:"generated_at"`
Classes []ClassMetrics `json:"classes"`
Summary CohesionSummary `json:"summary"`
}
CohesionAnalysis represents the full CK metrics analysis result.
func (*CohesionAnalysis) CalculateSummary ¶
func (c *CohesionAnalysis) CalculateSummary()
CalculateSummary computes summary statistics.
func (*CohesionAnalysis) SortByCBO ¶
func (c *CohesionAnalysis) SortByCBO()
SortByCBO sorts classes by CBO in descending order (most coupled first).
func (*CohesionAnalysis) SortByDIT ¶
func (c *CohesionAnalysis) SortByDIT()
SortByDIT sorts classes by DIT in descending order (deepest inheritance first).
func (*CohesionAnalysis) SortByLCOM ¶
func (c *CohesionAnalysis) SortByLCOM()
SortByLCOM sorts classes by LCOM in descending order (least cohesive first).
func (*CohesionAnalysis) SortByWMC ¶
func (c *CohesionAnalysis) SortByWMC()
SortByWMC sorts classes by WMC in descending order (most complex first).
type CohesionSummary ¶
type CohesionSummary struct {
TotalClasses int `json:"total_classes"`
TotalFiles int `json:"total_files"`
AvgWMC float64 `json:"avg_wmc"`
AvgCBO float64 `json:"avg_cbo"`
AvgRFC float64 `json:"avg_rfc"`
AvgLCOM float64 `json:"avg_lcom"`
MaxWMC int `json:"max_wmc"`
MaxCBO int `json:"max_cbo"`
MaxRFC int `json:"max_rfc"`
MaxLCOM int `json:"max_lcom"`
MaxDIT int `json:"max_dit"`
// Classes with high LCOM (>1) that may need refactoring
LowCohesionCount int `json:"low_cohesion_count"`
}
CohesionSummary provides aggregate CK metrics.
type CommitFeatures ¶
type CommitFeatures struct {
CommitHash string `json:"commit_hash"`
Author string `json:"author"`
Message string `json:"message"`
Timestamp time.Time `json:"timestamp"`
IsFix bool `json:"is_fix"` // FIX: Bug fix commit?
IsAutomated bool `json:"is_automated"` // Automated/trivial commit (CI, merge, etc.)
Entropy float64 `json:"entropy"` // Entropy: Change distribution
LinesAdded int `json:"lines_added"` // LA
LinesDeleted int `json:"lines_deleted"` // LD
NumFiles int `json:"num_files"` // NF
UniqueChanges int `json:"unique_changes"` // NUC: Prior commits to these files
NumDevelopers int `json:"num_developers"` // NDEV: Unique devs on these files
AuthorExperience int `json:"author_experience"` // EXP: Author's prior commits
FilesModified []string `json:"files_modified"`
}
CommitFeatures represents JIT features extracted from a commit.
type CommitRisk ¶
type CommitRisk struct {
CommitHash string `json:"commit_hash"`
Author string `json:"author"`
Message string `json:"message"`
Timestamp time.Time `json:"timestamp"`
RiskScore float64 `json:"risk_score"`
RiskLevel JITRiskLevel `json:"risk_level"`
ContributingFactors map[string]float64 `json:"contributing_factors"`
Recommendations []string `json:"recommendations"`
FilesModified []string `json:"files_modified"`
}
CommitRisk represents the JIT prediction result for a single commit.
type ComplexityAnalysis ¶
type ComplexityAnalysis struct {
Files []FileComplexity `json:"files"`
Summary ComplexitySummary `json:"summary"`
}
ComplexityAnalysis represents the full analysis result.
type ComplexityHotspot ¶
type ComplexityHotspot struct {
File string `json:"file"`
Function string `json:"function,omitempty"`
Line uint32 `json:"line"`
Complexity uint32 `json:"complexity"`
ComplexityType string `json:"complexity_type"`
}
ComplexityHotspot identifies a high-complexity location in the codebase.
type ComplexityMetrics ¶
type ComplexityMetrics struct {
Cyclomatic uint32 `json:"cyclomatic"`
Cognitive uint32 `json:"cognitive"`
MaxNesting int `json:"max_nesting"`
Lines int `json:"lines"`
}
ComplexityMetrics represents code complexity measurements for a function or file.
func (*ComplexityMetrics) ComplexityScore ¶
func (m *ComplexityMetrics) ComplexityScore() float64
ComplexityScore calculates a composite complexity score for ranking. Combines cyclomatic, cognitive, nesting, and lines with weighted factors.
func (*ComplexityMetrics) IsSimple ¶
func (m *ComplexityMetrics) IsSimple(t ComplexityThresholds) bool
IsSimple returns true if complexity is within acceptable limits.
func (*ComplexityMetrics) IsSimpleDefault ¶
func (m *ComplexityMetrics) IsSimpleDefault() bool
IsSimpleDefault checks if complexity is low using fixed thresholds (pmat compatible). Returns true if cyclomatic <= 5 and cognitive <= 7.
func (*ComplexityMetrics) NeedsRefactoring ¶
func (m *ComplexityMetrics) NeedsRefactoring(t ComplexityThresholds) bool
NeedsRefactoring returns true if any metric significantly exceeds thresholds.
func (*ComplexityMetrics) NeedsRefactoringDefault ¶
func (m *ComplexityMetrics) NeedsRefactoringDefault() bool
NeedsRefactoringDefault checks if complexity exceeds fixed thresholds (pmat compatible). Returns true if cyclomatic > 10 or cognitive > 15.
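A sketch of the fixed pmat-compatible thresholds in action (metric values are hypothetical, "fmt" assumed imported):

	m := ComplexityMetrics{Cyclomatic: 12, Cognitive: 9, MaxNesting: 3, Lines: 80}
	fmt.Println(m.IsSimpleDefault())         // false: cyclomatic 12 > 5
	fmt.Println(m.NeedsRefactoringDefault()) // true: cyclomatic 12 > 10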
type ComplexityReport ¶
type ComplexityReport struct {
Summary ExtendedComplexitySummary `json:"summary"`
Violations []Violation `json:"violations"`
Hotspots []ComplexityHotspot `json:"hotspots"`
Files []FileComplexity `json:"files"`
TechnicalDebtHours float32 `json:"technical_debt_hours"`
}
ComplexityReport is the full analysis report with violations and hotspots.
func AggregateResults ¶
func AggregateResults(files []FileComplexity) *ComplexityReport
AggregateResults creates a ComplexityReport from file metrics using default thresholds.
func AggregateResultsWithThresholds ¶
func AggregateResultsWithThresholds(files []FileComplexity, maxCyclomatic, maxCognitive *uint32) *ComplexityReport
AggregateResultsWithThresholds creates a ComplexityReport with custom thresholds. If maxCyclomatic or maxCognitive is nil, defaults are used.
func (*ComplexityReport) ErrorCount ¶
func (r *ComplexityReport) ErrorCount() int
ErrorCount returns the number of error-severity violations.
func (*ComplexityReport) WarningCount ¶
func (r *ComplexityReport) WarningCount() int
WarningCount returns the number of warning-severity violations.
type ComplexitySummary ¶
type ComplexitySummary struct {
TotalFiles int `json:"total_files"`
TotalFunctions int `json:"total_functions"`
AvgCyclomatic float64 `json:"avg_cyclomatic"`
AvgCognitive float64 `json:"avg_cognitive"`
MaxCyclomatic uint32 `json:"max_cyclomatic"`
MaxCognitive uint32 `json:"max_cognitive"`
P50Cyclomatic uint32 `json:"p50_cyclomatic"`
P90Cyclomatic uint32 `json:"p90_cyclomatic"`
P95Cyclomatic uint32 `json:"p95_cyclomatic"`
P50Cognitive uint32 `json:"p50_cognitive"`
P90Cognitive uint32 `json:"p90_cognitive"`
P95Cognitive uint32 `json:"p95_cognitive"`
ViolationCount int `json:"violation_count"`
}
ComplexitySummary provides aggregate statistics.
type ComplexityThresholds ¶
type ComplexityThresholds struct {
MaxCyclomatic uint32 `json:"max_cyclomatic"`
MaxCognitive uint32 `json:"max_cognitive"`
MaxNesting int `json:"max_nesting"`
}
ComplexityThresholds defines the limits for complexity violations.
func DefaultComplexityThresholds ¶
func DefaultComplexityThresholds() ComplexityThresholds
DefaultComplexityThresholds returns sensible defaults.
type ComponentMetrics ¶
type ComponentMetrics struct {
ID string `json:"id"`
Name string `json:"name"`
FanIn int `json:"fan_in"` // Afferent coupling (incoming dependencies)
FanOut int `json:"fan_out"` // Efferent coupling (outgoing dependencies)
Instability float64 `json:"instability"` // Ce / (Ca + Ce), 0 = stable, 1 = unstable
IsHub bool `json:"is_hub"` // Fan-in + Fan-out > threshold
IsGod bool `json:"is_god"` // High fan-in AND high fan-out
}
ComponentMetrics provides instability metrics for a component.
func (*ComponentMetrics) CalculateInstability ¶
func (c *ComponentMetrics) CalculateInstability() float64
CalculateInstability computes Martin's Instability metric I = Ce / (Ca + Ce), where Ce is efferent coupling (outgoing) and Ca is afferent coupling (incoming). Returns 0 for fully stable components (all incoming dependencies) and 1 for fully unstable ones (all outgoing).
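A worked example (coupling counts are hypothetical):

	c := ComponentMetrics{FanIn: 3, FanOut: 1}
	fmt.Println(c.CalculateInstability()) // I = 1 / (3 + 1) = 0.25: mostly depended upon, fairly stable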
type ConfidenceLevel ¶
type ConfidenceLevel string
ConfidenceLevel indicates how certain we are about dead code detection.
const (
	ConfidenceHigh   ConfidenceLevel = "High"
	ConfidenceMedium ConfidenceLevel = "Medium"
	ConfidenceLow    ConfidenceLevel = "Low"
)
type ConfidenceThresholds ¶
type ConfidenceThresholds struct {
HighThreshold float64 // Confidence >= this is "High" (default: 0.8)
MediumThreshold float64 // Confidence >= this (and < High) is "Medium" (default: 0.5)
}
ConfidenceThresholds defines the thresholds for confidence level classification. These thresholds are based on empirical analysis of dead code detection accuracy:
- High (>=0.8): private/unexported symbols with no references have very low false positive rates
- Medium (>=0.5): exported symbols without internal usage may still be part of the public API
- Low (<0.5): symbols matching dynamic usage patterns (reflection, callbacks) need manual review
func DefaultConfidenceThresholds ¶
func DefaultConfidenceThresholds() ConfidenceThresholds
DefaultConfidenceThresholds returns the default confidence thresholds.
func GetConfidenceThresholds ¶
func GetConfidenceThresholds() ConfidenceThresholds
GetConfidenceThresholds returns the current confidence thresholds. This function is thread-safe.
type Contributor ¶
type Contributor struct {
Name string `json:"name"`
Email string `json:"email"`
LinesOwned int `json:"lines_owned"`
Percentage float64 `json:"percentage"` // 0-100
}
Contributor represents a contributor to a file.
type DeadClass ¶
type DeadClass struct {
Name string `json:"name"`
File string `json:"file"`
Line uint32 `json:"line"`
EndLine uint32 `json:"end_line"`
Visibility string `json:"visibility"`
Confidence float64 `json:"confidence"`
ConfidenceLevel ConfidenceLevel `json:"confidence_level"`
ConfidenceReason string `json:"confidence_reason"`
Reason string `json:"reason"`
Kind DeadCodeKind `json:"kind,omitempty"`
NodeID uint32 `json:"node_id,omitempty"`
}
DeadClass represents an unused class/struct/type.
func (*DeadClass) SetConfidenceLevel ¶
func (c *DeadClass) SetConfidenceLevel()
SetConfidenceLevel sets the confidence level and reason based on the numeric confidence. Uses configurable thresholds from GetConfidenceThresholds().
type DeadCodeAnalysis ¶
type DeadCodeAnalysis struct {
DeadFunctions []DeadFunction `json:"dead_functions"`
DeadVariables []DeadVariable `json:"dead_variables"`
DeadClasses []DeadClass `json:"dead_classes,omitempty"`
UnreachableCode []UnreachableBlock `json:"unreachable_code"`
Summary DeadCodeSummary `json:"summary"`
CallGraph *CallGraph `json:"call_graph,omitempty"`
}
DeadCodeAnalysis represents the full dead code detection result.
type DeadCodeAnalysisConfig ¶
type DeadCodeAnalysisConfig struct {
IncludeUnreachable bool `json:"include_unreachable"`
IncludeTests bool `json:"include_tests"`
MinDeadLines int `json:"min_dead_lines"`
}
DeadCodeAnalysisConfig holds configuration for dead code analysis (pmat compatible).
type DeadCodeItem ¶
type DeadCodeItem struct {
ItemType DeadCodeType `json:"item_type"`
Name string `json:"name"`
Line uint32 `json:"line"`
Reason string `json:"reason"`
}
DeadCodeItem represents an individual dead code item within a file (pmat compatible).
type DeadCodeKind ¶
type DeadCodeKind string
DeadCodeKind classifies the type of dead code detected.
const (
	DeadKindFunction    DeadCodeKind = "unused_function"
	DeadKindClass       DeadCodeKind = "unused_class"
	DeadKindVariable    DeadCodeKind = "unused_variable"
	DeadKindUnreachable DeadCodeKind = "unreachable_code"
	DeadKindDeadBranch  DeadCodeKind = "dead_branch"
)
type DeadCodeRankingSummary ¶
type DeadCodeRankingSummary struct {
TotalFilesAnalyzed int `json:"total_files_analyzed"`
FilesWithDeadCode int `json:"files_with_dead_code"`
TotalDeadLines int `json:"total_dead_lines"`
DeadPercentage float32 `json:"dead_percentage"`
DeadFunctions int `json:"dead_functions"`
DeadClasses int `json:"dead_classes"`
DeadModules int `json:"dead_modules"`
UnreachableBlocks int `json:"unreachable_blocks"`
}
DeadCodeRankingSummary provides aggregate statistics (pmat compatible).
type DeadCodeResult ¶
type DeadCodeResult struct {
Summary DeadCodeRankingSummary `json:"summary"`
Files []FileDeadCodeMetrics `json:"files"`
TotalFiles int `json:"total_files"`
AnalyzedFiles int `json:"analyzed_files"`
AnalysisTimestamp time.Time `json:"analysis_timestamp,omitempty"`
Config DeadCodeAnalysisConfig `json:"config,omitempty"`
}
DeadCodeResult is the pmat-compatible output format for dead code analysis.
func NewDeadCodeResult ¶
func NewDeadCodeResult() *DeadCodeResult
NewDeadCodeResult creates a new pmat-compatible dead code result.
func (*DeadCodeResult) FromDeadCodeAnalysis ¶
func (r *DeadCodeResult) FromDeadCodeAnalysis(analysis *DeadCodeAnalysis)
FromDeadCodeAnalysis converts the internal DeadCodeAnalysis to pmat-compatible format.
type DeadCodeSummary ¶
type DeadCodeSummary struct {
TotalDeadFunctions int `json:"total_dead_functions" toon:"total_dead_functions"`
TotalDeadVariables int `json:"total_dead_variables" toon:"total_dead_variables"`
TotalDeadClasses int `json:"total_dead_classes" toon:"total_dead_classes"`
TotalUnreachableBlocks int `json:"total_unreachable_blocks" toon:"total_unreachable_blocks"`
TotalUnreachableLines int `json:"total_unreachable_lines" toon:"total_unreachable_lines"`
DeadCodePercentage float64 `json:"dead_code_percentage" toon:"dead_code_percentage"`
ByFile map[string]int `json:"by_file" toon:"by_file"`
ByKind map[DeadCodeKind]int `json:"by_kind,omitempty" toon:"-"`
TotalFilesAnalyzed int `json:"total_files_analyzed" toon:"total_files_analyzed"`
TotalLinesAnalyzed int `json:"total_lines_analyzed" toon:"total_lines_analyzed"`
TotalNodesInGraph int `json:"total_nodes_in_graph,omitempty" toon:"total_nodes_in_graph,omitempty"`
ReachableNodes int `json:"reachable_nodes,omitempty" toon:"reachable_nodes,omitempty"`
UnreachableNodes int `json:"unreachable_nodes,omitempty" toon:"unreachable_nodes,omitempty"`
ConfidenceLevel float64 `json:"confidence_level,omitempty" toon:"confidence_level,omitempty"`
}
DeadCodeSummary provides aggregate statistics.
func NewDeadCodeSummary ¶
func NewDeadCodeSummary() DeadCodeSummary
NewDeadCodeSummary creates an initialized summary.
func (*DeadCodeSummary) AddDeadClass ¶
func (s *DeadCodeSummary) AddDeadClass(c DeadClass)
AddDeadClass updates the summary with a dead class.
func (*DeadCodeSummary) AddDeadFunction ¶
func (s *DeadCodeSummary) AddDeadFunction(f DeadFunction)
AddDeadFunction updates the summary with a dead function.
func (*DeadCodeSummary) AddDeadVariable ¶
func (s *DeadCodeSummary) AddDeadVariable(v DeadVariable)
AddDeadVariable updates the summary with a dead variable.
func (*DeadCodeSummary) AddUnreachableBlock ¶
func (s *DeadCodeSummary) AddUnreachableBlock(b UnreachableBlock)
AddUnreachableBlock updates the summary with unreachable code.
func (*DeadCodeSummary) CalculatePercentage ¶
func (s *DeadCodeSummary) CalculatePercentage()
CalculatePercentage computes dead code percentage.
type DeadCodeType ¶
type DeadCodeType string
DeadCodeType classifies the type of dead code item (pmat compatible).
const (
	DeadCodeTypeFunction    DeadCodeType = "function"
	DeadCodeTypeClass       DeadCodeType = "class"
	DeadCodeTypeVariable    DeadCodeType = "variable"
	DeadCodeTypeUnreachable DeadCodeType = "unreachable"
)
type DeadFunction ¶
type DeadFunction struct {
Name string `json:"name"`
File string `json:"file"`
Line uint32 `json:"line"`
EndLine uint32 `json:"end_line"`
Visibility string `json:"visibility"` // public, private, internal
Confidence float64 `json:"confidence"` // 0.0-1.0, how certain we are it's dead
ConfidenceLevel ConfidenceLevel `json:"confidence_level"`
ConfidenceReason string `json:"confidence_reason"` // Why we have this confidence level
Reason string `json:"reason"` // Why it's considered dead
Kind DeadCodeKind `json:"kind,omitempty"`
NodeID uint32 `json:"node_id,omitempty"`
}
DeadFunction represents an unused function detected in the codebase.
func (*DeadFunction) SetConfidenceLevel ¶
func (f *DeadFunction) SetConfidenceLevel()
SetConfidenceLevel sets the confidence level and reason based on the numeric confidence. Uses configurable thresholds from GetConfidenceThresholds().
type DeadVariable ¶
type DeadVariable struct {
Name string `json:"name"`
File string `json:"file"`
Line uint32 `json:"line"`
Visibility string `json:"visibility"`
Confidence float64 `json:"confidence"`
ConfidenceLevel ConfidenceLevel `json:"confidence_level"`
ConfidenceReason string `json:"confidence_reason"`
Reason string `json:"reason,omitempty"`
Kind DeadCodeKind `json:"kind,omitempty"`
NodeID uint32 `json:"node_id,omitempty"`
}
DeadVariable represents an unused variable.
func (*DeadVariable) SetConfidenceLevel ¶
func (v *DeadVariable) SetConfidenceLevel()
SetConfidenceLevel sets the confidence level and reason based on the numeric confidence. Uses configurable thresholds from GetConfidenceThresholds().
type DebtCategory ¶
type DebtCategory string
DebtCategory represents the type of technical debt.
const (
	DebtDesign      DebtCategory = "design"      // HACK, KLUDGE, SMELL
	DebtDefect      DebtCategory = "defect"      // BUG, FIXME, BROKEN
	DebtRequirement DebtCategory = "requirement" // TODO, FEAT, ENHANCEMENT
	DebtTest        DebtCategory = "test"        // FAILING, SKIP, DISABLED
	DebtPerformance DebtCategory = "performance" // SLOW, OPTIMIZE, PERF
	DebtSecurity    DebtCategory = "security"    // SECURITY, VULN, UNSAFE
)
func (DebtCategory) String ¶
func (d DebtCategory) String() string
String implements fmt.Stringer for toon serialization.
type DefectAnalysis ¶
type DefectAnalysis struct {
Files []DefectScore `json:"files"`
Summary DefectSummary `json:"summary"`
Weights DefectWeights `json:"weights"`
}
DefectAnalysis represents the full defect prediction result (internal format).
func (*DefectAnalysis) ToDefectPredictionReport ¶
func (a *DefectAnalysis) ToDefectPredictionReport() *DefectPredictionReport
ToDefectPredictionReport converts DefectAnalysis to pmat-compatible format.
type DefectPredictionReport ¶
type DefectPredictionReport struct {
TotalFiles int `json:"total_files"`
HighRiskFiles int `json:"high_risk_files"`
MediumRiskFiles int `json:"medium_risk_files"`
LowRiskFiles int `json:"low_risk_files"`
FilePredictions []FilePrediction `json:"file_predictions"`
}
DefectPredictionReport is the pmat-compatible output format.
type DefectScore ¶
type DefectScore struct {
FilePath string `json:"file_path"`
Probability float32 `json:"probability"` // 0.0 to 1.0
Confidence float32 `json:"confidence"` // 0.0 to 1.0
RiskLevel RiskLevel `json:"risk_level"`
ContributingFactors map[string]float32 `json:"contributing_factors"`
Recommendations []string `json:"recommendations"`
}
DefectScore represents the prediction result for a file (internal format).
type DefectSummary ¶
type DefectSummary struct {
TotalFiles int `json:"total_files"`
HighRiskCount int `json:"high_risk_count"`
MediumRiskCount int `json:"medium_risk_count"`
LowRiskCount int `json:"low_risk_count"`
AvgProbability float32 `json:"avg_probability"`
P50Probability float32 `json:"p50_probability"`
P95Probability float32 `json:"p95_probability"`
}
DefectSummary provides aggregate statistics (internal format).
type DefectWeights ¶
type DefectWeights struct {
Churn float32 `json:"churn"` // 0.35
Complexity float32 `json:"complexity"` // 0.30
Duplication float32 `json:"duplication"` // 0.25
Coupling float32 `json:"coupling"` // 0.10
}
DefectWeights defines the weights for defect prediction factors. Based on empirical research (PMAT approach).
func DefaultDefectWeights ¶
func DefaultDefectWeights() DefectWeights
DefaultDefectWeights returns the standard weights.
type DependencyGraph ¶
DependencyGraph represents the full graph structure.
func NewDependencyGraph ¶
func NewDependencyGraph() *DependencyGraph
NewDependencyGraph creates an empty graph.
func (*DependencyGraph) AddEdge ¶
func (g *DependencyGraph) AddEdge(edge GraphEdge)
AddEdge adds an edge to the graph.
func (*DependencyGraph) AddNode ¶
func (g *DependencyGraph) AddNode(node GraphNode)
AddNode adds a node to the graph.
func (*DependencyGraph) ToMermaid ¶
func (g *DependencyGraph) ToMermaid() string
ToMermaid generates Mermaid diagram syntax from the graph using default options.
func (*DependencyGraph) ToMermaidWithOptions ¶
func (g *DependencyGraph) ToMermaidWithOptions(opts MermaidOptions) string
ToMermaidWithOptions generates Mermaid diagram syntax with custom options.
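A minimal rendering sketch (node and edge values are hypothetical; assumes "fmt" is imported):

	g := NewDependencyGraph()
	g.AddNode(GraphNode{ID: "a", Name: "pkg/a", File: "pkg/a/a.go"})
	g.AddNode(GraphNode{ID: "b", Name: "pkg/b", File: "pkg/b/b.go"})
	g.AddEdge(GraphEdge{From: "a", To: "b"})
	opts := DefaultMermaidOptions()
	opts.Direction = DirectionLR // left-to-right instead of the default
	fmt.Println(g.ToMermaidWithOptions(opts))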
type DuplicationHotspot ¶
type DuplicationHotspot struct {
File string `json:"file"`
DuplicateLines int `json:"duplicate_lines"`
CloneGroupCount int `json:"clone_group_count"`
Severity float64 `json:"severity"`
}
DuplicationHotspot represents a file with high duplication.
type ExtendedComplexitySummary ¶
type ExtendedComplexitySummary struct {
TotalFiles int `json:"total_files"`
TotalFunctions int `json:"total_functions"`
MedianCyclomatic float32 `json:"median_cyclomatic"`
MedianCognitive float32 `json:"median_cognitive"`
MaxCyclomatic uint32 `json:"max_cyclomatic"`
MaxCognitive uint32 `json:"max_cognitive"`
P90Cyclomatic uint32 `json:"p90_cyclomatic"`
P90Cognitive uint32 `json:"p90_cognitive"`
TechnicalDebtHours float32 `json:"technical_debt_hours"`
}
ExtendedComplexitySummary provides enhanced statistics (pmat compatible).
type ExtendedComplexityThresholds ¶
type ExtendedComplexityThresholds struct {
CyclomaticWarn uint32 `json:"cyclomatic_warn"`
CyclomaticError uint32 `json:"cyclomatic_error"`
CognitiveWarn uint32 `json:"cognitive_warn"`
CognitiveError uint32 `json:"cognitive_error"`
NestingMax uint8 `json:"nesting_max"`
MethodLength uint16 `json:"method_length"`
}
ExtendedComplexityThresholds provides warn and error levels (pmat compatible).
func DefaultExtendedThresholds ¶
func DefaultExtendedThresholds() ExtendedComplexityThresholds
DefaultExtendedThresholds returns pmat-compatible default thresholds.
type FileChurnMetrics ¶
type FileChurnMetrics struct {
Path string `json:"path"`
RelativePath string `json:"relative_path"`
Commits int `json:"commit_count"`
UniqueAuthors []string `json:"unique_authors"`
AuthorCounts map[string]int `json:"-"` // internal: author name -> commit count
LinesAdded int `json:"additions"`
LinesDeleted int `json:"deletions"`
ChurnScore float64 `json:"churn_score"` // 0.0-1.0 normalized
FirstCommit time.Time `json:"first_seen"`
LastCommit time.Time `json:"last_modified"`
// Relative churn metrics (research-backed - Nagappan & Ball 2005)
TotalLOC int `json:"total_loc,omitempty"` // Current lines of code in file
LOCReadError bool `json:"loc_read_error,omitempty"` // True if file could not be read for LOC count
RelativeChurn float64 `json:"relative_churn,omitempty"` // (LinesAdded + LinesDeleted) / TotalLOC
ChurnRate float64 `json:"churn_rate,omitempty"` // RelativeChurn / DaysSinceFirstCommit
ChangeFrequency float64 `json:"change_frequency,omitempty"` // Commits / DaysSinceFirstCommit
DaysActive int `json:"days_active,omitempty"` // Days between first and last commit
}
FileChurnMetrics represents git churn data for a single file.
func (*FileChurnMetrics) CalculateChurnScore ¶
func (f *FileChurnMetrics) CalculateChurnScore() float64
CalculateChurnScore computes a normalized churn score. Uses the same formula as the reference implementation: churn_score = (commit_factor * 0.6 + change_factor * 0.4)
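A usage sketch tying the score to the package-level thresholds (the git history figures are hypothetical, and IsHotspot is assumed to compare the stored ChurnScore against the threshold):

	f := FileChurnMetrics{Commits: 40, LinesAdded: 900, LinesDeleted: 300}
	f.ChurnScore = f.CalculateChurnScore() // commit_factor*0.6 + change_factor*0.4
	if f.IsHotspot(HotspotThreshold) {
		// churn_score > 0.5: the file would surface in ChurnSummary.HotspotFiles
	}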
func (*FileChurnMetrics) CalculateChurnScoreWithMax ¶
func (f *FileChurnMetrics) CalculateChurnScoreWithMax(maxCommits, maxChanges int) float64
CalculateChurnScoreWithMax computes churn score with explicit max values.
func (*FileChurnMetrics) CalculateRelativeChurn ¶
func (f *FileChurnMetrics) CalculateRelativeChurn(now time.Time)
CalculateRelativeChurn computes relative churn metrics. These metrics are research-backed per Nagappan & Ball (2005): "Use of relative code churn measures to predict system defect density" Relative churn discriminates fault-prone files with 89% accuracy.
func (*FileChurnMetrics) IsHotspot ¶
func (f *FileChurnMetrics) IsHotspot(threshold float64) bool
IsHotspot returns true if the file has high churn.
type FileComplexity ¶
type FileComplexity struct {
Path string `json:"path"`
Language string `json:"language"`
Functions []FunctionComplexity `json:"functions"`
TotalCyclomatic uint32 `json:"total_cyclomatic"`
TotalCognitive uint32 `json:"total_cognitive"`
AvgCyclomatic float64 `json:"avg_cyclomatic"`
AvgCognitive float64 `json:"avg_cognitive"`
MaxCyclomatic uint32 `json:"max_cyclomatic"`
MaxCognitive uint32 `json:"max_cognitive"`
ViolationCount int `json:"violation_count"`
}
FileComplexity represents aggregated complexity for a file.
type FileCoupling ¶
type FileCoupling struct {
FileA string `json:"file_a"`
FileB string `json:"file_b"`
CochangeCount int `json:"cochange_count"`
CouplingStrength float64 `json:"coupling_strength"` // 0-1
CommitsA int `json:"commits_a"`
CommitsB int `json:"commits_b"`
}
FileCoupling represents the temporal coupling between two files.
type FileDeadCodeMetrics ¶
type FileDeadCodeMetrics struct {
Path string `json:"path"`
DeadLines int `json:"dead_lines"`
TotalLines int `json:"total_lines"`
DeadPercentage float32 `json:"dead_percentage"`
DeadFunctions int `json:"dead_functions"`
DeadClasses int `json:"dead_classes"`
DeadModules int `json:"dead_modules"`
UnreachableBlocks int `json:"unreachable_blocks"`
DeadScore float32 `json:"dead_score"`
Confidence ConfidenceLevel `json:"confidence"`
Items []DeadCodeItem `json:"items"`
}
FileDeadCodeMetrics contains file-level dead code metrics with items (pmat compatible).
func (*FileDeadCodeMetrics) AddItem ¶
func (f *FileDeadCodeMetrics) AddItem(item DeadCodeItem)
AddItem adds a dead code item and updates counts.
func (*FileDeadCodeMetrics) CalculateScore ¶
func (f *FileDeadCodeMetrics) CalculateScore()
CalculateScore calculates the dead code score using weighted algorithm (pmat compatible).
func (*FileDeadCodeMetrics) UpdatePercentage ¶
func (f *FileDeadCodeMetrics) UpdatePercentage()
UpdatePercentage updates the dead percentage based on current counts.
type FileHotspot ¶
type FileHotspot struct {
Path string `json:"path"`
HotspotScore float64 `json:"hotspot_score"` // 0-1, geometric mean of normalized churn and complexity
ChurnScore float64 `json:"churn_score"` // 0-1, CDF-normalized
ComplexityScore float64 `json:"complexity_score"` // 0-1, CDF-normalized
Commits int `json:"commits"`
AvgCognitive float64 `json:"avg_cognitive"`
AvgCyclomatic float64 `json:"avg_cyclomatic"`
TotalFunctions int `json:"total_functions"`
}
FileHotspot represents hotspot metrics for a single file.
func (*FileHotspot) Severity ¶
func (f *FileHotspot) Severity() HotspotSeverity
Severity returns the severity level based on the hotspot score.
type FileMetrics ¶
type FileMetrics struct {
FilePath string `json:"file_path"`
ChurnScore float32 `json:"churn_score"` // 0.0 to 1.0
Complexity float32 `json:"complexity"` // Raw complexity
DuplicateRatio float32 `json:"duplicate_ratio"` // 0.0 to 1.0
AfferentCoupling float32 `json:"afferent_coupling"` // Incoming deps
EfferentCoupling float32 `json:"efferent_coupling"` // Outgoing deps
LinesOfCode int `json:"lines_of_code"`
CyclomaticComplexity uint32 `json:"cyclomatic_complexity"`
CognitiveComplexity uint32 `json:"cognitive_complexity"`
}
FileMetrics contains input metrics for defect prediction.
type FileOwnership ¶
type FileOwnership struct {
Path string `json:"path"`
PrimaryOwner string `json:"primary_owner"`
OwnershipPercent float64 `json:"ownership_percent"` // 0-100
Concentration float64 `json:"concentration"` // 0-1, higher = more concentrated
TotalLines int `json:"total_lines"`
Contributors []Contributor `json:"contributors,omitempty"`
IsSilo bool `json:"is_silo"` // Single contributor
}
FileOwnership represents ownership metrics for a single file.
type FilePrediction ¶
type FilePrediction struct {
FilePath string `json:"file_path"`
RiskScore float32 `json:"risk_score"`
RiskLevel string `json:"risk_level"`
Factors []string `json:"factors"`
}
FilePrediction represents a file's defect prediction (pmat-compatible).
type FunctionComplexity ¶
type FunctionComplexity struct {
Name string `json:"name"`
File string `json:"file"`
StartLine uint32 `json:"start_line"`
EndLine uint32 `json:"end_line"`
Metrics ComplexityMetrics `json:"metrics"`
Violations []string `json:"violations,omitempty"`
}
FunctionComplexity represents complexity metrics for a single function.
type Grade ¶
type Grade string
Grade represents a letter grade from A+ to F (PMAT-compatible). Higher grades indicate better code quality.
func GradeFromScore ¶
GradeFromScore converts a 0-100 score to a letter grade.
type GraphEdge ¶
type GraphEdge struct {
From string `json:"from"`
To string `json:"to"`
Type EdgeType `json:"type"`
Weight float64 `json:"weight,omitempty"`
}
GraphEdge represents a dependency between nodes.
type GraphMetrics ¶
type GraphMetrics struct {
NodeMetrics []NodeMetric `json:"node_metrics"`
Summary GraphSummary `json:"summary"`
}
GraphMetrics represents centrality and other graph metrics.
type GraphNode ¶
type GraphNode struct {
ID string `json:"id"`
Name string `json:"name"`
Type NodeType `json:"type"` // file, function, class, module
File string `json:"file"`
Line uint32 `json:"line,omitempty"`
Attributes map[string]string `json:"attributes,omitempty"`
}
GraphNode represents a node in the dependency graph.
type GraphSummary ¶
type GraphSummary struct {
TotalNodes int `json:"total_nodes"`
TotalEdges int `json:"total_edges"`
AvgDegree float64 `json:"avg_degree"`
Density float64 `json:"density"`
Components int `json:"components"`
LargestComponent int `json:"largest_component"`
StronglyConnectedComponents int `json:"strongly_connected_components"`
CycleCount int `json:"cycle_count"`
CycleNodes []string `json:"cycle_nodes,omitempty"`
IsCyclic bool `json:"is_cyclic"`
Diameter int `json:"diameter,omitempty"`
Radius int `json:"radius,omitempty"`
ClusteringCoefficient float64 `json:"clustering_coefficient"`
Assortativity float64 `json:"assortativity"`
Transitivity float64 `json:"transitivity"`
Reciprocity float64 `json:"reciprocity,omitempty"`
Modularity float64 `json:"modularity,omitempty"`
CommunityCount int `json:"community_count,omitempty"`
}
GraphSummary provides aggregate graph statistics.
type HotspotAnalysis ¶
type HotspotAnalysis struct {
GeneratedAt time.Time `json:"generated_at"`
PeriodDays int `json:"period_days"`
Files []FileHotspot `json:"files"`
Summary HotspotSummary `json:"summary"`
}
HotspotAnalysis represents the full hotspot analysis result.
func (*HotspotAnalysis) CalculateSummary ¶
func (h *HotspotAnalysis) CalculateSummary()
CalculateSummary computes summary statistics from file hotspots. Files must be sorted by HotspotScore descending before calling.
type HotspotSeverity ¶
type HotspotSeverity string
HotspotSeverity represents the severity level of a hotspot.
const (
	HotspotSeverityCritical HotspotSeverity = "critical"
	HotspotSeverityHigh     HotspotSeverity = "high"
	HotspotSeverityModerate HotspotSeverity = "moderate"
	HotspotSeverityLow      HotspotSeverity = "low"
)
type HotspotSummary ¶
type HotspotSummary struct {
TotalFiles int `json:"total_files"`
HotspotCount int `json:"hotspot_count"` // Files above threshold
MaxHotspotScore float64 `json:"max_hotspot_score"`
AvgHotspotScore float64 `json:"avg_hotspot_score"`
P50HotspotScore float64 `json:"p50_hotspot_score"`
P90HotspotScore float64 `json:"p90_hotspot_score"`
}
HotspotSummary provides aggregate statistics for hotspot analysis.
type JITAnalysis ¶
type JITAnalysis struct {
GeneratedAt time.Time `json:"generated_at"`
PeriodDays int `json:"period_days"`
Commits []CommitRisk `json:"commits"`
Summary JITSummary `json:"summary"`
Weights JITWeights `json:"weights"`
Normalization NormalizationStats `json:"normalization"`
}
JITAnalysis represents the full JIT defect prediction result.
func NewJITAnalysis ¶
func NewJITAnalysis() *JITAnalysis
NewJITAnalysis creates an initialized JIT analysis.
type JITRiskLevel ¶
type JITRiskLevel string
JITRiskLevel represents the risk level for a commit.
const (
	JITRiskLow    JITRiskLevel = "low"    // < 0.4
	JITRiskMedium JITRiskLevel = "medium" // 0.4 - 0.7
	JITRiskHigh   JITRiskLevel = "high"   // >= 0.7
)
func GetJITRiskLevel ¶
func GetJITRiskLevel(score float64) JITRiskLevel
GetJITRiskLevel determines risk level from score.
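The documented cutoffs, spelled out (input scores are illustrative):

	GetJITRiskLevel(0.35) // JITRiskLow: < 0.4
	GetJITRiskLevel(0.55) // JITRiskMedium: 0.4 - 0.7
	GetJITRiskLevel(0.72) // JITRiskHigh: >= 0.7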
type JITSummary ¶
type JITSummary struct {
TotalCommits int `json:"total_commits"`
HighRiskCount int `json:"high_risk_count"`
MediumRiskCount int `json:"medium_risk_count"`
LowRiskCount int `json:"low_risk_count"`
BugFixCount int `json:"bug_fix_count"`
AvgRiskScore float64 `json:"avg_risk_score"`
P50RiskScore float64 `json:"p50_risk_score"`
P95RiskScore float64 `json:"p95_risk_score"`
}
JITSummary provides aggregate statistics.
type JITWeights ¶
type JITWeights struct {
FIX float64 `json:"fix"` // Is bug fix commit?
Entropy float64 `json:"entropy"` // Change entropy across files
LA float64 `json:"la"` // Lines added
NUC float64 `json:"nuc"` // Number of unique prior commits
NF float64 `json:"nf"` // Number of files modified
LD float64 `json:"ld"` // Lines deleted
NDEV float64 `json:"ndev"` // Number of developers
EXP float64 `json:"exp"` // Author experience (inverted)
}
JITWeights defines the weights for JIT defect prediction features. Based on Kamei et al. (2013) "A Large-Scale Empirical Study of Just-in-Time Quality Assurance" and Zeng et al. (2021) showing simple models match deep learning accuracy (~65%).
func DefaultJITWeights ¶
func DefaultJITWeights() JITWeights
DefaultJITWeights returns research-backed weights from the requirements spec.
type Language ¶
type Language string
Language represents the detected programming language.
const (
	LanguageUnknown    Language = "unknown"
	LanguageRust       Language = "rust"
	LanguageGo         Language = "go"
	LanguagePython     Language = "python"
	LanguageJavaScript Language = "javascript"
	LanguageTypeScript Language = "typescript"
	LanguageJava       Language = "java"
	LanguageC          Language = "c"
	LanguageCpp        Language = "cpp"
	LanguageCSharp     Language = "csharp"
	LanguageRuby       Language = "ruby"
	LanguagePHP        Language = "php"
	LanguageSwift      Language = "swift"
	LanguageKotlin     Language = "kotlin"
)
func LanguageFromExtension ¶
LanguageFromExtension detects the language from a file extension.
func (Language) Confidence ¶
Confidence returns the detection confidence for the language.
type LanguageOverride ¶
type LanguageOverride struct {
MaxCognitiveComplexity *uint32 `json:"max_cognitive_complexity,omitempty" toml:"max_cognitive_complexity,omitempty"`
MinDocCoverage *float32 `json:"min_doc_coverage,omitempty" toml:"min_doc_coverage,omitempty"`
EnforceErrorCheck *bool `json:"enforce_error_check,omitempty" toml:"enforce_error_check,omitempty"`
MaxFunctionLength *uint32 `json:"max_function_length,omitempty" toml:"max_function_length,omitempty"`
}
LanguageOverride defines language-specific overrides.
type MermaidDirection ¶
type MermaidDirection string
MermaidDirection specifies the graph direction.
const (
	DirectionTD MermaidDirection = "TD" // Top-down
	DirectionLR MermaidDirection = "LR" // Left-right
	DirectionBT MermaidDirection = "BT" // Bottom-top
	DirectionRL MermaidDirection = "RL" // Right-left
)
type MermaidOptions ¶
type MermaidOptions struct {
MaxNodes int `json:"max_nodes"`
MaxEdges int `json:"max_edges"`
ShowComplexity bool `json:"show_complexity"`
GroupByModule bool `json:"group_by_module"`
NodeComplexity map[string]int `json:"node_complexity,omitempty"`
Direction MermaidDirection `json:"direction"`
}
MermaidOptions configures Mermaid diagram generation.
func DefaultMermaidOptions ¶
func DefaultMermaidOptions() MermaidOptions
DefaultMermaidOptions returns sensible defaults.
type MetricCategory ¶
type MetricCategory string
MetricCategory represents a category of TDG metrics.
const (
	MetricStructuralComplexity MetricCategory = "structural_complexity"
	MetricSemanticComplexity   MetricCategory = "semantic_complexity"
	MetricDuplication          MetricCategory = "duplication"
	MetricCoupling             MetricCategory = "coupling"
	MetricDocumentation        MetricCategory = "documentation"
	MetricConsistency          MetricCategory = "consistency"
)
type MinHashSignature ¶
type MinHashSignature struct {
Values []uint64 `json:"values"`
}
MinHashSignature represents a MinHash signature for similarity estimation.
func (*MinHashSignature) JaccardSimilarity ¶
func (s *MinHashSignature) JaccardSimilarity(other *MinHashSignature) float64
JaccardSimilarity computes similarity between two MinHash signatures.
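A sketch assuming the standard MinHash estimate (the fraction of positions where the two signatures agree); the signature values are illustrative and far shorter than a real signature:

	a := &MinHashSignature{Values: []uint64{1, 2, 3, 4}}
	b := &MinHashSignature{Values: []uint64{1, 2, 9, 4}}
	fmt.Println(a.JaccardSimilarity(b)) // 3 of 4 positions agree -> estimated Jaccard ≈ 0.75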
type NodeMetric ¶
type NodeMetric struct {
NodeID string `json:"node_id"`
Name string `json:"name"`
PageRank float64 `json:"pagerank"`
BetweennessCentrality float64 `json:"betweenness_centrality"`
ClosenessCentrality float64 `json:"closeness_centrality"`
EigenvectorCentrality float64 `json:"eigenvector_centrality"`
HarmonicCentrality float64 `json:"harmonic_centrality"`
InDegree int `json:"in_degree"`
OutDegree int `json:"out_degree"`
ClusteringCoef float64 `json:"clustering_coefficient"`
CommunityID int `json:"community_id,omitempty"`
}
NodeMetric represents computed metrics for a single node.
type NormalizationStats ¶
type NormalizationStats struct {
MaxLinesAdded int `json:"max_lines_added"`
MaxLinesDeleted int `json:"max_lines_deleted"`
MaxNumFiles int `json:"max_num_files"`
MaxUniqueChanges int `json:"max_unique_changes"`
MaxNumDevelopers int `json:"max_num_developers"`
MaxAuthorExperience int `json:"max_author_experience"`
MaxEntropy float64 `json:"max_entropy"`
}
NormalizationStats holds min-max values for normalization.
type OwnershipAnalysis ¶
type OwnershipAnalysis struct {
GeneratedAt time.Time `json:"generated_at"`
Files []FileOwnership `json:"files"`
Summary OwnershipSummary `json:"summary"`
}
OwnershipAnalysis represents the full ownership analysis result.
func (*OwnershipAnalysis) CalculateSummary ¶
func (o *OwnershipAnalysis) CalculateSummary()
CalculateSummary computes summary statistics.
type OwnershipSummary ¶
type OwnershipSummary struct {
TotalFiles int `json:"total_files"`
BusFactor int `json:"bus_factor"`
SiloCount int `json:"silo_count"`
AvgContributors float64 `json:"avg_contributors"`
MaxConcentration float64 `json:"max_concentration"`
TopContributors []string `json:"top_contributors"`
}
OwnershipSummary provides aggregate statistics.
type PenaltyAttribution ¶
type PenaltyAttribution struct {
SourceMetric MetricCategory `json:"source_metric"`
Amount float32 `json:"amount"`
AppliedTo []MetricCategory `json:"applied_to"`
Issue string `json:"issue"`
}
PenaltyAttribution tracks where a penalty was applied.
type PenaltyConfig ¶
type PenaltyConfig struct {
ComplexityPenaltyBase PenaltyCurve `json:"complexity_penalty_base" toml:"complexity_penalty_base"`
DuplicationPenaltyCurve PenaltyCurve `json:"duplication_penalty_curve" toml:"duplication_penalty_curve"`
CouplingPenaltyCurve PenaltyCurve `json:"coupling_penalty_curve" toml:"coupling_penalty_curve"`
}
PenaltyConfig defines penalty curves for each metric.
func DefaultPenaltyConfig ¶
func DefaultPenaltyConfig() PenaltyConfig
DefaultPenaltyConfig returns the default penalty configuration.
type PenaltyCurve ¶
type PenaltyCurve string
PenaltyCurve defines how penalties are applied.
const (
	PenaltyCurveLinear      PenaltyCurve = "linear"
	PenaltyCurveLogarithmic PenaltyCurve = "logarithmic"
	PenaltyCurveQuadratic   PenaltyCurve = "quadratic"
	PenaltyCurveExponential PenaltyCurve = "exponential"
)
func (PenaltyCurve) Apply ¶
func (pc PenaltyCurve) Apply(value, base float32) float32
Apply applies the penalty curve to a value.
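A call-shape sketch; the exact per-curve formula is not documented here, so the comments only indicate the intended growth shape:

	base := float32(10)
	PenaltyCurveLinear.Apply(0.4, base)    // penalty grows roughly in proportion to value
	PenaltyCurveQuadratic.Apply(0.4, base) // penalty accelerates as value increases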
type PenaltyTracker ¶
type PenaltyTracker struct {
// contains filtered or unexported fields
}
PenaltyTracker tracks penalties applied during analysis.
func NewPenaltyTracker ¶
func NewPenaltyTracker() *PenaltyTracker
NewPenaltyTracker creates a new penalty tracker.
func (*PenaltyTracker) Apply ¶
func (pt *PenaltyTracker) Apply(issueID string, category MetricCategory, amount float32, issue string) float32
Apply attempts to apply a penalty, returning the amount if applied or 0 if already applied.
func (*PenaltyTracker) GetAttributions ¶
func (pt *PenaltyTracker) GetAttributions() []PenaltyAttribution
GetAttributions returns all applied penalty attributions.
type ProjectScore ¶
type ProjectScore struct {
Files []TdgScore `json:"files"`
AverageScore float32 `json:"average_score"`
AverageGrade Grade `json:"average_grade"`
TotalFiles int `json:"total_files"`
LanguageDistribution map[Language]int `json:"language_distribution"`
GradeDistribution map[Grade]int `json:"grade_distribution"`
}
ProjectScore represents aggregated TDG scores for a project.
func AggregateProjectScore ¶
func AggregateProjectScore(scores []TdgScore) ProjectScore
AggregateProjectScore creates a ProjectScore from individual file scores.
func (*ProjectScore) Average ¶
func (p *ProjectScore) Average() TdgScore
Average returns the average TDG score across all files.
func (*ProjectScore) ToTDGReport ¶
func (p *ProjectScore) ToTDGReport(topN int) *TDGReport
ToTDGReport converts a ProjectScore to pmat-compatible TDGReport. Omen uses 0-100 scale (higher = better quality), pmat uses 0-5 scale (higher = more debt).
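For intuition about the scale conversion, one mapping consistent with the stated scales (not necessarily the package's actual formula):

	// Hypothetical conversion: Omen quality 0-100 (higher = better)
	// -> pmat debt 0-5 (higher = more debt).
	func qualityToDebt(score float32) float32 {
		return (100 - score) / 20 // e.g. quality 90 -> debt 0.5
	}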
type ReferenceEdge ¶
type ReferenceEdge struct {
From uint32 `json:"from"`
To uint32 `json:"to"`
Type ReferenceType `json:"type"`
Confidence float64 `json:"confidence"`
}
ReferenceEdge represents a relationship between two code elements.
type ReferenceNode ¶
type ReferenceNode struct {
ID uint32 `json:"id"`
Name string `json:"name"`
File string `json:"file"`
Line uint32 `json:"line"`
EndLine uint32 `json:"end_line"`
Kind string `json:"kind"` // function, class, variable
Language string `json:"language"`
IsExported bool `json:"is_exported"`
IsEntry bool `json:"is_entry"`
}
ReferenceNode represents a code element in the reference graph.
type ReferenceType ¶
type ReferenceType string
ReferenceType classifies the relationship between code elements.
const (
	RefDirectCall      ReferenceType = "direct_call"
	RefIndirectCall    ReferenceType = "indirect_call"
	RefImport          ReferenceType = "import"
	RefInheritance     ReferenceType = "inheritance"
	RefTypeReference   ReferenceType = "type_reference"
	RefDynamicDispatch ReferenceType = "dynamic_dispatch"
)
type RepoMap ¶
type RepoMap struct {
GeneratedAt time.Time `json:"generated_at"`
Symbols []Symbol `json:"symbols"`
Summary RepoMapSummary `json:"summary"`
}
RepoMap represents a PageRank-ranked summary of repository symbols.
func (*RepoMap) CalculateSummary ¶
func (r *RepoMap) CalculateSummary()
CalculateSummary computes summary statistics for the repo map.
func (*RepoMap) SortByPageRank ¶
func (r *RepoMap) SortByPageRank()
SortByPageRank sorts symbols by PageRank in descending order.
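A sketch of ranking and summarizing a repo map; the symbols and PageRank values are made up:

rm := &RepoMap{
	GeneratedAt: time.Now(),
	Symbols: []Symbol{
		{Name: "Run", Kind: "function", File: "run.go", Line: 12, PageRank: 0.04, InDegree: 9, OutDegree: 3},
		{Name: "helper", Kind: "function", File: "util.go", Line: 3, PageRank: 0.01, InDegree: 1},
	},
}
rm.SortByPageRank()   // highest PageRank first
rm.CalculateSummary() // fills rm.Summary
fmt.Printf("top symbol: %s\n", rm.Symbols[0].Name)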
type RepoMapSummary ¶
type RepoMapSummary struct {
TotalSymbols int `json:"total_symbols"`
TotalFiles int `json:"total_files"`
AvgPageRank float64 `json:"avg_pagerank"`
MaxPageRank float64 `json:"max_pagerank"`
AvgConnections float64 `json:"avg_connections"`
}
RepoMapSummary provides aggregate statistics for the repo map.
type RiskLevel ¶
type RiskLevel string
RiskLevel represents the defect probability risk category. PMAT-compatible: three levels, with thresholds at 0.3 and 0.7.
func CalculateRiskLevel ¶
CalculateRiskLevel determines the risk level from a defect probability. PMAT-compatible: Low (<0.3), Medium (0.3-0.7), High (>=0.7).
type SATDAnalysis ¶
type SATDAnalysis struct {
Items []TechnicalDebt `json:"items"`
Summary SATDSummary `json:"summary"`
TotalFilesAnalyzed int `json:"total_files_analyzed"`
FilesWithDebt int `json:"files_with_debt"`
AnalyzedAt time.Time `json:"analyzed_at"`
}
SATDAnalysis represents the full SATD analysis result.
type SATDSummary ¶
type SATDSummary struct {
TotalItems int `json:"total_items"`
BySeverity map[string]int `json:"by_severity"`
ByCategory map[string]int `json:"by_category"`
ByFile map[string]int `json:"by_file,omitempty"`
FilesWithSATD int `json:"files_with_satd,omitempty"`
AvgAgeDays float64 `json:"avg_age_days,omitempty"`
}
SATDSummary provides aggregate statistics.
func NewSATDSummary ¶
func NewSATDSummary() SATDSummary
NewSATDSummary creates an initialized summary.
func (*SATDSummary) AddItem ¶
func (s *SATDSummary) AddItem(item TechnicalDebt)
AddItem updates the summary with a new debt item.
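A sketch of accumulating SATD items into a summary. The item's fields are placeholders; Category and Severity take this package's DebtCategory and Severity values:

sum := NewSATDSummary()
sum.AddItem(TechnicalDebt{
	File:        "parser.go",
	Line:        120,
	Marker:      "TODO",
	Description: "handle unicode escapes",
})
fmt.Println(sum.TotalItems) // 1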
type Severity ¶
type Severity string
Severity represents the urgency of addressing the debt.
type SmellAnalysis ¶
type SmellAnalysis struct {
GeneratedAt time.Time `json:"generated_at"`
Smells []ArchitecturalSmell `json:"smells"`
Components []ComponentMetrics `json:"components"`
Summary SmellSummary `json:"summary"`
Thresholds SmellThresholds `json:"thresholds"`
}
SmellAnalysis represents the full architectural smell analysis result.
func NewSmellAnalysis ¶
func NewSmellAnalysis() *SmellAnalysis
NewSmellAnalysis creates an initialized smell analysis.
func (*SmellAnalysis) AddSmell ¶
func (a *SmellAnalysis) AddSmell(smell ArchitecturalSmell)
AddSmell adds a smell and updates the summary.
func (*SmellAnalysis) CalculateSummary ¶
func (a *SmellAnalysis) CalculateSummary()
CalculateSummary computes summary statistics from components.
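A sketch of the analysis lifecycle; populating Components is elided here, since it comes from the dependency graph:

a := NewSmellAnalysis()
a.Thresholds = DefaultSmellThresholds()
// ... append detected smells via a.AddSmell and fill a.Components ...
a.CalculateSummary()
fmt.Printf("%d components, avg instability %.2f\n", a.Summary.TotalComponents, a.Summary.AverageInstability)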
type SmellMetrics ¶
type SmellMetrics struct {
FanIn int `json:"fan_in,omitempty"`
FanOut int `json:"fan_out,omitempty"`
Instability float64 `json:"instability,omitempty"`
CycleLength int `json:"cycle_length,omitempty"`
}
SmellMetrics provides quantitative data about the smell.
type SmellSeverity ¶
type SmellSeverity string
SmellSeverity represents the severity level of an architectural smell.
const (
	SmellSeverityCritical SmellSeverity = "critical"
	SmellSeverityHigh     SmellSeverity = "high"
	SmellSeverityMedium   SmellSeverity = "medium"
)
type SmellSummary ¶
type SmellSummary struct {
TotalSmells int `json:"total_smells"`
CyclicCount int `json:"cyclic_count"`
HubCount int `json:"hub_count"`
UnstableCount int `json:"unstable_count"`
GodCount int `json:"god_count"`
CriticalCount int `json:"critical_count"`
HighCount int `json:"high_count"`
MediumCount int `json:"medium_count"`
TotalComponents int `json:"total_components"`
AverageInstability float64 `json:"average_instability"`
}
SmellSummary provides aggregate statistics.
type SmellThresholds ¶
type SmellThresholds struct {
HubThreshold int `json:"hub_threshold"` // Fan-in + Fan-out threshold for hub detection
GodFanInThreshold int `json:"god_fan_in_threshold"` // Minimum fan-in for god component
GodFanOutThreshold int `json:"god_fan_out_threshold"` // Minimum fan-out for god component
InstabilityDifference float64 `json:"instability_difference"` // Max I difference for unstable dependency
StableThreshold float64 `json:"stable_threshold"` // I < this is considered stable
UnstableThreshold float64 `json:"unstable_threshold"` // I > this is considered unstable
}
SmellThresholds configures detection thresholds for architectural smells.
func DefaultSmellThresholds ¶
func DefaultSmellThresholds() SmellThresholds
DefaultSmellThresholds returns sensible default thresholds.
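A sketch of tuning detection sensitivity before running an analysis; the override values are illustrative, not recommendations:

t := DefaultSmellThresholds()
t.HubThreshold = 25        // only flag hubs with combined fan-in + fan-out above 25
t.UnstableThreshold = 0.85 // tolerate more instability before flagging
analysis := NewSmellAnalysis()
analysis.Thresholds = t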
type Symbol ¶
type Symbol struct {
Name string `json:"name"`
Kind string `json:"kind"` // function, class, method, etc.
File string `json:"file"`
Line int `json:"line"`
Signature string `json:"signature"` // Full signature or summary
PageRank float64 `json:"pagerank"`
InDegree int `json:"in_degree"` // How many symbols call/use this
OutDegree int `json:"out_degree"` // How many symbols this calls/uses
}
Symbol represents a code symbol in the repository map.
type TDGHotspot ¶
type TDGHotspot struct {
Path string `json:"path"`
TdgScore float64 `json:"tdg_score"`
PrimaryFactor string `json:"primary_factor"`
EstimatedHours float64 `json:"estimated_hours"`
}
TDGHotspot represents a file with high technical debt (pmat-compatible).
type TDGReport ¶
type TDGReport struct {
Summary TDGSummary `json:"summary"`
Hotspots []TDGHotspot `json:"hotspots"`
}
TDGReport is the pmat-compatible TDG analysis output.
type TDGSeverity ¶
type TDGSeverity string
TDGSeverity represents the severity classification based on thresholds (pmat-compatible).
const (
	TDGSeverityNormal   TDGSeverity = "normal"   // TDG < 1.5
	TDGSeverityWarning  TDGSeverity = "warning"  // TDG 1.5-2.5
	TDGSeverityCritical TDGSeverity = "critical" // TDG > 2.5
)
func TDGSeverityFromValue ¶
func TDGSeverityFromValue(value float64) TDGSeverity
TDGSeverityFromValue converts a TDG value (0-5 scale) to severity.
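Given the documented cutoffs, the mapping is straightforward:

for _, v := range []float64{0.8, 2.0, 3.2} {
	fmt.Println(v, TDGSeverityFromValue(v))
}
// 0.8 normal, 2.0 warning, 3.2 critical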
type TDGSummary ¶
type TDGSummary struct {
TotalFiles int `json:"total_files"`
CriticalFiles int `json:"critical_files"`
WarningFiles int `json:"warning_files"`
AverageTdg float64 `json:"average_tdg"`
P95Tdg float64 `json:"p95_tdg"`
P99Tdg float64 `json:"p99_tdg"`
EstimatedDebtHours float64 `json:"estimated_debt_hours"`
}
TDGSummary provides aggregate statistics (pmat-compatible).
type TdgComparison ¶
type TdgComparison struct {
Source1 TdgScore `json:"source1"`
Source2 TdgScore `json:"source2"`
Delta float32 `json:"delta"`
ImprovementPercentage float32 `json:"improvement_percentage"`
Winner string `json:"winner"`
Improvements []string `json:"improvements"`
Regressions []string `json:"regressions"`
}
TdgComparison represents a comparison between two TDG scores.
func NewTdgComparison ¶
func NewTdgComparison(source1, source2 TdgScore) TdgComparison
NewTdgComparison creates a comparison between two scores.
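A sketch comparing a before/after pair; Totals are set directly here for brevity, whereas real scores come from the analyzer:

before := NewTdgScore()
before.Total = 72 // illustrative
after := NewTdgScore()
after.Total = 81
cmp := NewTdgComparison(before, after)
fmt.Printf("delta %.1f, winner %s\n", cmp.Delta, cmp.Winner)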
type TdgConfig ¶
type TdgConfig struct {
Weights WeightConfig `json:"weights" toml:"weights"`
Thresholds ThresholdConfig `json:"thresholds" toml:"thresholds"`
Penalties PenaltyConfig `json:"penalties" toml:"penalties"`
LanguageOverrides map[string]LanguageOverride `json:"language_overrides,omitempty" toml:"language_overrides,omitempty"`
}
TdgConfig is the TDG configuration.
func DefaultTdgConfig ¶
func DefaultTdgConfig() TdgConfig
DefaultTdgConfig returns the default TDG configuration.
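A sketch of overriding defaults; the specific values are illustrative:

cfg := DefaultTdgConfig()
cfg.Thresholds.MaxCyclomaticComplexity = 15 // tighten from the default
cfg.Weights.Duplication = 0.20              // illustrative re-weighting
// cfg.LanguageOverrides lets individual languages deviate from these global settings.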
func LoadTdgConfig ¶
LoadTdgConfig loads configuration from a JSON file.
type TdgScore ¶
type TdgScore struct {
// Component scores (each contributes to the 100-point total)
StructuralComplexity float32 `json:"structural_complexity"` // Max 20 points
SemanticComplexity float32 `json:"semantic_complexity"` // Max 15 points
DuplicationRatio float32 `json:"duplication_ratio"` // Max 15 points
CouplingScore float32 `json:"coupling_score"` // Max 15 points
DocCoverage float32 `json:"doc_coverage"` // Max 5 points
ConsistencyScore float32 `json:"consistency_score"` // Max 10 points
HotspotScore float32 `json:"hotspot_score"` // Max 10 points (churn x complexity)
TemporalCouplingScore float32 `json:"temporal_coupling_score"` // Max 10 points (co-change patterns)
EntropyScore float32 `json:"entropy_score"` // Max 10 points (pattern entropy)
// Aggregated score and grade
Total float32 `json:"total"` // 0-100 (higher is better)
Grade Grade `json:"grade"` // A+ to F
// Metadata
Confidence float32 `json:"confidence"` // 0-1 confidence in the score
Language Language `json:"language"` // Detected language
FilePath string `json:"file_path,omitempty"` // Source file path
CriticalDefectsCount int `json:"critical_defects_count"` // Count of critical defects
HasCriticalDefects bool `json:"has_critical_defects"` // Auto-fail flag
// Penalty tracking for transparency
PenaltiesApplied []PenaltyAttribution `json:"penalties_applied,omitempty"`
}
TdgScore represents a TDG score (0-100, higher is better).
func NewTdgScore ¶
func NewTdgScore() TdgScore
NewTdgScore creates a new TDG score with default values.
func (*TdgScore) CalculateTotal ¶
func (s *TdgScore) CalculateTotal()
CalculateTotal computes the total score and grade from components.
func (*TdgScore) SetMetric ¶
func (s *TdgScore) SetMetric(category MetricCategory, value float32)
SetMetric sets a metric value by category.
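A sketch of scoring a single file. The metric value and the MetricCategory constant name are assumptions for illustration:

s := NewTdgScore()
s.FilePath = "internal/parser.go"
// "MetricCategoryStructural" is an assumed constant name; use the package's real MetricCategory values.
s.SetMetric(MetricCategoryStructural, 18.0)
s.CalculateTotal() // derives Total and Grade from the component scores
fmt.Printf("%s: %.1f (%s)\n", s.FilePath, s.Total, s.Grade)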
type TechnicalDebt ¶
type TechnicalDebt struct {
Category DebtCategory `json:"category" toon:"category"`
Severity Severity `json:"severity" toon:"severity"`
File string `json:"file" toon:"file"`
Line uint32 `json:"line" toon:"line"`
Description string `json:"description" toon:"description"`
Marker string `json:"marker" toon:"marker"` // TODO, FIXME, HACK, etc.
Text string `json:"text,omitempty" toon:"text,omitempty"`
Column uint32 `json:"column,omitempty" toon:"column,omitempty"`
ContextHash string `json:"context_hash,omitempty" toon:"context_hash,omitempty"` // BLAKE3 hash for identity tracking
Author string `json:"author,omitempty" toon:"author,omitempty"`
Date *time.Time `json:"date,omitempty" toon:"date,omitempty"`
}
TechnicalDebt represents a single SATD item found in code.
type TemporalCouplingAnalysis ¶
type TemporalCouplingAnalysis struct {
GeneratedAt time.Time `json:"generated_at"`
PeriodDays int `json:"period_days"`
MinCochanges int `json:"min_cochanges"`
Couplings []FileCoupling `json:"couplings"`
Summary TemporalCouplingSummary `json:"summary"`
}
TemporalCouplingAnalysis represents the full temporal coupling analysis result.
func (*TemporalCouplingAnalysis) CalculateSummary ¶
func (t *TemporalCouplingAnalysis) CalculateSummary(totalFiles int)
CalculateSummary computes summary statistics from couplings. Couplings should be sorted by CouplingStrength descending before calling.
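A sketch honoring the sort precondition; couplings and totalFiles are placeholders produced by the git-history scan:

t := &TemporalCouplingAnalysis{
	GeneratedAt:  time.Now(),
	PeriodDays:   90,
	MinCochanges: 3,
	Couplings:    couplings, // []FileCoupling from git history
}
sort.Slice(t.Couplings, func(i, j int) bool {
	return t.Couplings[i].CouplingStrength > t.Couplings[j].CouplingStrength
})
t.CalculateSummary(totalFiles)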
type TemporalCouplingSummary ¶
type TemporalCouplingSummary struct {
TotalCouplings int `json:"total_couplings"`
StrongCouplings int `json:"strong_couplings"` // Strength >= 0.5
AvgCouplingStrength float64 `json:"avg_coupling_strength"`
MaxCouplingStrength float64 `json:"max_coupling_strength"`
TotalFilesAnalyzed int `json:"total_files_analyzed"`
}
TemporalCouplingSummary provides aggregate statistics.
type ThresholdConfig ¶
type ThresholdConfig struct {
MaxCyclomaticComplexity uint32 `json:"max_cyclomatic_complexity" toml:"max_cyclomatic_complexity"`
MaxCognitiveComplexity uint32 `json:"max_cognitive_complexity" toml:"max_cognitive_complexity"`
MaxNestingDepth uint32 `json:"max_nesting_depth" toml:"max_nesting_depth"`
MinTokenSequence uint32 `json:"min_token_sequence" toml:"min_token_sequence"`
SimilarityThreshold float32 `json:"similarity_threshold" toml:"similarity_threshold"`
MaxCoupling uint32 `json:"max_coupling" toml:"max_coupling"`
MinDocCoverage float32 `json:"min_doc_coverage" toml:"min_doc_coverage"`
}
ThresholdConfig defines thresholds for TDG analysis.
func DefaultThresholdConfig ¶
func DefaultThresholdConfig() ThresholdConfig
DefaultThresholdConfig returns enterprise-standard thresholds.
type UnreachableBlock ¶
type UnreachableBlock struct {
File string `json:"file"`
StartLine uint32 `json:"start_line"`
EndLine uint32 `json:"end_line"`
Reason string `json:"reason"` // e.g., "after return", "dead branch"
}
UnreachableBlock represents code that can never execute.
type Violation ¶
type Violation struct {
Severity ViolationSeverity `json:"severity"`
Rule string `json:"rule"`
Message string `json:"message"`
Value uint32 `json:"value"`
Threshold uint32 `json:"threshold"`
File string `json:"file"`
Line uint32 `json:"line"`
Function string `json:"function,omitempty"`
}
Violation represents a complexity threshold violation.
type ViolationSeverity ¶
type ViolationSeverity string
ViolationSeverity indicates the severity of a complexity violation.
const (
	SeverityWarning ViolationSeverity = "warning"
	SeverityError   ViolationSeverity = "error"
)
type WeightConfig ¶
type WeightConfig struct {
StructuralComplexity float32 `json:"structural_complexity" toml:"structural_complexity"`
SemanticComplexity float32 `json:"semantic_complexity" toml:"semantic_complexity"`
Duplication float32 `json:"duplication" toml:"duplication"`
Coupling float32 `json:"coupling" toml:"coupling"`
Documentation float32 `json:"documentation" toml:"documentation"`
Consistency float32 `json:"consistency" toml:"consistency"`
Hotspot float32 `json:"hotspot" toml:"hotspot"`
TemporalCoupling float32 `json:"temporal_coupling" toml:"temporal_coupling"`
}
WeightConfig defines the weight for each TDG metric component.
func DefaultWeightConfig ¶
func DefaultWeightConfig() WeightConfig
DefaultWeightConfig returns the default weight configuration. Weights are rebalanced to include hotspot (10%) and temporal coupling (10%).