Files
bzzz/pkg/slurp/temporal/staleness_detector.go
anthonyrawlins d96c931a29 Resolve import cycles and migrate to chorus.services module path
This comprehensive refactoring addresses critical architectural issues:

IMPORT CYCLE RESOLUTION:
• pkg/crypto ↔ pkg/slurp/roles: Created pkg/security/access_levels.go
• pkg/ucxl → pkg/dht: Created pkg/storage/interfaces.go
• pkg/slurp/leader → pkg/election → pkg/slurp/storage: Moved types to pkg/election/interfaces.go

MODULE PATH MIGRATION:
• Changed from github.com/anthonyrawlins/bzzz to chorus.services/bzzz
• Updated all import statements across 115+ files
• Maintains compatibility while removing personal GitHub account dependency

TYPE SYSTEM IMPROVEMENTS:
• Resolved duplicate type declarations in crypto package
• Added missing type definitions (RoleStatus, TimeRestrictions, KeyStatus, KeyRotationResult)
• Proper interface segregation to prevent future cycles

ARCHITECTURAL BENEFITS:
• Build now progresses past structural issues to normal dependency resolution
• Cleaner separation of concerns between packages
• Eliminates circular dependencies that prevented compilation
• Establishes foundation for scalable codebase growth

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-17 10:04:25 +10:00

895 lines
25 KiB
Go

package temporal
import (
"context"
"fmt"
"math"
"sort"
"sync"
"time"
"chorus.services/bzzz/pkg/ucxl"
)
// stalenessDetectorImpl implements the StalenessDetector interface
// stalenessDetectorImpl implements the StalenessDetector interface.
//
// It scores temporal context nodes by combining several weighted heuristics
// (time, influence, activity, importance, complexity, dependency), caches
// the results of full detection runs, and derives refresh recommendations.
// All exported operations are guarded by mu; graph state is read under the
// graph's own lock (lock ordering: mu before graph.mu).
type stalenessDetectorImpl struct {
	mu sync.RWMutex

	// Reference to the temporal graph whose nodes are analyzed.
	graph *temporalGraphImpl

	// Configuration:
	//   weights          — per-heuristic multipliers combined in calculateNodeStaleness
	//   defaultThreshold — score at/above which a context counts as stale for statistics
	//   analysisWindow   — how far back influence-based checks look
	weights          *StalenessWeights
	defaultThreshold float64
	analysisWindow   time.Duration

	// Cached results of the last full detection run; valid for
	// cacheValidDuration after lastDetectionRun.
	lastDetectionRun    time.Time
	cachedStaleContexts []*StaleContext
	cachedStatistics    *StalenessStatistics
	cacheValidDuration  time.Duration

	// Detection settings: feature flags that enable/disable each heuristic.
	enableTimeBasedStaleness       bool
	enableInfluenceBasedStaleness  bool
	enableActivityBasedStaleness   bool
	enableImportanceBasedStaleness bool
	enableComplexityBasedStaleness bool
	enableDependencyBasedStaleness bool
}
// NewStalenessDetector creates a new staleness detector
// NewStalenessDetector creates a new staleness detector backed by the given
// temporal graph. Defaults: staleness threshold 0.6, a 30-day analysis
// window, a 15-minute result cache, and every detection heuristic enabled.
func NewStalenessDetector(graph *temporalGraphImpl) StalenessDetector {
	detector := &stalenessDetectorImpl{
		graph:              graph,
		weights:            graph.stalenessWeight,
		defaultThreshold:   0.6,
		analysisWindow:     30 * 24 * time.Hour, // 30 days
		cacheValidDuration: 15 * time.Minute,
	}

	// Enable all detection methods by default.
	detector.enableTimeBasedStaleness = true
	detector.enableInfluenceBasedStaleness = true
	detector.enableActivityBasedStaleness = true
	detector.enableImportanceBasedStaleness = true
	detector.enableComplexityBasedStaleness = true
	detector.enableDependencyBasedStaleness = true

	return detector
}
// CalculateStaleness calculates staleness score based on decision relationships
// CalculateStaleness computes the staleness score (0..1) for the latest
// temporal node at the given address. Returns an error when no node exists
// for the address.
func (sd *stalenessDetectorImpl) CalculateStaleness(ctx context.Context, address ucxl.Address) (float64, error) {
	sd.mu.RLock()
	defer sd.mu.RUnlock()

	sd.graph.mu.RLock()
	defer sd.graph.mu.RUnlock()

	latest, err := sd.graph.getLatestNodeUnsafe(address)
	if err != nil {
		return 0, fmt.Errorf("node not found: %w", err)
	}

	return sd.calculateNodeStaleness(latest), nil
}
// DetectStaleContexts detects all stale contexts above threshold
// DetectStaleContexts detects all stale contexts at or above the given
// threshold, sorted by staleness score (highest first).
//
// Results of a full run are cached for cacheValidDuration. The cache holds
// an entry for EVERY node (not just those above the threshold of the run
// that populated it), so filtering the cache is correct for any requested
// threshold. Previously only entries >= the original run's threshold were
// cached, which made a later call with a lower threshold silently return
// incomplete results.
func (sd *stalenessDetectorImpl) DetectStaleContexts(ctx context.Context, threshold float64) ([]*StaleContext, error) {
	sd.mu.Lock()
	defer sd.mu.Unlock()

	// Serve from cache while it is still fresh.
	if sd.cachedStaleContexts != nil && time.Since(sd.lastDetectionRun) < sd.cacheValidDuration {
		return sd.filterStaleByThreshold(sd.cachedStaleContexts, threshold), nil
	}

	sd.graph.mu.RLock()
	defer sd.graph.mu.RUnlock()

	detectionStart := time.Now()

	// Build a StaleContext for every node so the cache can answer any
	// threshold. Memory cost is O(number of nodes), same order as the graph.
	allContexts := make([]*StaleContext, 0, len(sd.graph.nodes))
	for _, node := range sd.graph.nodes {
		stalenessScore := sd.calculateNodeStaleness(node)
		allContexts = append(allContexts, &StaleContext{
			UCXLAddress:      node.UCXLAddress,
			TemporalNode:     node,
			StalenessScore:   stalenessScore,
			LastUpdated:      node.Timestamp,
			Reasons:          sd.analyzeStalenessReasons(node, stalenessScore),
			SuggestedActions: sd.generateRefreshActions(node),
			RelatedChanges:   sd.findRelatedChanges(node),
			Priority:         sd.calculatePriority(stalenessScore, node),
		})
	}

	// Sort by staleness score (highest first); filtered views stay sorted.
	sort.Slice(allContexts, func(i, j int) bool {
		return allContexts[i].StalenessScore > allContexts[j].StalenessScore
	})

	// Update cache.
	sd.cachedStaleContexts = allContexts
	sd.lastDetectionRun = time.Now()

	result := sd.filterStaleByThreshold(allContexts, threshold)

	// Update statistics with the count of contexts above this threshold.
	sd.updateStatistics(len(sd.graph.nodes), len(result), time.Since(detectionStart))

	return result, nil
}

// filterStaleByThreshold returns the subset of contexts whose staleness
// score is at or above threshold, preserving input order.
func (sd *stalenessDetectorImpl) filterStaleByThreshold(contexts []*StaleContext, threshold float64) []*StaleContext {
	filtered := make([]*StaleContext, 0, len(contexts))
	for _, sc := range contexts {
		if sc.StalenessScore >= threshold {
			filtered = append(filtered, sc)
		}
	}
	return filtered
}
// GetStalenessReasons gets reasons why context is considered stale
// GetStalenessReasons returns human-readable reasons why the context at the
// given address is considered stale. Returns an error when no node exists.
func (sd *stalenessDetectorImpl) GetStalenessReasons(ctx context.Context, address ucxl.Address) ([]string, error) {
	sd.mu.RLock()
	defer sd.mu.RUnlock()

	sd.graph.mu.RLock()
	defer sd.graph.mu.RUnlock()

	latest, err := sd.graph.getLatestNodeUnsafe(address)
	if err != nil {
		return nil, fmt.Errorf("node not found: %w", err)
	}

	score := sd.calculateNodeStaleness(latest)
	return sd.analyzeStalenessReasons(latest, score), nil
}
// SuggestRefreshActions suggests actions to refresh stale context
// SuggestRefreshActions suggests concrete actions to refresh the stale
// context at the given address, sorted by descending priority.
func (sd *stalenessDetectorImpl) SuggestRefreshActions(ctx context.Context, address ucxl.Address) ([]*RefreshAction, error) {
	sd.mu.RLock()
	defer sd.mu.RUnlock()

	sd.graph.mu.RLock()
	defer sd.graph.mu.RUnlock()

	node, err := sd.graph.getLatestNodeUnsafe(address)
	if err != nil {
		return nil, fmt.Errorf("node not found: %w", err)
	}

	// Expand each textual action description into a structured RefreshAction.
	descriptions := sd.generateRefreshActions(node)
	refreshActions := make([]*RefreshAction, 0, len(descriptions))
	for _, desc := range descriptions {
		refreshActions = append(refreshActions, &RefreshAction{
			Type:            sd.categorizeAction(desc),
			Description:     desc,
			Priority:        sd.calculateActionPriority(desc, node),
			EstimatedEffort: sd.estimateEffort(desc),
			RequiredRoles:   sd.getRequiredRoles(desc),
			Dependencies:    sd.getActionDependencies(desc),
			Metadata:        make(map[string]interface{}),
		})
	}

	// Highest priority first.
	sort.Slice(refreshActions, func(a, b int) bool {
		return refreshActions[a].Priority > refreshActions[b].Priority
	})

	return refreshActions, nil
}
// UpdateStalenessWeights updates weights used in staleness calculation
// UpdateStalenessWeights replaces the weights used in staleness calculation
// and invalidates all cached detection results. Returns an error when the
// weights are nil or any weight falls outside [0, 1].
func (sd *stalenessDetectorImpl) UpdateStalenessWeights(weights *StalenessWeights) error {
	// Guard explicitly: a nil pointer would otherwise be dereferenced during
	// validation below.
	if weights == nil {
		return fmt.Errorf("invalid weights: weights must not be nil")
	}

	sd.mu.Lock()
	defer sd.mu.Unlock()

	// Validate weights before applying anything.
	if err := sd.validateWeights(weights); err != nil {
		return fmt.Errorf("invalid weights: %w", err)
	}

	sd.weights = weights

	// The graph's weight pointer is shared state; write it under the graph's
	// write lock so it cannot race with readers holding graph.mu.RLock.
	// Lock ordering (sd.mu before graph.mu) matches every other method here.
	sd.graph.mu.Lock()
	sd.graph.stalenessWeight = weights
	sd.graph.mu.Unlock()

	// Clear caches to force recalculation with the new weights.
	sd.cachedStaleContexts = nil
	sd.cachedStatistics = nil

	return nil
}
// GetStalenessStats returns staleness detection statistics
// GetStalenessStats returns staleness detection statistics, computing and
// caching them on demand.
//
// It takes the write lock because a cache miss assigns sd.cachedStatistics;
// doing that while holding only the read lock is a data race with
// concurrent callers.
func (sd *stalenessDetectorImpl) GetStalenessStats() (*StalenessStatistics, error) {
	sd.mu.Lock()
	defer sd.mu.Unlock()

	if sd.cachedStatistics != nil {
		return sd.cachedStatistics, nil
	}

	// Generate fresh statistics from the current graph state.
	sd.graph.mu.RLock()
	defer sd.graph.mu.RUnlock()

	totalContexts := int64(len(sd.graph.nodes))
	staleCount := int64(0)
	totalStaleness := 0.0
	maxStaleness := 0.0

	for _, node := range sd.graph.nodes {
		staleness := sd.calculateNodeStaleness(node)
		totalStaleness += staleness
		if staleness > maxStaleness {
			maxStaleness = staleness
		}
		if staleness >= sd.defaultThreshold {
			staleCount++
		}
	}

	avgStaleness := 0.0
	stalenessRate := 0.0
	if totalContexts > 0 {
		avgStaleness = totalStaleness / float64(totalContexts)
		stalenessRate = float64(staleCount) / float64(totalContexts) * 100.0
	}

	stats := &StalenessStatistics{
		TotalContexts:          totalContexts,
		StaleContexts:          staleCount,
		StalenessRate:          stalenessRate,
		AverageStaleness:       avgStaleness,
		MaxStaleness:           maxStaleness,
		LastDetectionRun:       sd.lastDetectionRun,
		DetectionDuration:      0,          // only populated by full detection runs
		RefreshRecommendations: staleCount, // one recommendation per stale context
	}

	sd.cachedStatistics = stats
	return stats, nil
}
// Core staleness calculation logic
func (sd *stalenessDetectorImpl) calculateNodeStaleness(node *TemporalNode) float64 {
staleness := 0.0
// Time-based staleness
if sd.enableTimeBasedStaleness {
timeStaleness := sd.calculateTimeStaleness(node)
staleness += timeStaleness * sd.weights.TimeWeight
}
// Influence-based staleness
if sd.enableInfluenceBasedStaleness {
influenceStaleness := sd.calculateInfluenceStaleness(node)
staleness += influenceStaleness * sd.weights.InfluenceWeight
}
// Activity-based staleness
if sd.enableActivityBasedStaleness {
activityStaleness := sd.calculateActivityStaleness(node)
staleness += activityStaleness * sd.weights.ActivityWeight
}
// Importance-based staleness
if sd.enableImportanceBasedStaleness {
importanceStaleness := sd.calculateImportanceStaleness(node)
staleness += importanceStaleness * sd.weights.ImportanceWeight
}
// Complexity-based staleness
if sd.enableComplexityBasedStaleness {
complexityStaleness := sd.calculateComplexityStaleness(node)
staleness += complexityStaleness * sd.weights.ComplexityWeight
}
// Dependency-based staleness
if sd.enableDependencyBasedStaleness {
dependencyStaleness := sd.calculateDependencyStaleness(node)
staleness += dependencyStaleness * sd.weights.DependencyWeight
}
// Ensure staleness is between 0 and 1
return math.Max(0, math.Min(1.0, staleness))
}
// calculateTimeStaleness maps the node's age onto a piecewise-linear
// staleness curve:
//
//	Fresh      (0-7 days):   0.0-0.2
//	Moderate   (7-30 days):  0.2-0.6
//	Stale      (30-90 days): 0.6-0.9
//	Very stale (90+ days):   0.9-1.0 (saturating over roughly a year)
func (sd *stalenessDetectorImpl) calculateTimeStaleness(node *TemporalNode) float64 {
	days := time.Since(node.Timestamp).Hours() / 24.0

	switch {
	case days <= 7:
		return days / 35.0 // 0-0.2 over 7 days
	case days <= 30:
		return 0.2 + ((days-7)/23.0)*0.4 // 0.2-0.6 over 23 days
	case days <= 90:
		return 0.6 + ((days-30)/60.0)*0.3 // 0.6-0.9 over 60 days
	default:
		return 0.9 + math.Min(0.1, (days-90)/365.0*0.1) // 0.9-1.0 over 365 days
	}
}
// calculateInfluenceStaleness scores how stale a context is relative to its
// influencers: the larger the fraction of influencers updated within the
// analysis window, the staler this context. The score is amplified when the
// node itself predates the window despite those influencer changes.
func (sd *stalenessDetectorImpl) calculateInfluenceStaleness(node *TemporalNode) float64 {
	total := len(node.InfluencedBy)
	if total == 0 {
		// No influencers means no influence-based staleness.
		return 0.0
	}

	cutoff := time.Now().Add(-sd.analysisWindow)
	recent := 0
	for _, addr := range node.InfluencedBy {
		if influencer := sd.findLatestNodeByAddress(addr); influencer != nil && influencer.Timestamp.After(cutoff) {
			recent++
		}
	}

	// Fraction of influencers that changed recently.
	staleness := float64(recent) / float64(total)

	// Amplify when this node has not been touched despite influencer churn.
	if recent > 0 && node.Timestamp.Before(cutoff) {
		staleness *= 1.5
	}

	return math.Min(1.0, staleness)
}
// calculateActivityStaleness scores staleness from recent activity in the
// node's immediate influence network: high churn among neighbors while this
// node stays unchanged suggests it is falling behind.
func (sd *stalenessDetectorImpl) calculateActivityStaleness(node *TemporalNode) float64 {
	totalConnections := len(node.Influences) + len(node.InfluencedBy)
	if totalConnections == 0 {
		return 0.0
	}

	// Count neighbor decisions from the last week, in both directions.
	cutoff := time.Now().Add(-7 * 24 * time.Hour)
	recentDecisions := 0
	for _, group := range [][]ucxl.Address{node.Influences, node.InfluencedBy} {
		for _, addr := range group {
			if neighbor := sd.findLatestNodeByAddress(addr); neighbor != nil && neighbor.Timestamp.After(cutoff) {
				recentDecisions++
			}
		}
	}

	activity := float64(recentDecisions) / float64(totalConnections)

	// Amplify when this node is old relative to the surrounding activity.
	ageDays := time.Since(node.Timestamp).Hours() / 24.0
	if ageDays > 7 && activity > 0.3 {
		activity *= 1.3
	}

	return math.Min(1.0, activity)
}
// calculateImportanceStaleness amplifies time-based staleness for important
// contexts: wider impact scope, more outgoing influence, and low confidence
// each raise the multiplier applied to the base time staleness.
func (sd *stalenessDetectorImpl) calculateImportanceStaleness(node *TemporalNode) float64 {
	multiplier := 1.0

	// Wider impact scope => faster perceived staleness.
	switch node.ImpactScope {
	case ImpactSystem:
		multiplier *= 1.4
	case ImpactProject:
		multiplier *= 1.2
	case ImpactModule:
		multiplier *= 1.1
	case ImpactLocal:
		multiplier *= 1.0
	}

	// More outgoing influence => higher importance.
	switch n := len(node.Influences); {
	case n > 5:
		multiplier *= 1.3
	case n > 2:
		multiplier *= 1.1
	}

	// Low confidence makes refreshing this context more important.
	if node.Confidence < 0.6 {
		multiplier *= 1.2
	}

	return math.Min(1.0, sd.calculateTimeStaleness(node)*multiplier)
}
// calculateComplexityStaleness scores staleness for complex contexts: the
// more technologies, insights, tags, and descriptive text a context carries,
// the faster it is assumed to go stale. The capped complexity contributions
// are scaled by time staleness so fresh contexts never score high here.
func (sd *stalenessDetectorImpl) calculateComplexityStaleness(node *TemporalNode) float64 {
	complexity := 0.0
	if c := node.Context; c != nil {
		complexity += math.Min(0.3, float64(len(c.Technologies))/10.0) // technology count
		complexity += math.Min(0.2, float64(len(c.Insights))/5.0)      // insight count
		complexity += math.Min(0.2, float64(len(c.Summary))/500.0)     // summary length
		complexity += math.Min(0.2, float64(len(c.Purpose))/300.0)     // purpose length
		complexity += math.Min(0.1, float64(len(c.Tags))/5.0)          // tag count
	}

	// Complex contexts need more frequent updates.
	return math.Min(1.0, complexity*sd.calculateTimeStaleness(node)*1.5)
}
// calculateDependencyStaleness scores staleness from dependency drift: the
// fraction of this node's influencers that have had a significant change
// since the node was last updated. The score is amplified by 1.3x when any
// dependency's latest change was architectural or requirements-related.
//
// Previously this looked up each dependency twice (once to count significant
// changes, once to check the change reason); both facts are now gathered in
// a single pass, halving the lookups with identical results.
func (sd *stalenessDetectorImpl) calculateDependencyStaleness(node *TemporalNode) float64 {
	if len(node.InfluencedBy) == 0 {
		return 0.0
	}

	significantChanges := 0
	hasCriticalReason := false
	for _, depAddr := range node.InfluencedBy {
		depNode := sd.findLatestNodeByAddress(depAddr)
		if depNode == nil {
			continue
		}
		// Count dependencies that changed significantly after this node.
		if sd.hasSignificantChange(depNode, node.Timestamp) {
			significantChanges++
		}
		// Note architectural/requirements changes for amplification below.
		if depNode.ChangeReason == ReasonArchitectureChange ||
			depNode.ChangeReason == ReasonRequirementsChange {
			hasCriticalReason = true
		}
	}

	staleness := float64(significantChanges) / float64(len(node.InfluencedBy))

	// Amplify once when any dependency change is architectural or
	// requirements-related (matches the original single 1.3x boost).
	if hasCriticalReason {
		staleness *= 1.3
	}

	return math.Min(1.0, staleness)
}
// Helper methods for staleness analysis
func (sd *stalenessDetectorImpl) analyzeStalenessReasons(node *TemporalNode, stalenessScore float64) []string {
reasons := make([]string, 0)
// Time-based reasons
timeSinceUpdate := time.Since(node.Timestamp)
if timeSinceUpdate > 30*24*time.Hour {
reasons = append(reasons, fmt.Sprintf("not updated in %d days", int(timeSinceUpdate.Hours()/24)))
} else if timeSinceUpdate > 7*24*time.Hour {
reasons = append(reasons, fmt.Sprintf("not updated in %d days", int(timeSinceUpdate.Hours()/24)))
}
// Influence-based reasons
recentInfluencerChanges := sd.countRecentInfluencerChanges(node)
if recentInfluencerChanges > 0 {
reasons = append(reasons, fmt.Sprintf("%d influencing contexts have changed recently", recentInfluencerChanges))
}
// Activity-based reasons
networkActivity := sd.calculateNetworkActivity(node)
if networkActivity > 0.5 {
reasons = append(reasons, "high activity in related contexts")
}
// Confidence-based reasons
if node.Confidence < 0.6 {
reasons = append(reasons, fmt.Sprintf("low confidence score (%.2f)", node.Confidence))
}
// Dependency-based reasons
dependencyChanges := sd.countDependencyChanges(node)
if dependencyChanges > 0 {
reasons = append(reasons, fmt.Sprintf("%d dependencies have changed", dependencyChanges))
}
// Scope-based reasons
if node.ImpactScope == ImpactSystem || node.ImpactScope == ImpactProject {
reasons = append(reasons, "high impact scope requires frequent updates")
}
return reasons
}
// generateRefreshActions produces textual refresh suggestions for a node,
// always starting with a basic accuracy review and adding suggestions based
// on age, influencer churn, confidence, technology breadth, impact scope,
// and outgoing influence count.
func (sd *stalenessDetectorImpl) generateRefreshActions(node *TemporalNode) []string {
	// Basic review always applies.
	actions := []string{"review context accuracy and completeness"}

	// Older than a week: bring the content up to date.
	if time.Since(node.Timestamp) > 7*24*time.Hour {
		actions = append(actions, "update context with recent changes")
	}

	// Influencers changed: re-check what flows into this context.
	if sd.countRecentInfluencerChanges(node) > 0 {
		actions = append(actions,
			"review influencing contexts for impact",
			"validate dependencies are still accurate")
	}

	// Low confidence: strengthen the analysis.
	if node.Confidence < 0.7 {
		actions = append(actions,
			"improve context confidence through additional analysis",
			"validate context information with subject matter experts")
	}

	// Broad technology surface: verify the stack.
	if node.Context != nil && len(node.Context.Technologies) > 5 {
		actions = append(actions,
			"review technology stack for changes",
			"update technology versions and compatibility")
	}

	// Wide impact: escalate to architectural review.
	if node.ImpactScope == ImpactSystem || node.ImpactScope == ImpactProject {
		actions = append(actions,
			"conduct architectural review",
			"validate system-wide impact assumptions")
	}

	// Heavily influential node: check downstream consistency.
	if len(node.Influences) > 3 {
		actions = append(actions, "review all influenced contexts for consistency")
	}

	return actions
}
// findRelatedChanges returns the addresses in the node's influence network
// (outgoing influences first, then influencers) whose latest node changed
// within the last week.
func (sd *stalenessDetectorImpl) findRelatedChanges(node *TemporalNode) []ucxl.Address {
	cutoff := time.Now().Add(-7 * 24 * time.Hour)
	related := make([]ucxl.Address, 0)

	for _, group := range [][]ucxl.Address{node.Influences, node.InfluencedBy} {
		for _, addr := range group {
			if neighbor := sd.findLatestNodeByAddress(addr); neighbor != nil && neighbor.Timestamp.After(cutoff) {
				related = append(related, addr)
			}
		}
	}

	return related
}
// calculatePriority maps a staleness score onto a StalePriority level,
// boosting the raw score for wide impact scope, many outgoing influences,
// and extreme age before bucketing it.
func (sd *stalenessDetectorImpl) calculatePriority(stalenessScore float64, node *TemporalNode) StalePriority {
	score := stalenessScore

	// Impact scope boost.
	switch node.ImpactScope {
	case ImpactSystem:
		score += 0.3
	case ImpactProject:
		score += 0.2
	case ImpactModule:
		score += 0.1
	}

	// Influence-count boost.
	switch n := len(node.Influences); {
	case n > 5:
		score += 0.2
	case n > 2:
		score += 0.1
	}

	// Age boost for very old nodes.
	if time.Since(node.Timestamp) > 90*24*time.Hour {
		score += 0.1
	}

	// Bucket the boosted score into a priority level.
	switch {
	case score >= 0.9:
		return PriorityCritical
	case score >= 0.7:
		return PriorityHigh
	case score >= 0.5:
		return PriorityMedium
	default:
		return PriorityLow
	}
}
// Additional helper methods
func (sd *stalenessDetectorImpl) findLatestNodeByAddress(address ucxl.Address) *TemporalNode {
addressKey := address.String()
if nodes, exists := sd.graph.addressToNodes[addressKey]; exists && len(nodes) > 0 {
return nodes[len(nodes)-1]
}
return nil
}
// hasSignificantChange reports whether the node changed at or after the
// given time for a reason considered significant (architecture,
// requirements, or design decisions).
func (sd *stalenessDetectorImpl) hasSignificantChange(node *TemporalNode, since time.Time) bool {
	if node.Timestamp.Before(since) {
		return false
	}

	switch node.ChangeReason {
	case ReasonArchitectureChange, ReasonRequirementsChange, ReasonDesignDecision:
		return true
	default:
		return false
	}
}
// countRecentInfluencerChanges counts how many of the node's influencers
// have a latest version newer than one week.
func (sd *stalenessDetectorImpl) countRecentInfluencerChanges(node *TemporalNode) int {
	cutoff := time.Now().Add(-7 * 24 * time.Hour)

	changes := 0
	for _, addr := range node.InfluencedBy {
		influencer := sd.findLatestNodeByAddress(addr)
		if influencer != nil && influencer.Timestamp.After(cutoff) {
			changes++
		}
	}
	return changes
}
// calculateNetworkActivity returns the fraction of the node's direct
// connections (influences and influencers) whose latest version changed
// within the last week; 0 when the node has no connections.
func (sd *stalenessDetectorImpl) calculateNetworkActivity(node *TemporalNode) float64 {
	totalConnections := len(node.Influences) + len(node.InfluencedBy)
	if totalConnections == 0 {
		return 0
	}

	cutoff := time.Now().Add(-7 * 24 * time.Hour)
	recentChanges := 0
	for _, group := range [][]ucxl.Address{node.Influences, node.InfluencedBy} {
		for _, addr := range group {
			if neighbor := sd.findLatestNodeByAddress(addr); neighbor != nil && neighbor.Timestamp.After(cutoff) {
				recentChanges++
			}
		}
	}

	return float64(recentChanges) / float64(totalConnections)
}
// countDependencyChanges counts the node's influencers that have had a
// significant change since this node was last updated.
func (sd *stalenessDetectorImpl) countDependencyChanges(node *TemporalNode) int {
	changes := 0
	for _, addr := range node.InfluencedBy {
		dep := sd.findLatestNodeByAddress(addr)
		if dep != nil && sd.hasSignificantChange(dep, node.Timestamp) {
			changes++
		}
	}
	return changes
}
// validateWeights checks that every staleness weight lies in [0, 1].
// A nil pointer is rejected explicitly — previously it would have caused a
// nil-pointer dereference. Weights are intentionally NOT required to sum to
// 1.0, as they may be used in different combinations.
func (sd *stalenessDetectorImpl) validateWeights(weights *StalenessWeights) error {
	if weights == nil {
		return fmt.Errorf("weights must not be nil")
	}

	// Table-driven range check; error messages match the originals.
	checks := []struct {
		name  string
		value float64
	}{
		{"TimeWeight", weights.TimeWeight},
		{"InfluenceWeight", weights.InfluenceWeight},
		{"ActivityWeight", weights.ActivityWeight},
		{"ImportanceWeight", weights.ImportanceWeight},
		{"ComplexityWeight", weights.ComplexityWeight},
		{"DependencyWeight", weights.DependencyWeight},
	}
	for _, c := range checks {
		if c.value < 0 || c.value > 1 {
			return fmt.Errorf("%s must be between 0 and 1", c.name)
		}
	}
	return nil
}
// updateStatistics recomputes aggregate staleness statistics over all graph
// nodes and stores them in the statistics cache. Callers must hold sd.mu and
// the graph's read lock (DetectStaleContexts does both).
func (sd *stalenessDetectorImpl) updateStatistics(totalContexts, staleContexts int, duration time.Duration) {
	var avgStaleness, maxStaleness, stalenessRate float64

	if totalContexts > 0 {
		sum := 0.0
		for _, node := range sd.graph.nodes {
			s := sd.calculateNodeStaleness(node)
			sum += s
			if s > maxStaleness {
				maxStaleness = s
			}
		}
		avgStaleness = sum / float64(totalContexts)
		stalenessRate = float64(staleContexts) / float64(totalContexts) * 100.0
	}

	sd.cachedStatistics = &StalenessStatistics{
		TotalContexts:          int64(totalContexts),
		StaleContexts:          int64(staleContexts),
		StalenessRate:          stalenessRate,
		AverageStaleness:       avgStaleness,
		MaxStaleness:           maxStaleness,
		LastDetectionRun:       time.Now(),
		DetectionDuration:      duration,
		RefreshRecommendations: int64(staleContexts),
	}
}
// Action categorization and estimation methods
func (sd *stalenessDetectorImpl) categorizeAction(action string) string {
switch {
case contains(action, "review"):
return "review"
case contains(action, "update"):
return "update"
case contains(action, "validate"):
return "validation"
case contains(action, "improve"):
return "improvement"
case contains(action, "technology"):
return "technical"
case contains(action, "architectural"):
return "architectural"
default:
return "general"
}
}
// calculateActionPriority computes a numeric priority for an action,
// starting from a base of 5 and adding boosts for wide impact scope, highly
// influential nodes, and architectural/validation actions.
func (sd *stalenessDetectorImpl) calculateActionPriority(action string, node *TemporalNode) int {
	priority := 5 // base priority

	switch node.ImpactScope {
	case ImpactSystem:
		priority += 3
	case ImpactProject:
		priority += 2
	}

	if len(node.Influences) > 5 {
		priority += 2
	}
	if contains(action, "architectural") {
		priority += 2
	}
	if contains(action, "validate") {
		priority++
	}

	return priority
}
// estimateEffort maps an action description to a rough effort level
// ("low"/"medium"/"high") by keyword; rules are checked in order and the
// first match wins, defaulting to "medium".
func (sd *stalenessDetectorImpl) estimateEffort(action string) string {
	rules := []struct {
		keyword string
		effort  string
	}{
		{"review context accuracy", "medium"},
		{"architectural review", "high"},
		{"validate dependencies", "medium"},
		{"update context", "low"},
		{"improve confidence", "high"},
		{"technology", "medium"},
	}
	for _, rule := range rules {
		if contains(action, rule.keyword) {
			return rule.effort
		}
	}
	return "medium"
}
// getRequiredRoles maps an action description to the roles needed to carry
// it out; rules are checked in order and the first match wins, defaulting
// to a lone analyst.
func (sd *stalenessDetectorImpl) getRequiredRoles(action string) []string {
	rules := []struct {
		keyword string
		roles   []string
	}{
		{"architectural", []string{"architect", "technical_lead"}},
		{"technology", []string{"developer", "technical_lead"}},
		{"validate", []string{"analyst", "subject_matter_expert"}},
		{"review", []string{"analyst", "developer"}},
	}
	for _, rule := range rules {
		if contains(action, rule.keyword) {
			return rule.roles
		}
	}
	return []string{"analyst"}
}
// getActionDependencies lists the prerequisites for an action, keyed off
// keywords in its description; returns an empty (non-nil) slice when none
// apply.
func (sd *stalenessDetectorImpl) getActionDependencies(action string) []string {
	deps := make([]string, 0)

	if contains(action, "architectural") {
		deps = append(deps, "stakeholder_availability", "documentation_access")
	}
	if contains(action, "validate dependencies") {
		deps = append(deps, "dependency_analysis", "influence_mapping")
	}
	if contains(action, "improve confidence") {
		deps = append(deps, "expert_review", "additional_analysis")
	}

	return deps
}