Files
bzzz/pkg/slurp/intelligence/goal_alignment.go
anthonyrawlins d96c931a29 Resolve import cycles and migrate to chorus.services module path
This comprehensive refactoring addresses critical architectural issues:

IMPORT CYCLE RESOLUTION:
• pkg/crypto ↔ pkg/slurp/roles: Created pkg/security/access_levels.go
• pkg/ucxl → pkg/dht: Created pkg/storage/interfaces.go
• pkg/slurp/leader → pkg/election → pkg/slurp/storage: Moved types to pkg/election/interfaces.go

MODULE PATH MIGRATION:
• Changed from github.com/anthonyrawlins/bzzz to chorus.services/bzzz
• Updated all import statements across 115+ files
• Maintains compatibility while removing personal GitHub account dependency

TYPE SYSTEM IMPROVEMENTS:
• Resolved duplicate type declarations in crypto package
• Added missing type definitions (RoleStatus, TimeRestrictions, KeyStatus, KeyRotationResult)
• Proper interface segregation to prevent future cycles

ARCHITECTURAL BENEFITS:
• Build now progresses past structural issues to normal dependency resolution
• Cleaner separation of concerns between packages
• Eliminates circular dependencies that prevented compilation
• Establishes foundation for scalable codebase growth

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-17 10:04:25 +10:00

1383 lines
42 KiB
Go

package intelligence
import (
"context"
"fmt"
"math"
"sort"
"strings"
"sync"
"time"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
)
// GoalAlignmentEngine provides comprehensive goal alignment assessment for
// context nodes. It orchestrates multi-dimensional scoring, priority and
// deadline adjustments, trend analysis, recommendation generation, and
// metrics recording (see AssessAlignment for the full pipeline).
type GoalAlignmentEngine struct {
    mu                   sync.RWMutex          // guards shared engine state
    config               *EngineConfig         // engine-wide configuration
    scoringEngine        *ScoringEngine        // computes and aggregates dimension scores
    dimensionAnalyzer    *DimensionAnalyzer    // registry of dimension calculators
    priorityCalculator   *PriorityCalculator   // priority/deadline-based score adjustments
    trendAnalyzer        *TrendAnalyzer        // historical records, trend detection, prediction
    recommendationEngine *RecommendationEngine // improvement recommendation generation
    metrics              *AlignmentMetrics     // assessment counters and score statistics
}
// ScoringEngine handles multi-dimensional scoring algorithms. It owns the
// registered scoring dimensions plus the weighting, normalization, and
// aggregation helpers used to fold per-dimension scores into one value.
type ScoringEngine struct {
    dimensions   []*ScoringDimension  // populated by initializeStandardDimensions
    weightConfig *WeightConfiguration // global/role/phase/project weight tables
    normalizer   *ScoreNormalizer     // clamps/normalizes raw aggregate scores
    aggregator   *ScoreAggregator     // combines dimension scores (weighted average)
}

// ScoringDimension represents a single dimension of goal alignment.
type ScoringDimension struct {
    Name        string                 `json:"name"`
    Description string                 `json:"description"`
    Weight      float64                `json:"weight"` // relative weight during aggregation
    Calculator  DimensionCalculator    `json:"-"`      // excluded from serialization
    Threshold   float64                `json:"threshold"`
    Priority    int                    `json:"priority"`
    Category    string                 `json:"category"` // e.g. content, technical, temporal
    Metadata    map[string]interface{} `json:"metadata"`
}

// DimensionCalculator is the interface implemented by each scoring dimension.
// Calculate produces a DimensionScore for a node/goal pair; Validate checks
// inputs before calculation.
type DimensionCalculator interface {
    Calculate(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal) (*DimensionScore, error)
    GetName() string
    GetWeight() float64
    Validate(node *slurpContext.ContextNode, goal *ProjectGoal) error
}

// DimensionScore represents a score for a single dimension, with supporting
// evidence and a confidence estimate in [0,1].
type DimensionScore struct {
    Dimension    string                 `json:"dimension"`
    Score        float64                `json:"score"`
    Confidence   float64                `json:"confidence"`
    Evidence     []string               `json:"evidence"` // human-readable justifications
    Reasoning    string                 `json:"reasoning"`
    SubScores    map[string]float64     `json:"sub_scores"`
    Metadata     map[string]interface{} `json:"metadata"`
    CalculatedAt time.Time              `json:"calculated_at"`
}

// WeightConfiguration manages dimension weights at several scopes. Only
// GlobalWeights is consulted by ScoreAggregator.aggregate at present.
type WeightConfiguration struct {
    GlobalWeights  map[string]float64            `json:"global_weights"`  // dimension name -> weight
    RoleWeights    map[string]map[string]float64 `json:"role_weights"`    // role -> dimension -> weight
    PhaseWeights   map[string]map[string]float64 `json:"phase_weights"`   // phase -> dimension -> weight
    ProjectWeights map[string]map[string]float64 `json:"project_weights"` // project -> dimension -> weight
    DynamicWeights bool                          `json:"dynamic_weights"`
    LastUpdated    time.Time                     `json:"last_updated"`
}

// ScoreNormalizer normalizes scores across different dimensions.
type ScoreNormalizer struct {
    normalizationMethod string // e.g. "z_score"; currently only clamping is implemented
    referenceData       *NormalizationReference
}

// NormalizationReference contains reference data for normalization.
type NormalizationReference struct {
    HistoricalScores map[string]*ScoreDistribution `json:"historical_scores"`
    Percentiles      map[string]map[int]float64    `json:"percentiles"`
    LastCalculated   time.Time                     `json:"last_calculated"`
}

// ScoreDistribution represents score distribution statistics.
type ScoreDistribution struct {
    Mean    float64   `json:"mean"`
    Median  float64   `json:"median"`
    StdDev  float64   `json:"std_dev"`
    Min     float64   `json:"min"`
    Max     float64   `json:"max"`
    Count   int       `json:"count"`
    Samples []float64 `json:"samples"`
}

// ScoreAggregator combines dimension scores into a final alignment score.
// customLogic, when set, overrides the named method — TODO confirm: the
// aggregate method visible in this file does not consult either field yet.
type ScoreAggregator struct {
    method      string // e.g. "weighted_average"
    customLogic func([]*DimensionScore, *WeightConfiguration) float64
}
// DimensionAnalyzer analyzes alignment dimensions via a registry of
// calculators keyed by dimension name.
type DimensionAnalyzer struct {
    calculators map[string]DimensionCalculator
}

// PriorityCalculator calculates priority-based scoring adjustments using a
// priority matrix and deadline/time factors (see adjustScores).
type PriorityCalculator struct {
    priorityMatrix *PriorityMatrix
    timeFactors    *TimeFactors
}

// PriorityMatrix defines priority relationships across project entities.
type PriorityMatrix struct {
    Goals        map[string]int     `json:"goals"`
    Phases       map[string]int     `json:"phases"`
    Technologies map[string]int     `json:"technologies"`
    Roles        map[string]int     `json:"roles"`
    Urgency      map[string]float64 `json:"urgency"` // keyed by role in calculatePriorityMultiplier
    Impact       map[string]float64 `json:"impact"`
}

// TimeFactors handles time-based priority adjustments.
type TimeFactors struct {
    DecayFunction  string             `json:"decay_function"` // e.g. "exponential"
    HalfLife       time.Duration      `json:"half_life"`      // time constant for deadline urgency decay
    UrgencyBoost   float64            `json:"urgency_boost"`  // flat multiplier applied once a deadline passes
    DeadlineWeight float64            `json:"deadline_weight"`
    PhaseAlignment map[string]float64 `json:"phase_alignment"`
}
// TrendAnalyzer analyzes alignment trends over time, backed by an in-memory
// history of assessment records.
type TrendAnalyzer struct {
    historicalData *AlignmentHistory   // ring-like record store (see recordAlignment)
    trendDetector  *TrendDetector      // pluggable trend-detection methods
    predictor      *AlignmentPredictor // pluggable prediction models
}

// AlignmentHistory stores historical alignment data with a record cap and
// retention window.
type AlignmentHistory struct {
    mu         sync.RWMutex // guards records
    records    []*AlignmentRecord
    maxRecords int           // oldest records are dropped beyond this count
    retention  time.Duration // intended retention window — TODO confirm: not enforced in this file
}

// AlignmentRecord represents a historical alignment record.
type AlignmentRecord struct {
    NodePath   string                 `json:"node_path"`
    GoalID     string                 `json:"goal_id"`
    Score      float64                `json:"score"`
    Dimensions []*DimensionScore      `json:"dimensions"`
    Context    map[string]interface{} `json:"context"`
    Timestamp  time.Time              `json:"timestamp"`
    Role       string                 `json:"role"`
    Phase      string                 `json:"phase"`
}

// TrendDetector detects trends in alignment data.
type TrendDetector struct {
    methods []TrendDetectionMethod
}

// TrendDetectionMethod is the interface for trend detection algorithms.
type TrendDetectionMethod interface {
    DetectTrend(data []*AlignmentRecord) (*Trend, error)
    GetName() string
    GetConfidence() float64
}

// Trend represents a detected trend.
type Trend struct {
    Type        string        `json:"type"`        // improving, declining, stable, volatile
    Strength    float64       `json:"strength"`    // 0-1 strength of trend
    Confidence  float64       `json:"confidence"`  // 0-1 confidence in detection
    Duration    time.Duration `json:"duration"`    // duration of trend
    Slope       float64       `json:"slope"`       // rate of change
    Breakpoints []time.Time   `json:"breakpoints"` // trend change points
    Description string        `json:"description"`
    StartTime   time.Time     `json:"start_time"`
    EndTime     time.Time     `json:"end_time"`
}

// AlignmentPredictor predicts future alignment scores.
type AlignmentPredictor struct {
    models []PredictionModel
}

// PredictionModel is the interface for alignment prediction models.
type PredictionModel interface {
    Predict(ctx context.Context, history []*AlignmentRecord, horizon time.Duration) (*AlignmentPrediction, error)
    GetName() string
    GetAccuracy() float64
    Train(data []*AlignmentRecord) error
}

// AlignmentPrediction represents a predicted future alignment.
type AlignmentPrediction struct {
    PredictedScore     float64             `json:"predicted_score"`
    ConfidenceInterval *ConfidenceInterval `json:"confidence_interval"`
    Factors            map[string]float64  `json:"factors"`
    Scenarios          []*Scenario         `json:"scenarios"`
    Recommendations    []string            `json:"recommendations"`
    Horizon            time.Duration       `json:"horizon"` // how far ahead the prediction looks
    Model              string              `json:"model"`   // name of the model that produced it
    PredictedAt        time.Time           `json:"predicted_at"`
}

// ConfidenceInterval represents prediction confidence bounds.
type ConfidenceInterval struct {
    Lower      float64 `json:"lower"`
    Upper      float64 `json:"upper"`
    Confidence float64 `json:"confidence"` // e.g., 0.95 for 95% confidence
}

// Scenario represents a single prediction scenario with its probability.
type Scenario struct {
    Name        string   `json:"name"`
    Probability float64  `json:"probability"`
    Score       float64  `json:"score"`
    Description string   `json:"description"`
    Assumptions []string `json:"assumptions"`
}
// RecommendationEngine generates alignment improvement recommendations by
// combining rule-based and ML-based generation with prioritization.
type RecommendationEngine struct {
    ruleEngine  *RecommendationRuleEngine  // deterministic, rule-driven suggestions
    mlEngine    *MLRecommendationEngine    // model-driven suggestions
    prioritizer *RecommendationPrioritizer // orders the combined output
}

// RecommendationRuleEngine provides rule-based recommendations.
type RecommendationRuleEngine struct {
    rules []*RecommendationRule
}

// RecommendationRule defines a single condition/action recommendation rule.
type RecommendationRule struct {
    ID         string                  `json:"id"`
    Name       string                  `json:"name"`
    Condition  RecommendationCondition `json:"condition"` // when the rule fires
    Action     RecommendationAction    `json:"action"`    // what it recommends
    Priority   int                     `json:"priority"`
    Confidence float64                 `json:"confidence"`
    Category   string                  `json:"category"`
    Tags       []string                `json:"tags"`
}

// RecommendationCondition defines when a rule applies.
type RecommendationCondition struct {
    ScoreThreshold   float64                `json:"score_threshold"`
    DimensionFilters map[string]float64     `json:"dimension_filters"`
    TrendConditions  []string               `json:"trend_conditions"`
    ContextFilters   map[string]interface{} `json:"context_filters"`
    LogicalOperator  string                 `json:"logical_operator"` // AND, OR
}

// RecommendationAction defines what to recommend when a rule fires.
type RecommendationAction struct {
    Type         string                 `json:"type"`
    Description  string                 `json:"description"`
    Impact       float64                `json:"impact"` // expected benefit
    Effort       float64                `json:"effort"` // expected cost
    Timeline     string                 `json:"timeline"`
    Resources    []string               `json:"resources"`
    Dependencies []string               `json:"dependencies"`
    Metadata     map[string]interface{} `json:"metadata"`
}

// MLRecommendationEngine provides ML-based recommendations.
type MLRecommendationEngine struct {
    models []RecommendationModel
}

// RecommendationModel is the interface for ML recommendation models.
type RecommendationModel interface {
    GenerateRecommendations(ctx context.Context, node *slurpContext.ContextNode, alignmentData *AlignmentAssessment) ([]*Recommendation, error)
    Train(data []*TrainingExample) error
    GetName() string
    GetConfidence() float64
}

// TrainingExample represents before/after training data for ML models.
type TrainingExample struct {
    Node            *slurpContext.ContextNode `json:"node"`
    AlignmentBefore *AlignmentAssessment      `json:"alignment_before"`
    AlignmentAfter  *AlignmentAssessment      `json:"alignment_after"`
    Actions         []*RecommendationAction   `json:"actions"`
    Outcome         float64                   `json:"outcome"` // observed effect of the actions
    Timestamp       time.Time                 `json:"timestamp"`
}

// RecommendationPrioritizer prioritizes recommendations against weighted
// criteria.
type RecommendationPrioritizer struct {
    criteria []PrioritizationCriterion
}

// PrioritizationCriterion defines one weighted scoring function used to rank
// recommendations.
type PrioritizationCriterion struct {
    Name       string                         `json:"name"`
    Weight     float64                        `json:"weight"`
    Calculator func(*Recommendation) float64  `json:"-"` // excluded from serialization
}
// AlignmentMetrics tracks alignment assessment metrics. All fields are
// guarded by mu; updated via the record* methods referenced from
// AssessAlignment.
type AlignmentMetrics struct {
    mu                    sync.RWMutex
    totalAssessments      int64
    successfulAssessments int64
    averageScore          float64
    scoreDistribution     *ScoreDistribution
    dimensionPerformance  map[string]*DimensionMetrics // keyed by dimension name
    goalPerformance       map[string]*GoalMetrics      // keyed by goal ID
    lastReset             time.Time
}

// DimensionMetrics tracks metrics for a specific dimension.
type DimensionMetrics struct {
    TotalCalculations int64              `json:"total_calculations"`
    AverageScore      float64            `json:"average_score"`
    Distribution      *ScoreDistribution `json:"distribution"`
    FailureRate       float64            `json:"failure_rate"`
    LastCalculated    time.Time          `json:"last_calculated"`
}

// GoalMetrics tracks metrics for a specific goal.
type GoalMetrics struct {
    TotalAssessments int64     `json:"total_assessments"`
    AverageAlignment float64   `json:"average_alignment"`
    TrendDirection   string    `json:"trend_direction"`
    LastAssessed     time.Time `json:"last_assessed"`
    SuccessRate      float64   `json:"success_rate"`
}
// AlignmentAssessment represents a complete alignment assessment: the
// normalized overall score, its per-dimension breakdown, plus generated
// recommendations, trends, and predictions (see AssessAlignment).
type AlignmentAssessment struct {
    NodePath        string                 `json:"node_path"`
    GoalID          string                 `json:"goal_id"`
    OverallScore    float64                `json:"overall_score"` // normalized to [0,1]
    Confidence      float64                `json:"confidence"`    // mean of dimension confidences
    DimensionScores []*DimensionScore      `json:"dimension_scores"`
    Recommendations []*Recommendation      `json:"recommendations"`
    Trends          []*Trend               `json:"trends"`
    Predictions     []*AlignmentPrediction `json:"predictions"`
    Context         map[string]interface{} `json:"context"` // role, goal_name, phase
    AssessedAt      time.Time              `json:"assessed_at"`
    AssessedBy      string                 `json:"assessed_by"`
    Role            string                 `json:"role"`
    Phase           string                 `json:"phase"`
}

// Recommendation represents an alignment improvement recommendation.
type Recommendation struct {
    ID              string                 `json:"id"`
    Title           string                 `json:"title"`
    Description     string                 `json:"description"`
    Action          *RecommendationAction  `json:"action"`
    Priority        int                    `json:"priority"`
    Confidence      float64                `json:"confidence"`
    Impact          float64                `json:"impact"` // expected benefit
    Effort          float64                `json:"effort"` // expected cost
    Timeline        string                 `json:"timeline"`
    Category        string                 `json:"category"`
    Tags            []string               `json:"tags"`
    Dependencies    []string               `json:"dependencies"`
    Resources       []string               `json:"resources"`
    SuccessCriteria []string               `json:"success_criteria"`
    RiskFactors     []string               `json:"risk_factors"`
    Alternatives    []*Recommendation      `json:"alternatives"` // fallback recommendations
    GeneratedAt     time.Time              `json:"generated_at"`
    GeneratedBy     string                 `json:"generated_by"`
}
// NewGoalAlignmentEngine constructs a goal alignment engine wired with its
// default scoring, dimension-analysis, priority, trend, recommendation, and
// metrics components.
func NewGoalAlignmentEngine(config *EngineConfig) *GoalAlignmentEngine {
    return &GoalAlignmentEngine{
        config:               config,
        scoringEngine:        NewScoringEngine(config),
        dimensionAnalyzer:    NewDimensionAnalyzer(),
        priorityCalculator:   NewPriorityCalculator(),
        trendAnalyzer:        NewTrendAnalyzer(),
        recommendationEngine: NewRecommendationEngine(),
        metrics:              NewAlignmentMetrics(),
    }
}
// NewScoringEngine creates a scoring engine with default weight,
// normalization, and aggregation helpers, pre-populated with the standard
// alignment dimensions.
func NewScoringEngine(config *EngineConfig) *ScoringEngine {
    se := &ScoringEngine{
        dimensions:   []*ScoringDimension{},
        weightConfig: NewWeightConfiguration(),
        normalizer:   NewScoreNormalizer(),
        aggregator:   NewScoreAggregator(),
    }
    se.initializeStandardDimensions() // replaces the empty dimension list
    return se
}
// AssessAlignment performs a comprehensive goal alignment assessment of a
// context node against a project goal for the given role. The pipeline is:
// dimension scoring -> priority/deadline adjustment -> weighted aggregation
// -> normalization -> recommendations, trends, and predictions. Only a total
// dimension-scoring failure aborts the assessment; recommendation and
// prediction errors degrade gracefully to empty results. The assessment is
// recorded in trend history and metrics before returning.
func (gae *GoalAlignmentEngine) AssessAlignment(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal, role string) (*AlignmentAssessment, error) {
    start := time.Now()
    // Always record the assessment duration, even on the error path.
    defer func() {
        gae.metrics.recordAssessment(time.Since(start))
    }()
    // Calculate dimension scores; failing here means no dimension produced a
    // score at all.
    dimensionScores, err := gae.calculateDimensionScores(ctx, node, goal)
    if err != nil {
        gae.metrics.recordFailure()
        return nil, fmt.Errorf("failed to calculate dimension scores: %w", err)
    }
    // Apply priority/deadline multipliers to each dimension score.
    adjustedScores := gae.priorityCalculator.adjustScores(dimensionScores, goal, role)
    // Collapse adjusted scores into one weighted-average value.
    overallScore := gae.scoringEngine.aggregator.aggregate(adjustedScores, gae.scoringEngine.weightConfig)
    // Clamp/normalize the aggregate into [0,1].
    normalizedScore := gae.scoringEngine.normalizer.normalize(overallScore, "overall")
    // Generate recommendations; failures intentionally degrade to an empty
    // list rather than failing the assessment.
    recommendations, err := gae.recommendationEngine.generateRecommendations(ctx, node, goal, adjustedScores)
    if err != nil {
        recommendations = []*Recommendation{} // Continue with empty recommendations
    }
    // Analyze historical trends for this node/goal pair.
    trends := gae.trendAnalyzer.analyzeTrends(node.Path, goal.ID)
    // Predict alignment 30 days out; prediction failures also degrade to
    // an empty list.
    predictions, err := gae.trendAnalyzer.predictor.predictAlignment(ctx, node.Path, goal.ID, 30*24*time.Hour)
    if err != nil {
        predictions = []*AlignmentPrediction{} // Continue with empty predictions
    }
    // Overall confidence is the mean of the dimension confidences.
    confidence := gae.calculateOverallConfidence(adjustedScores)
    assessment := &AlignmentAssessment{
        NodePath:        node.Path,
        GoalID:          goal.ID,
        OverallScore:    normalizedScore,
        Confidence:      confidence,
        DimensionScores: adjustedScores,
        Recommendations: recommendations,
        Trends:          trends,
        Predictions:     predictions,
        Context: map[string]interface{}{
            "role":      role,
            "goal_name": goal.Name,
            "phase":     goal.Phase,
        },
        AssessedAt: time.Now(),
        AssessedBy: "GoalAlignmentEngine",
        Role:       role,
        Phase:      goal.Phase,
    }
    // Persist the result for future trend analysis and metrics.
    gae.trendAnalyzer.recordAlignment(assessment)
    gae.metrics.recordSuccess(normalizedScore)
    return assessment, nil
}
// calculateDimensionScores runs every registered dimension calculator
// against the node/goal pair. A calculator that fails is skipped so one bad
// dimension cannot abort the whole assessment; an error is returned only
// when no dimension produced a score at all.
func (gae *GoalAlignmentEngine) calculateDimensionScores(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal) ([]*DimensionScore, error) {
    dims := gae.scoringEngine.dimensions
    scores := make([]*DimensionScore, 0, len(dims))
    for _, dim := range dims {
        s, err := dim.Calculator.Calculate(ctx, node, goal)
        if err != nil {
            continue // calculator errors are intentionally swallowed
        }
        scores = append(scores, s)
    }
    if len(scores) == 0 {
        return nil, fmt.Errorf("no dimension scores calculated")
    }
    return scores, nil
}
// calculateOverallConfidence returns the arithmetic mean of the dimension
// confidences, or 0 when there are no scores.
func (gae *GoalAlignmentEngine) calculateOverallConfidence(scores []*DimensionScore) float64 {
    n := len(scores)
    if n == 0 {
        return 0.0
    }
    var sum float64
    for _, s := range scores {
        sum += s.Confidence
    }
    return sum / float64(n)
}
// Standard dimension calculators

// KeywordAlignmentCalculator scores alignment by substring-matching goal
// keywords against the node's summary, purpose, technologies, and tags.
type KeywordAlignmentCalculator struct {
    name   string
    weight float64
}

// NewKeywordAlignmentCalculator constructs the default keyword calculator.
func NewKeywordAlignmentCalculator() *KeywordAlignmentCalculator {
    return &KeywordAlignmentCalculator{name: "keyword_alignment", weight: 0.3}
}

// GetName returns the dimension name.
func (kac *KeywordAlignmentCalculator) GetName() string { return kac.name }

// GetWeight returns the dimension weight.
func (kac *KeywordAlignmentCalculator) GetWeight() float64 { return kac.weight }

// Validate ensures both inputs are present.
func (kac *KeywordAlignmentCalculator) Validate(node *slurpContext.ContextNode, goal *ProjectGoal) error {
    if node == nil || goal == nil {
        return fmt.Errorf("node and goal cannot be nil")
    }
    return nil
}

// Calculate scores the node as the fraction of goal keywords found in its
// combined text; confidence grows with the number of matches, capped at 0.9.
func (kac *KeywordAlignmentCalculator) Calculate(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal) (*DimensionScore, error) {
    if err := kac.Validate(node, goal); err != nil {
        return nil, err
    }
    // Build the searchable node text once, lowercased.
    pieces := []string{node.Summary, node.Purpose, strings.Join(node.Technologies, " "), strings.Join(node.Tags, " ")}
    nodeText := strings.ToLower(strings.Join(pieces, " "))
    // Count keyword hits (case-insensitive substring match).
    matches := 0
    evidence := []string{}
    for _, kw := range goal.Keywords {
        if strings.Contains(nodeText, strings.ToLower(kw)) {
            matches++
            evidence = append(evidence, fmt.Sprintf("Found keyword: %s", kw))
        }
    }
    // Score is the matched fraction of the goal's keywords.
    score := 0.0
    if total := len(goal.Keywords); total > 0 {
        score = float64(matches) / float64(total)
    }
    // Confidence rises with evidence strength, capped at 0.9.
    confidence := math.Min(0.9, 0.1+0.2*float64(matches))
    return &DimensionScore{
        Dimension:    kac.name,
        Score:        score,
        Confidence:   confidence,
        Evidence:     evidence,
        Reasoning:    fmt.Sprintf("Found %d out of %d keywords", matches, len(goal.Keywords)),
        SubScores:    map[string]float64{"keyword_matches": float64(matches)},
        CalculatedAt: time.Now(),
    }, nil
}
// TechnologyAlignmentCalculator scores alignment between the node's declared
// technology stack and technology-related keywords of the goal.
type TechnologyAlignmentCalculator struct {
    name   string
    weight float64
}

// NewTechnologyAlignmentCalculator constructs the default technology calculator.
func NewTechnologyAlignmentCalculator() *TechnologyAlignmentCalculator {
    return &TechnologyAlignmentCalculator{name: "technology_alignment", weight: 0.25}
}

// GetName returns the dimension name.
func (tac *TechnologyAlignmentCalculator) GetName() string { return tac.name }

// GetWeight returns the dimension weight.
func (tac *TechnologyAlignmentCalculator) GetWeight() float64 { return tac.weight }

// Validate ensures both inputs are present.
func (tac *TechnologyAlignmentCalculator) Validate(node *slurpContext.ContextNode, goal *ProjectGoal) error {
    if node == nil || goal == nil {
        return fmt.Errorf("node and goal cannot be nil")
    }
    return nil
}

// Calculate scores technology alignment. When the goal has no technology
// keywords a neutral score is returned (0.7 if the node lists any
// technologies, else 0.5); otherwise the score is the ratio of bidirectional
// substring matches to tech keywords.
func (tac *TechnologyAlignmentCalculator) Calculate(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal) (*DimensionScore, error) {
    if err := tac.Validate(node, goal); err != nil {
        return nil, err
    }
    // Keep only the goal keywords that look technology-related.
    techKeywords := []string{}
    for _, kw := range goal.Keywords {
        if tac.isTechnologyKeyword(kw) {
            techKeywords = append(techKeywords, kw)
        }
    }
    if len(techKeywords) == 0 {
        // No explicit tech requirements: fall back to a general assessment.
        score := 0.5
        if len(node.Technologies) > 0 {
            score = 0.7
        }
        return &DimensionScore{
            Dimension:    tac.name,
            Score:        score,
            Confidence:   0.5,
            Evidence:     []string{"No specific technology requirements in goal"},
            Reasoning:    "General technology assessment",
            CalculatedAt: time.Now(),
        }, nil
    }
    // Count matches between node technologies and tech keywords. A match in
    // either containment direction counts (e.g. "go" ~ "golang").
    matches := 0
    evidence := []string{}
    for _, tech := range node.Technologies {
        lowerTech := strings.ToLower(tech)
        for _, kw := range techKeywords {
            lowerKw := strings.ToLower(kw)
            if strings.Contains(lowerTech, lowerKw) || strings.Contains(lowerKw, lowerTech) {
                matches++
                evidence = append(evidence, fmt.Sprintf("Technology match: %s ~ %s", tech, kw))
            }
        }
    }
    score := 0.0
    if len(techKeywords) > 0 {
        score = float64(matches) / float64(len(techKeywords))
    }
    confidence := math.Min(0.9, 0.2+0.3*float64(matches))
    return &DimensionScore{
        Dimension:    tac.name,
        Score:        score,
        Confidence:   confidence,
        Evidence:     evidence,
        Reasoning:    fmt.Sprintf("Technology alignment: %d matches out of %d tech keywords", matches, len(techKeywords)),
        SubScores:    map[string]float64{"tech_matches": float64(matches)},
        CalculatedAt: time.Now(),
    }, nil
}

// isTechnologyKeyword reports whether the keyword contains any term from a
// fixed list of well-known languages, frameworks, platforms, and datastores.
func (tac *TechnologyAlignmentCalculator) isTechnologyKeyword(keyword string) bool {
    techTerms := []string{
        "go", "golang", "python", "javascript", "typescript", "java", "rust", "c++", "c#",
        "react", "vue", "angular", "node", "express", "django", "flask", "spring",
        "docker", "kubernetes", "aws", "azure", "gcp", "terraform", "ansible",
        "mysql", "postgresql", "mongodb", "redis", "elasticsearch",
        "microservices", "api", "rest", "graphql", "grpc", "websocket",
    }
    kw := strings.ToLower(keyword)
    for _, term := range techTerms {
        if strings.Contains(kw, term) {
            return true
        }
    }
    return false
}
// initializeStandardDimensions registers the five built-in alignment
// dimensions (keyword, technology, purpose, phase, context relevance) with
// their default weights, thresholds, and categories. Weights sum to 1.0.
func (se *ScoringEngine) initializeStandardDimensions() {
    se.dimensions = []*ScoringDimension{
        {
            Name:        "keyword_alignment",
            Description: "Alignment based on keyword matching",
            Weight:      0.3,
            Calculator:  NewKeywordAlignmentCalculator(),
            Threshold:   0.3,
            Priority:    1,
            Category:    "content",
        },
        {
            Name:        "technology_alignment",
            Description: "Alignment based on technology stack",
            Weight:      0.25,
            Calculator:  NewTechnologyAlignmentCalculator(),
            Threshold:   0.2,
            Priority:    2,
            Category:    "technical",
        },
        {
            Name:        "purpose_alignment",
            Description: "Alignment based on stated purpose",
            Weight:      0.2,
            Calculator:  NewPurposeAlignmentCalculator(),
            Threshold:   0.25,
            Priority:    1,
            Category:    "functional",
        },
        {
            Name:        "phase_alignment",
            Description: "Alignment with project phase",
            Weight:      0.15,
            Calculator:  NewPhaseAlignmentCalculator(),
            Threshold:   0.3,
            Priority:    3,
            Category:    "temporal",
        },
        {
            Name:        "context_relevance",
            Description: "Overall context relevance",
            Weight:      0.1,
            Calculator:  NewContextRelevanceCalculator(),
            Threshold:   0.2,
            Priority:    4,
            Category:    "contextual",
        },
    }
}
// Additional calculator implementations would follow similar patterns...

// PurposeAlignmentCalculator scores alignment between the node's stated
// purpose and the goal's description using a simple word-overlap similarity.
type PurposeAlignmentCalculator struct {
    name   string
    weight float64
}

// NewPurposeAlignmentCalculator constructs the default purpose calculator.
func NewPurposeAlignmentCalculator() *PurposeAlignmentCalculator {
    return &PurposeAlignmentCalculator{name: "purpose_alignment", weight: 0.2}
}

// GetName returns the dimension name.
func (pac *PurposeAlignmentCalculator) GetName() string { return pac.name }

// GetWeight returns the dimension weight.
func (pac *PurposeAlignmentCalculator) GetWeight() float64 { return pac.weight }

// Validate ensures both inputs are present.
func (pac *PurposeAlignmentCalculator) Validate(node *slurpContext.ContextNode, goal *ProjectGoal) error {
    if node == nil || goal == nil {
        return fmt.Errorf("node and goal cannot be nil")
    }
    return nil
}

// Calculate scores semantic similarity between node purpose and goal
// description with a fixed 0.7 confidence.
func (pac *PurposeAlignmentCalculator) Calculate(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal) (*DimensionScore, error) {
    similarity := pac.calculateSemanticSimilarity(node.Purpose, goal.Description)
    return &DimensionScore{
        Dimension:    pac.name,
        Score:        similarity,
        Confidence:   0.7,
        Evidence:     []string{fmt.Sprintf("Purpose: %s", node.Purpose)},
        Reasoning:    "Semantic similarity between purpose and goal description",
        CalculatedAt: time.Now(),
    }, nil
}

// calculateSemanticSimilarity returns the fraction of purpose words that
// match (equality or containment, either direction) some description word.
// Simple implementation - in production would use more sophisticated NLP.
func (pac *PurposeAlignmentCalculator) calculateSemanticSimilarity(purpose, description string) float64 {
    purposeWords := strings.Fields(strings.ToLower(purpose))
    if len(purposeWords) == 0 {
        return 0.0
    }
    descWords := strings.Fields(strings.ToLower(description))
    matches := 0
    for _, pw := range purposeWords {
        for _, dw := range descWords {
            if pw == dw || strings.Contains(pw, dw) || strings.Contains(dw, pw) {
                matches++
                break // each purpose word counts at most once
            }
        }
    }
    return float64(matches) / float64(len(purposeWords))
}
// PhaseAlignmentCalculator scores how relevant a node is to the goal's
// current project phase via phase-specific keyword lookups.
type PhaseAlignmentCalculator struct {
    name   string
    weight float64
}

// NewPhaseAlignmentCalculator constructs the default phase calculator.
func NewPhaseAlignmentCalculator() *PhaseAlignmentCalculator {
    return &PhaseAlignmentCalculator{name: "phase_alignment", weight: 0.15}
}

// GetName returns the dimension name.
func (phac *PhaseAlignmentCalculator) GetName() string { return phac.name }

// GetWeight returns the dimension weight.
func (phac *PhaseAlignmentCalculator) GetWeight() float64 { return phac.weight }

// Validate accepts all inputs; nil nodes/goals are tolerated here.
func (phac *PhaseAlignmentCalculator) Validate(node *slurpContext.ContextNode, goal *ProjectGoal) error {
    return nil
}

// Calculate scores phase relevance with a fixed 0.8 confidence.
func (phac *PhaseAlignmentCalculator) Calculate(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal) (*DimensionScore, error) {
    relevance := phac.calculatePhaseRelevance(node, goal.Phase)
    return &DimensionScore{
        Dimension:    phac.name,
        Score:        relevance,
        Confidence:   0.8,
        Evidence:     []string{fmt.Sprintf("Goal phase: %s", goal.Phase)},
        Reasoning:    "Alignment with current project phase",
        CalculatedAt: time.Now(),
    }, nil
}

// calculatePhaseRelevance returns the highest keyword relevance found in the
// node's purpose+summary for the given phase. Unknown phases score 0.5; a
// known phase with no keyword hits scores 0.4.
func (phac *PhaseAlignmentCalculator) calculatePhaseRelevance(node *slurpContext.ContextNode, phase string) float64 {
    // Per-phase keyword relevance table.
    phaseRelevance := map[string]map[string]float64{
        "planning": {
            "documentation": 0.9,
            "architecture":  0.8,
            "research":      0.9,
            "design":        0.8,
        },
        "development": {
            "implementation": 0.9,
            "testing":        0.7,
            "coding":         0.9,
            "api":            0.8,
        },
        "testing": {
            "testing":    0.9,
            "quality":    0.8,
            "validation": 0.9,
            "bug":        0.7,
        },
        "deployment": {
            "deployment":     0.9,
            "infrastructure": 0.8,
            "monitoring":     0.8,
            "production":     0.9,
        },
    }
    keywordScores, known := phaseRelevance[strings.ToLower(phase)]
    if !known {
        return 0.5 // Default relevance for unrecognized phases
    }
    nodeText := strings.ToLower(node.Purpose + " " + node.Summary)
    best := 0.0
    for keyword, relevance := range keywordScores {
        if strings.Contains(nodeText, keyword) && relevance > best {
            best = relevance
        }
    }
    if best == 0.0 {
        return 0.4 // Known phase, but no phase-specific keywords found
    }
    return best
}
// ContextRelevanceCalculator scores overall context relevance by combining
// specificity, RAG confidence, technology richness, and insight richness
// into one weighted average.
type ContextRelevanceCalculator struct {
    name   string
    weight float64
}

// NewContextRelevanceCalculator constructs the default relevance calculator.
func NewContextRelevanceCalculator() *ContextRelevanceCalculator {
    return &ContextRelevanceCalculator{
        name:   "context_relevance",
        weight: 0.1,
    }
}

// GetName returns the dimension name.
func (crc *ContextRelevanceCalculator) GetName() string { return crc.name }

// GetWeight returns the dimension weight.
func (crc *ContextRelevanceCalculator) GetWeight() float64 { return crc.weight }

// Validate accepts all inputs; nil nodes/goals are tolerated here.
func (crc *ContextRelevanceCalculator) Validate(node *slurpContext.ContextNode, goal *ProjectGoal) error {
    return nil
}

// Calculate scores overall context relevance with a fixed 0.6 confidence.
func (crc *ContextRelevanceCalculator) Calculate(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal) (*DimensionScore, error) {
    relevanceScore := crc.calculateRelevance(node, goal)
    return &DimensionScore{
        Dimension:    crc.name,
        Score:        relevanceScore,
        Confidence:   0.6,
        Evidence:     []string{"Overall context assessment"},
        Reasoning:    "Calculated based on multiple context factors",
        CalculatedAt: time.Now(),
    }, nil
}

// calculateRelevance combines four factors into a weighted average
// (weights 0.4/0.3/0.2/0.1):
//  1. context specificity scaled down by goal priority,
//  2. the node's RAG confidence,
//  3. technology richness (saturates at 3 technologies),
//  4. insight richness (saturates at 5 insights).
func (crc *ContextRelevanceCalculator) calculateRelevance(node *slurpContext.ContextNode, goal *ProjectGoal) float64 {
    factors := []float64{}
    // Factor 1: Specificity vs Goal Priority. Guard against non-positive
    // priorities, which would otherwise divide by zero (yielding +Inf) or
    // flip the sign of the factor.
    priority := goal.Priority
    if priority <= 0 {
        priority = 1
    }
    specificityFactor := float64(node.ContextSpecificity) / 10.0 * (1.0 / float64(priority))
    factors = append(factors, specificityFactor)
    // Factor 2: RAG Confidence (assumed already in [0,1] — TODO confirm).
    factors = append(factors, node.RAGConfidence)
    // Factor 3: Technology richness, capped at 1.0.
    techFactor := math.Min(1.0, float64(len(node.Technologies))/3.0)
    factors = append(factors, techFactor)
    // Factor 4: Insight richness, capped at 1.0.
    insightFactor := math.Min(1.0, float64(len(node.Insights))/5.0)
    factors = append(factors, insightFactor)
    // Weighted average of the factors; extra factors beyond the weight list
    // would be ignored.
    totalWeight := 0.0
    weightedSum := 0.0
    weights := []float64{0.4, 0.3, 0.2, 0.1}
    for i, factor := range factors {
        if i < len(weights) {
            weightedSum += factor * weights[i]
            totalWeight += weights[i]
        }
    }
    if totalWeight == 0.0 {
        return 0.5
    }
    return weightedSum / totalWeight
}
// Helper methods for scoring engine components

// NewWeightConfiguration returns a weight configuration with empty weight
// tables, dynamic weighting enabled, and the update timestamp set to now.
func NewWeightConfiguration() *WeightConfiguration {
    wc := &WeightConfiguration{
        DynamicWeights: true,
        LastUpdated:    time.Now(),
    }
    wc.GlobalWeights = make(map[string]float64)
    wc.RoleWeights = make(map[string]map[string]float64)
    wc.PhaseWeights = make(map[string]map[string]float64)
    wc.ProjectWeights = make(map[string]map[string]float64)
    return wc
}
// NewScoreNormalizer returns a normalizer configured for z-score
// normalization with empty reference data.
func NewScoreNormalizer() *ScoreNormalizer {
    ref := &NormalizationReference{
        HistoricalScores: make(map[string]*ScoreDistribution),
        Percentiles:      make(map[string]map[int]float64),
        LastCalculated:   time.Now(),
    }
    return &ScoreNormalizer{
        normalizationMethod: "z_score",
        referenceData:       ref,
    }
}
// NewScoreAggregator returns an aggregator using the weighted-average method.
func NewScoreAggregator() *ScoreAggregator {
    return &ScoreAggregator{method: "weighted_average"}
}
// aggregate folds the dimension scores into a weighted average using the
// per-dimension GlobalWeights (defaulting to 1.0 when a dimension has no
// configured weight). Returns 0 for an empty score list or zero total weight.
func (sa *ScoreAggregator) aggregate(scores []*DimensionScore, weights *WeightConfiguration) float64 {
    if len(scores) == 0 {
        return 0.0
    }
    var weightedSum, totalWeight float64
    for _, s := range scores {
        w := 1.0 // default weight when none is configured
        if gw, ok := weights.GlobalWeights[s.Dimension]; ok {
            w = gw
        }
        weightedSum += s.Score * w
        totalWeight += w
    }
    if totalWeight == 0.0 {
        return 0.0
    }
    return weightedSum / totalWeight
}
// normalize clamps score into [0,1]. The configured normalizationMethod and
// reference data are not yet applied — this is a simple bound check.
func (sn *ScoreNormalizer) normalize(score float64, dimension string) float64 {
    switch {
    case score < 0.0:
        return 0.0
    case score > 1.0:
        return 1.0
    default:
        return score
    }
}
// Create remaining component constructors...

// NewDimensionAnalyzer returns an analyzer with an empty calculator registry.
func NewDimensionAnalyzer() *DimensionAnalyzer {
    da := &DimensionAnalyzer{}
    da.calculators = make(map[string]DimensionCalculator)
    return da
}
// NewPriorityCalculator returns a priority calculator with empty priority
// tables and default time factors (30-day exponential decay, 1.5x urgency
// boost past deadline, 2.0 deadline weight).
func NewPriorityCalculator() *PriorityCalculator {
    matrix := &PriorityMatrix{
        Goals:        make(map[string]int),
        Phases:       make(map[string]int),
        Technologies: make(map[string]int),
        Roles:        make(map[string]int),
        Urgency:      make(map[string]float64),
        Impact:       make(map[string]float64),
    }
    timing := &TimeFactors{
        DecayFunction:  "exponential",
        HalfLife:       30 * 24 * time.Hour,
        UrgencyBoost:   1.5,
        DeadlineWeight: 2.0,
        PhaseAlignment: make(map[string]float64),
    }
    return &PriorityCalculator{priorityMatrix: matrix, timeFactors: timing}
}
// adjustScores returns copies of the given dimension scores with priority
// and deadline multipliers applied, each result clamped into [0,1]. Input
// scores are not mutated. The multipliers depend only on the goal and role,
// so they are computed once instead of per score (the time multiplier calls
// time.Until and would otherwise drift slightly across iterations).
func (pc *PriorityCalculator) adjustScores(scores []*DimensionScore, goal *ProjectGoal, role string) []*DimensionScore {
    // Loop-invariant multipliers, hoisted out of the loop.
    priorityMultiplier := pc.calculatePriorityMultiplier(goal, role)
    timeMultiplier := pc.calculateTimeMultiplier(goal)
    adjusted := make([]*DimensionScore, len(scores))
    for i, score := range scores {
        adjustedScore := *score // copy so the caller's score is untouched
        adjustedScore.Score *= priorityMultiplier
        adjustedScore.Score *= timeMultiplier
        // Ensure the adjusted score stays within bounds.
        adjustedScore.Score = math.Max(0.0, math.Min(1.0, adjustedScore.Score))
        adjusted[i] = &adjustedScore
    }
    return adjusted
}
// calculatePriorityMultiplier returns a score multiplier derived from the
// goal's priority (inverse relationship: priority 1 -> 2.0, priority 2 ->
// 1.5, ...), optionally scaled by a role-specific urgency factor.
func (pc *PriorityCalculator) calculatePriorityMultiplier(goal *ProjectGoal, role string) float64 {
    // Guard against zero/negative priorities, which would otherwise produce
    // a +Inf or negative multiplier via division.
    priority := goal.Priority
    if priority <= 0 {
        priority = 1
    }
    priorityMultiplier := 1.0 + (1.0 / float64(priority))
    // Role-specific adjustments from the urgency table, when present.
    if roleMultiplier, exists := pc.priorityMatrix.Urgency[role]; exists {
        priorityMultiplier *= roleMultiplier
    }
    return priorityMultiplier
}
// calculateTimeMultiplier boosts a goal's weight as its deadline approaches.
// Goals without a deadline get a neutral 1.0. For future deadlines the boost
// grows exponentially (governed by timeFactors.HalfLife) up to roughly
// 1.0 + DeadlineWeight at the deadline itself.
func (pc *PriorityCalculator) calculateTimeMultiplier(goal *ProjectGoal) float64 {
	if goal.Deadline == nil {
		return 1.0
	}
	// Calculate urgency based on deadline proximity
	timeToDeadline := time.Until(*goal.Deadline)
	if timeToDeadline <= 0 {
		// NOTE(review): just past the deadline this returns UrgencyBoost
		// (1.5), which is LOWER than the ~1+DeadlineWeight (~3.0) returned
		// moments before it — confirm this discontinuity is intentional.
		return pc.timeFactors.UrgencyBoost // Past deadline
	}
	// Exponential urgency increase as deadline approaches
	urgencyFactor := math.Exp(-float64(timeToDeadline) / float64(pc.timeFactors.HalfLife))
	return 1.0 + urgencyFactor*pc.timeFactors.DeadlineWeight
}
// NewTrendAnalyzer builds a TrendAnalyzer with an empty in-memory history
// (capped at 10,000 records, 90-day retention) and no configured detection
// or prediction models.
func NewTrendAnalyzer() *TrendAnalyzer {
	history := &AlignmentHistory{
		records:    make([]*AlignmentRecord, 0),
		maxRecords: 10000,
		retention:  90 * 24 * time.Hour,
	}
	return &TrendAnalyzer{
		historicalData: history,
		trendDetector:  &TrendDetector{methods: []TrendDetectionMethod{}},
		predictor:      &AlignmentPredictor{models: []PredictionModel{}},
	}
}
// analyzeTrends returns alignment trends for a node/goal pair. Placeholder:
// a production implementation would mine historicalData; for now it always
// yields an empty (non-nil) slice.
func (ta *TrendAnalyzer) analyzeTrends(nodePath, goalID string) []*Trend {
	return make([]*Trend, 0)
}
// recordAlignment appends a snapshot of the assessment to the in-memory
// history, evicting the oldest records once the configured cap is exceeded.
func (ta *TrendAnalyzer) recordAlignment(assessment *AlignmentAssessment) {
	rec := &AlignmentRecord{
		NodePath:   assessment.NodePath,
		GoalID:     assessment.GoalID,
		Score:      assessment.OverallScore,
		Dimensions: assessment.DimensionScores,
		Context:    assessment.Context,
		Timestamp:  assessment.AssessedAt,
		Role:       assessment.Role,
		Phase:      assessment.Phase,
	}
	hist := ta.historicalData
	hist.mu.Lock()
	defer hist.mu.Unlock()
	hist.records = append(hist.records, rec)
	// Drop the oldest entries once over capacity.
	if excess := len(hist.records) - hist.maxRecords; excess > 0 {
		hist.records = hist.records[excess:]
	}
}
// predictAlignment forecasts future alignment for a node/goal pair over the
// given horizon. Placeholder: returns an empty (non-nil) slice until ML
// prediction models are wired in.
func (ap *AlignmentPredictor) predictAlignment(ctx context.Context, nodePath, goalID string, horizon time.Duration) ([]*AlignmentPrediction, error) {
	predictions := make([]*AlignmentPrediction, 0)
	return predictions, nil
}
// NewRecommendationEngine wires together the rule-based engine, the (not yet
// active) ML engine, and the prioritizer into a RecommendationEngine.
func NewRecommendationEngine() *RecommendationEngine {
	engine := &RecommendationEngine{}
	engine.ruleEngine = NewRecommendationRuleEngine()
	engine.mlEngine = NewMLRecommendationEngine()
	engine.prioritizer = NewRecommendationPrioritizer()
	return engine
}
// generateRecommendations produces prioritized improvement recommendations
// for a context node scored against a goal. Rule-based suggestions are
// collected best-effort (rule-engine errors are silently skipped); ML-based
// generation is intentionally disabled for now.
func (re *RecommendationEngine) generateRecommendations(ctx context.Context, node *slurpContext.ContextNode, goal *ProjectGoal, scores []*DimensionScore) ([]*Recommendation, error) {
	collected := []*Recommendation{}
	if ruleRecs, err := re.ruleEngine.generateRecommendations(scores); err == nil {
		collected = append(collected, ruleRecs...)
	}
	// ML-based recommendations (re.mlEngine) are the future integration
	// point; they are not invoked yet.
	return re.prioritizer.prioritize(collected), nil
}
// NewRecommendationRuleEngine constructs a rule engine pre-populated with
// the default recommendation rules.
func NewRecommendationRuleEngine() *RecommendationRuleEngine {
	rre := &RecommendationRuleEngine{rules: make([]*RecommendationRule, 0)}
	rre.loadDefaultRules()
	return rre
}
// loadDefaultRules replaces the engine's rule set with the built-in
// defaults. Each rule pairs a threshold condition on a single dimension
// score with a suggested remediation action; thresholds use the "LT"
// operator, so a rule fires when the dimension's score falls BELOW the
// configured value.
func (rre *RecommendationRuleEngine) loadDefaultRules() {
	rules := []*RecommendationRule{
		// Fires when keyword_alignment < 0.3: low-effort content fix.
		{
			ID:   "low_keyword_alignment",
			Name: "Improve Keyword Alignment",
			Condition: RecommendationCondition{
				DimensionFilters: map[string]float64{"keyword_alignment": 0.3},
				LogicalOperator:  "LT",
			},
			Action: RecommendationAction{
				Type:        "content_enhancement",
				Description: "Add more relevant keywords to improve alignment with project goals",
				Impact:      0.7,
				Effort:      0.3,
				Timeline:    "short",
				Resources:   []string{"documentation", "content_review"},
			},
			Priority:   1,
			Confidence: 0.8,
			Category:   "content",
		},
		// Fires when technology_alignment < 0.2: higher-effort technical fix.
		{
			ID:   "technology_mismatch",
			Name: "Address Technology Mismatch",
			Condition: RecommendationCondition{
				DimensionFilters: map[string]float64{"technology_alignment": 0.2},
				LogicalOperator:  "LT",
			},
			Action: RecommendationAction{
				Type:        "technology_update",
				Description: "Update technology stack or documentation to better align with project goals",
				Impact:      0.8,
				Effort:      0.6,
				Timeline:    "medium",
				Resources:   []string{"development", "architecture_review"},
			},
			Priority:   2,
			Confidence: 0.7,
			Category:   "technical",
		},
	}
	rre.rules = rules
}
// generateRecommendations walks the configured rule set and emits one
// recommendation for every rule whose condition holds for the supplied
// dimension scores. Always returns a non-nil slice and a nil error.
func (rre *RecommendationRuleEngine) generateRecommendations(scores []*DimensionScore) ([]*Recommendation, error) {
	matched := []*Recommendation{}
	for _, rule := range rre.rules {
		if !rre.evaluateCondition(rule.Condition, scores) {
			continue
		}
		matched = append(matched, &Recommendation{
			ID:          rule.ID,
			Title:       rule.Name,
			Description: rule.Action.Description,
			Action:      &rule.Action,
			Priority:    rule.Priority,
			Confidence:  rule.Confidence,
			Impact:      rule.Action.Impact,
			Effort:      rule.Action.Effort,
			Timeline:    rule.Action.Timeline,
			Category:    rule.Category,
			Resources:   rule.Action.Resources,
			GeneratedAt: time.Now(),
			GeneratedBy: "RuleEngine",
		})
	}
	return matched, nil
}
// evaluateCondition reports whether the dimension scores satisfy the rule
// condition. Every entry in DimensionFilters must hold (logical AND), and a
// filter whose dimension is absent from the scores fails the condition. An
// empty filter set never matches, preserving the previous behavior.
//
// Fix: the prior implementation returned the verdict of the FIRST filter's
// first matching score, so multi-filter conditions were never combined and
// a missing dimension silently fell through to the next filter.
func (rre *RecommendationRuleEngine) evaluateCondition(condition RecommendationCondition, scores []*DimensionScore) bool {
	if len(condition.DimensionFilters) == 0 {
		return false
	}
	// Index scores by dimension for O(1) lookups per filter.
	byDimension := make(map[string]float64, len(scores))
	for _, score := range scores {
		byDimension[score.Dimension] = score.Score
	}
	for dimension, threshold := range condition.DimensionFilters {
		value, ok := byDimension[dimension]
		if !ok {
			return false // required dimension was not scored
		}
		var pass bool
		switch condition.LogicalOperator {
		case "GT":
			pass = value > threshold
		case "EQ":
			pass = math.Abs(value-threshold) < 0.01
		default: // "LT" and any unrecognized operator, as before
			pass = value < threshold
		}
		if !pass {
			return false
		}
	}
	return true
}
// NewMLRecommendationEngine constructs an ML recommendation engine with no
// registered models; it is a placeholder until models are loaded.
func NewMLRecommendationEngine() *MLRecommendationEngine {
	engine := &MLRecommendationEngine{}
	engine.models = make([]RecommendationModel, 0)
	return engine
}
// NewRecommendationPrioritizer builds a prioritizer that ranks
// recommendations by a weighted blend of impact/effort ratio (40%),
// confidence (30%), and inverse priority (30%).
func NewRecommendationPrioritizer() *RecommendationPrioritizer {
	return &RecommendationPrioritizer{
		criteria: []PrioritizationCriterion{
			{
				Name:   "impact_effort_ratio",
				Weight: 0.4,
				Calculator: func(rec *Recommendation) float64 {
					// Zero effort would divide by zero; fall back to raw impact.
					if rec.Effort == 0 {
						return rec.Impact
					}
					return rec.Impact / rec.Effort
				},
			},
			{
				Name:   "confidence",
				Weight: 0.3,
				Calculator: func(rec *Recommendation) float64 {
					return rec.Confidence
				},
			},
			{
				Name:   "priority",
				Weight: 0.3,
				Calculator: func(rec *Recommendation) float64 {
					// Inverse priority: lower numbers rank higher. Guard
					// against zero/negative priorities (prioritize itself can
					// assign 0), which would otherwise yield +Inf or negative
					// scores; treat them as top priority.
					if rec.Priority <= 0 {
						return 1.0
					}
					return 1.0 / float64(rec.Priority)
				},
			},
		},
	}
}
// prioritize rescores each recommendation on a 0-100 scale from the weighted
// criteria, overwriting its Priority field, then sorts the slice descending
// by that score. The input slice is mutated in place and returned.
func (rp *RecommendationPrioritizer) prioritize(recommendations []*Recommendation) []*Recommendation {
	for _, rec := range recommendations {
		var weighted, weightTotal float64
		for _, criterion := range rp.criteria {
			weighted += criterion.Calculator(rec) * criterion.Weight
			weightTotal += criterion.Weight
		}
		if weightTotal > 0 {
			// Normalize to the criteria weights, then scale to 0-100.
			rec.Priority = int((weighted / weightTotal) * 100)
		}
	}
	sort.Slice(recommendations, func(a, b int) bool {
		return recommendations[a].Priority > recommendations[b].Priority
	})
	return recommendations
}
// NewAlignmentMetrics constructs an empty metrics collector with its reset
// timestamp set to now.
func NewAlignmentMetrics() *AlignmentMetrics {
	m := &AlignmentMetrics{lastReset: time.Now()}
	m.dimensionPerformance = make(map[string]*DimensionMetrics)
	m.goalPerformance = make(map[string]*GoalMetrics)
	return m
}
// recordAssessment counts one assessment attempt (successful or not).
// The duration parameter is currently unused — presumably intended for
// latency tracking; TODO wire it into a timing metric or drop it.
func (am *AlignmentMetrics) recordAssessment(duration time.Duration) {
	am.mu.Lock()
	defer am.mu.Unlock()
	am.totalAssessments++
}
// recordSuccess counts one successful assessment and folds its score into
// the running average.
//
// Fix: the previous implementation divided by totalAssessments even though
// scores are only recorded on success, so any recorded failure permanently
// skewed the mean; the running mean must be taken over successful
// assessments only.
func (am *AlignmentMetrics) recordSuccess(score float64) {
	am.mu.Lock()
	defer am.mu.Unlock()
	am.successfulAssessments++
	// Incremental mean over the scores actually recorded (successes).
	n := float64(am.successfulAssessments)
	am.averageScore += (score - am.averageScore) / n
}
// recordFailure acknowledges a failed assessment. No counter is mutated:
// the failure count is derived as totalAssessments - successfulAssessments.
// The lock is still taken so this call serializes with other recorders.
func (am *AlignmentMetrics) recordFailure() {
	am.mu.Lock()
	defer am.mu.Unlock()
	// Failure count is totalAssessments - successfulAssessments
}
// GetMetrics returns a point-in-time snapshot of aggregate assessment
// statistics, including the success rate (0.0 when nothing was assessed).
func (am *AlignmentMetrics) GetMetrics() map[string]interface{} {
	am.mu.RLock()
	defer am.mu.RUnlock()
	var successRate float64
	if am.totalAssessments > 0 {
		successRate = float64(am.successfulAssessments) / float64(am.totalAssessments)
	}
	snapshot := map[string]interface{}{
		"total_assessments":      am.totalAssessments,
		"successful_assessments": am.successfulAssessments,
		"success_rate":           successRate,
		"average_score":          am.averageScore,
		"last_reset":             am.lastReset,
	}
	return snapshot
}