feat: Production readiness improvements for WHOOSH council formation
Major security, observability, and configuration improvements:
## Security Hardening
- Implemented configurable CORS (no more wildcards)
- Added comprehensive auth middleware for admin endpoints
- Enhanced webhook HMAC validation
- Added input validation and rate limiting
- Security headers and CSP policies
## Configuration Management
- Made N8N webhook URL configurable (WHOOSH_N8N_BASE_URL)
- Replaced all hardcoded endpoints with environment variables
- Added feature flags for LLM vs heuristic composition
- Gitea fetch hardening with EAGER_FILTER and FULL_RESCAN options
## API Completeness
- Implemented GetCouncilComposition function
- Added GET /api/v1/councils/{id} endpoint
- Council artifacts API (POST/GET /api/v1/councils/{id}/artifacts)
- /admin/health/details endpoint with component status
- Database lookup for repository URLs (no hardcoded fallbacks)
## Observability & Performance
- Added OpenTelemetry distributed tracing with goal/pulse correlation
- Performance optimization database indexes
- Comprehensive health monitoring
- Enhanced logging and error handling
## Infrastructure
- Production-ready P2P discovery (replaces mock implementation)
- Removed unused Redis configuration
- Enhanced Docker Swarm integration
- Added migration files for performance indexes
## Code Quality
- Comprehensive input validation
- Graceful error handling and failsafe fallbacks
- Backwards compatibility maintained
- Following security best practices
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -189,6 +189,27 @@ type ComposerConfig struct {
|
||||
AnalysisTimeoutSecs int `json:"analysis_timeout_secs"`
|
||||
EnableCaching bool `json:"enable_caching"`
|
||||
CacheTTLMins int `json:"cache_ttl_mins"`
|
||||
|
||||
// Feature flags
|
||||
FeatureFlags FeatureFlags `json:"feature_flags"`
|
||||
}
|
||||
|
||||
// FeatureFlags controls experimental and optional features in the composer
type FeatureFlags struct {
	// LLM-based analysis (vs heuristic-based)

	// EnableLLMClassification routes classifyTask through the LLM path
	// instead of the heuristic classifier.
	EnableLLMClassification bool `json:"enable_llm_classification"`

	// EnableLLMSkillAnalysis routes analyzeSkillRequirements through the
	// LLM path instead of the heuristic analyzer.
	EnableLLMSkillAnalysis bool `json:"enable_llm_skill_analysis"`

	// EnableLLMTeamMatching presumably gates an LLM-backed team-matching
	// path; its use is not visible in this file — confirm at call sites.
	EnableLLMTeamMatching bool `json:"enable_llm_team_matching"`

	// Advanced analysis features
	//
	// NOTE(review): usage of the three flags below is not visible in this
	// file; descriptions follow the names — confirm against the analyzers.
	EnableComplexityAnalysis bool `json:"enable_complexity_analysis"`
	EnableRiskAssessment     bool `json:"enable_risk_assessment"`
	EnableAlternativeOptions bool `json:"enable_alternative_options"`

	// Performance and debugging

	// EnableAnalysisLogging turns on the debug/info log events emitted by
	// the classification and skill-analysis code paths.
	EnableAnalysisLogging bool `json:"enable_analysis_logging"`

	// EnablePerformanceMetrics gates performance metric collection
	// (usage not visible in this file — confirm).
	EnablePerformanceMetrics bool `json:"enable_performance_metrics"`

	// EnableFailsafeFallback makes the not-yet-implemented LLM paths fall
	// back to heuristics instead of returning an error.
	EnableFailsafeFallback bool `json:"enable_failsafe_fallback"`
}
|
||||
|
||||
// DefaultComposerConfig returns sensible defaults for MVP
|
||||
@@ -204,5 +225,26 @@ func DefaultComposerConfig() *ComposerConfig {
|
||||
AnalysisTimeoutSecs: 60,
|
||||
EnableCaching: true,
|
||||
CacheTTLMins: 30,
|
||||
FeatureFlags: DefaultFeatureFlags(),
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultFeatureFlags returns conservative defaults that prioritize reliability
|
||||
func DefaultFeatureFlags() FeatureFlags {
|
||||
return FeatureFlags{
|
||||
// LLM features disabled by default - use heuristics for reliability
|
||||
EnableLLMClassification: false,
|
||||
EnableLLMSkillAnalysis: false,
|
||||
EnableLLMTeamMatching: false,
|
||||
|
||||
// Basic analysis features enabled
|
||||
EnableComplexityAnalysis: true,
|
||||
EnableRiskAssessment: true,
|
||||
EnableAlternativeOptions: false, // Disabled for MVP performance
|
||||
|
||||
// Debug and monitoring enabled
|
||||
EnableAnalysisLogging: true,
|
||||
EnablePerformanceMetrics: true,
|
||||
EnableFailsafeFallback: true,
|
||||
}
|
||||
}
|
||||
@@ -89,9 +89,24 @@ func (s *Service) AnalyzeAndComposeTeam(ctx context.Context, input *TaskAnalysis
|
||||
|
||||
// classifyTask analyzes the task and determines its characteristics
|
||||
func (s *Service) classifyTask(ctx context.Context, input *TaskAnalysisInput) (*TaskClassification, error) {
|
||||
// For MVP, implement rule-based classification
|
||||
// In production, this would call LLM for sophisticated analysis
|
||||
if s.config.FeatureFlags.EnableAnalysisLogging {
|
||||
log.Debug().
|
||||
Str("task_title", input.Title).
|
||||
Bool("llm_enabled", s.config.FeatureFlags.EnableLLMClassification).
|
||||
Msg("Starting task classification")
|
||||
}
|
||||
|
||||
// Choose classification method based on feature flag
|
||||
if s.config.FeatureFlags.EnableLLMClassification {
|
||||
return s.classifyTaskWithLLM(ctx, input)
|
||||
}
|
||||
|
||||
// Use heuristic-based classification (default/reliable path)
|
||||
return s.classifyTaskWithHeuristics(ctx, input)
|
||||
}
|
||||
|
||||
// classifyTaskWithHeuristics uses rule-based classification for reliability
|
||||
func (s *Service) classifyTaskWithHeuristics(ctx context.Context, input *TaskAnalysisInput) (*TaskClassification, error) {
|
||||
taskType := s.determineTaskType(input.Title, input.Description)
|
||||
complexity := s.estimateComplexity(input)
|
||||
domains := s.identifyDomains(input.TechStack, input.Requirements)
|
||||
@@ -106,9 +121,37 @@ func (s *Service) classifyTask(ctx context.Context, input *TaskAnalysisInput) (*
|
||||
RequiredExperience: s.determineRequiredExperience(complexity, taskType),
|
||||
}
|
||||
|
||||
if s.config.FeatureFlags.EnableAnalysisLogging {
|
||||
log.Debug().
|
||||
Str("task_type", string(taskType)).
|
||||
Float64("complexity", complexity).
|
||||
Strs("domains", domains).
|
||||
Msg("Task classified with heuristics")
|
||||
}
|
||||
|
||||
return classification, nil
|
||||
}
|
||||
|
||||
// classifyTaskWithLLM uses LLM-based classification for advanced analysis
|
||||
func (s *Service) classifyTaskWithLLM(ctx context.Context, input *TaskAnalysisInput) (*TaskClassification, error) {
|
||||
if s.config.FeatureFlags.EnableAnalysisLogging {
|
||||
log.Info().
|
||||
Str("model", s.config.ClassificationModel).
|
||||
Msg("Using LLM for task classification")
|
||||
}
|
||||
|
||||
// TODO: Implement LLM-based classification
|
||||
// This would make API calls to the configured LLM model
|
||||
// For now, fall back to heuristics if failsafe is enabled
|
||||
|
||||
if s.config.FeatureFlags.EnableFailsafeFallback {
|
||||
log.Warn().Msg("LLM classification not yet implemented, falling back to heuristics")
|
||||
return s.classifyTaskWithHeuristics(ctx, input)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("LLM classification not implemented")
|
||||
}
|
||||
|
||||
// determineTaskType uses heuristics to classify the task type
|
||||
func (s *Service) determineTaskType(title, description string) TaskType {
|
||||
titleLower := strings.ToLower(title)
|
||||
@@ -290,6 +333,24 @@ func (s *Service) determineRequiredExperience(complexity float64, taskType TaskT
|
||||
|
||||
// analyzeSkillRequirements determines what skills are needed for the task
|
||||
func (s *Service) analyzeSkillRequirements(ctx context.Context, input *TaskAnalysisInput, classification *TaskClassification) (*SkillRequirements, error) {
|
||||
if s.config.FeatureFlags.EnableAnalysisLogging {
|
||||
log.Debug().
|
||||
Str("task_title", input.Title).
|
||||
Bool("llm_enabled", s.config.FeatureFlags.EnableLLMSkillAnalysis).
|
||||
Msg("Starting skill requirements analysis")
|
||||
}
|
||||
|
||||
// Choose analysis method based on feature flag
|
||||
if s.config.FeatureFlags.EnableLLMSkillAnalysis {
|
||||
return s.analyzeSkillRequirementsWithLLM(ctx, input, classification)
|
||||
}
|
||||
|
||||
// Use heuristic-based analysis (default/reliable path)
|
||||
return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
|
||||
}
|
||||
|
||||
// analyzeSkillRequirementsWithHeuristics uses rule-based skill analysis
|
||||
func (s *Service) analyzeSkillRequirementsWithHeuristics(ctx context.Context, input *TaskAnalysisInput, classification *TaskClassification) (*SkillRequirements, error) {
|
||||
critical := []SkillRequirement{}
|
||||
desirable := []SkillRequirement{}
|
||||
|
||||
@@ -333,11 +394,40 @@ func (s *Service) analyzeSkillRequirements(ctx context.Context, input *TaskAnaly
|
||||
})
|
||||
}
|
||||
|
||||
return &SkillRequirements{
|
||||
result := &SkillRequirements{
|
||||
CriticalSkills: critical,
|
||||
DesirableSkills: desirable,
|
||||
TotalSkillCount: len(critical) + len(desirable),
|
||||
}, nil
|
||||
}
|
||||
|
||||
if s.config.FeatureFlags.EnableAnalysisLogging {
|
||||
log.Debug().
|
||||
Int("critical_skills", len(critical)).
|
||||
Int("desirable_skills", len(desirable)).
|
||||
Msg("Skills analyzed with heuristics")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// analyzeSkillRequirementsWithLLM uses LLM-based skill analysis
|
||||
func (s *Service) analyzeSkillRequirementsWithLLM(ctx context.Context, input *TaskAnalysisInput, classification *TaskClassification) (*SkillRequirements, error) {
|
||||
if s.config.FeatureFlags.EnableAnalysisLogging {
|
||||
log.Info().
|
||||
Str("model", s.config.SkillAnalysisModel).
|
||||
Msg("Using LLM for skill analysis")
|
||||
}
|
||||
|
||||
// TODO: Implement LLM-based skill analysis
|
||||
// This would make API calls to the configured LLM model
|
||||
// For now, fall back to heuristics if failsafe is enabled
|
||||
|
||||
if s.config.FeatureFlags.EnableFailsafeFallback {
|
||||
log.Warn().Msg("LLM skill analysis not yet implemented, falling back to heuristics")
|
||||
return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("LLM skill analysis not implemented")
|
||||
}
|
||||
|
||||
// getAvailableAgents retrieves agents that are available for assignment
|
||||
|
||||
Reference in New Issue
Block a user