feat: implement LLM integration for team composition engine

Resolves WHOOSH-LLM-002: Replace stubbed LLM functions with full Ollama API integration

## New Features
- Full Ollama API integration with automatic endpoint discovery (see the sketch after this list)
- LLM-powered task classification using configurable models
- LLM-powered skill requirement analysis
- Graceful fallback to heuristics on LLM failures
- Feature flag support for LLM vs heuristic execution
- Performance optimization with a smaller, faster model (llama3.2:latest)
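
Endpoint discovery follows the standard Ollama health-probe pattern of querying `/api/tags` on each candidate host. A minimal sketch of that pattern, assuming a configured candidate list — the `discoverEndpoint` helper and its signature are illustrative, not the committed OllamaClient API:

```go
package composer

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// discoverEndpoint returns the first candidate Ollama base URL that answers
// a health probe on /api/tags. The candidate list is a hypothetical config value.
func discoverEndpoint(ctx context.Context, candidates []string) (string, error) {
	client := &http.Client{Timeout: 2 * time.Second}
	for _, base := range candidates {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, base+"/api/tags", nil)
		if err != nil {
			continue
		}
		resp, err := client.Do(req)
		if err != nil {
			continue // endpoint unreachable, try the next candidate
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			return base, nil // healthy Ollama instance found
		}
	}
	return "", fmt.Errorf("no healthy Ollama endpoint among %d candidates", len(candidates))
}
```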

## Implementation Details
- Created OllamaClient with connection pooling and timeout handling
- Structured prompt engineering for consistent JSON responses (parsing sketched after this list)
- Robust error handling with automatic failover to heuristics
- Integration tests validating the new LLM code paths
- Support for multiple Ollama endpoints with health checking
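
Prompting for JSON only pays off if the response is parsed defensively, since models sometimes wrap the object in prose or code fences. A sketch of that parsing step, with a hypothetical helper and assumed field names — the committed ParseTaskClassificationResponse may differ:

```go
package composer

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Simplified view of the classification payload; the committed struct in
// models.go has a named TaskType type and may carry more fields. The JSON
// tags mirror the log keys in the diff below but are assumptions.
type taskClassificationJSON struct {
	TaskType        string   `json:"task_type"`
	ComplexityScore float64  `json:"complexity_score"`
	PrimaryDomains  []string `json:"primary_domains"`
	RiskLevel       string   `json:"risk_level"`
}

// parseClassificationJSON slices the outermost JSON object out of the raw
// model output before unmarshalling.
func parseClassificationJSON(response string) (*taskClassificationJSON, error) {
	start := strings.Index(response, "{")
	end := strings.LastIndex(response, "}")
	if start < 0 || end <= start {
		return nil, fmt.Errorf("no JSON object found in LLM response")
	}
	var c taskClassificationJSON
	if err := json.Unmarshal([]byte(response[start:end+1]), &c); err != nil {
		return nil, fmt.Errorf("invalid classification JSON: %w", err)
	}
	return &c, nil
}
```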

## Performance & Reliability
- Timeout configuration prevents hanging requests
- Fallback mechanism ensures system reliability (see the sketch after this list)
- Uses a 3.2B-parameter model for a balance of speed and accuracy
- Graceful degradation when LLM services are unavailable
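
The timeout and fallback items combine into one pattern: bound the LLM call with a context deadline, then route any error into the heuristic path when the failsafe flag is set. A minimal generic sketch of that pattern, not code from this commit:

```go
package composer

import (
	"context"
	"time"
)

// withFallback captures the timeout-plus-failsafe pattern this commit applies
// twice; llmPath and heuristicPath stand in for the real service methods.
func withFallback[T any](
	ctx context.Context,
	timeout time.Duration,
	failsafe bool,
	llmPath func(context.Context) (T, error),
	heuristicPath func(context.Context) (T, error),
) (T, error) {
	// Bound the LLM call so a slow Ollama endpoint cannot hang the request.
	llmCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	out, err := llmPath(llmCtx)
	if err != nil && failsafe {
		// Degrade gracefully: timeouts and API errors route to heuristics.
		return heuristicPath(ctx)
	}
	return out, err
}
```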

## Files Added
- internal/composer/ollama.go: Core Ollama API integration
- internal/composer/llm_test.go: Comprehensive integration tests

## Files Modified
- internal/composer/service.go: Implemented the LLM classification and skill analysis functions
- internal/composer/models.go: Updated config defaults for performance (sketched below)
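
The models.go update pairs the smaller model with an explicit analysis timeout. A hypothetical sketch of the updated defaults — the field names are taken from the diff below, but the struct shapes and values are assumptions:

```go
package composer

// FeatureFlags and ComposerConfig shapes are assumed from usage in service.go.
type FeatureFlags struct {
	EnableFailsafeFallback bool // fall back to heuristics on LLM failure
	EnableAnalysisLogging  bool // verbose logging of analysis decisions
}

type ComposerConfig struct {
	ClassificationModel string
	SkillAnalysisModel  string
	AnalysisTimeoutSecs int
	FeatureFlags        FeatureFlags
}

// DefaultComposerConfig exists in the diff below; these values are assumptions.
func DefaultComposerConfig() *ComposerConfig {
	return &ComposerConfig{
		ClassificationModel: "llama3.2:latest", // smaller, faster model per this commit
		SkillAnalysisModel:  "llama3.2:latest",
		AnalysisTimeoutSecs: 30, // assumed default; bounds each LLM call via context.WithTimeout
		FeatureFlags: FeatureFlags{
			EnableFailsafeFallback: true,
			EnableAnalysisLogging:  false,
		},
	}
}
```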

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

diff --git a/internal/composer/service.go b/internal/composer/service.go

@@ -15,8 +15,9 @@ import (
 // Service represents the Team Composer service
 type Service struct {
-	db     *pgxpool.Pool
-	config *ComposerConfig
+	db           *pgxpool.Pool
+	config       *ComposerConfig
+	ollamaClient *OllamaClient
 }
 
 // NewService creates a new Team Composer service
@@ -24,10 +25,14 @@ func NewService(db *pgxpool.Pool, config *ComposerConfig) *Service {
 	if config == nil {
 		config = DefaultComposerConfig()
 	}
 
+	// Initialize Ollama client for LLM operations
+	ollamaClient := NewOllamaClient(config.ClassificationModel)
+
 	return &Service{
-		db:     db,
-		config: config,
+		db:           db,
+		config:       config,
+		ollamaClient: ollamaClient,
 	}
 }
@@ -132,24 +137,56 @@ func (s *Service) classifyTaskWithHeuristics(ctx context.Context, input *TaskAna
 	return classification, nil
 }
 
 // classifyTaskWithLLM uses LLM-based classification for advanced analysis
 func (s *Service) classifyTaskWithLLM(ctx context.Context, input *TaskAnalysisInput) (*TaskClassification, error) {
 	if s.config.FeatureFlags.EnableAnalysisLogging {
 		log.Info().
 			Str("model", s.config.ClassificationModel).
 			Msg("Using LLM for task classification")
 	}
 
-	// TODO: Implement LLM-based classification
-	// This would make API calls to the configured LLM model
-	// For now, fall back to heuristics if failsafe is enabled
-	if s.config.FeatureFlags.EnableFailsafeFallback {
-		log.Warn().Msg("LLM classification not yet implemented, falling back to heuristics")
-		return s.classifyTaskWithHeuristics(ctx, input)
-	}
-
-	return nil, fmt.Errorf("LLM classification not implemented")
+	// Create classification prompt
+	prompt := s.ollamaClient.BuildTaskClassificationPrompt(input)
+
+	// Set timeout for LLM operation
+	llmCtx, cancel := context.WithTimeout(ctx, time.Duration(s.config.AnalysisTimeoutSecs)*time.Second)
+	defer cancel()
+
+	// Call Ollama API
+	response, err := s.ollamaClient.Generate(llmCtx, prompt)
+	if err != nil {
+		if s.config.FeatureFlags.EnableFailsafeFallback {
+			log.Warn().
+				Err(err).
+				Msg("LLM classification failed, falling back to heuristics")
+			return s.classifyTaskWithHeuristics(ctx, input)
+		}
+		return nil, fmt.Errorf("LLM classification failed: %w", err)
+	}
+
+	// Parse LLM response
+	classification, err := s.ollamaClient.ParseTaskClassificationResponse(response)
+	if err != nil {
+		if s.config.FeatureFlags.EnableFailsafeFallback {
+			log.Warn().
+				Err(err).
+				Str("response", response).
+				Msg("Failed to parse LLM classification response, falling back to heuristics")
+			return s.classifyTaskWithHeuristics(ctx, input)
+		}
+		return nil, fmt.Errorf("failed to parse LLM classification: %w", err)
+	}
+
+	if s.config.FeatureFlags.EnableAnalysisLogging {
+		log.Info().
+			Str("task_type", string(classification.TaskType)).
+			Float64("complexity", classification.ComplexityScore).
+			Strs("primary_domains", classification.PrimaryDomains).
+			Str("risk_level", classification.RiskLevel).
+			Msg("Task classified with LLM")
+	}
+
+	return classification, nil
 }
 
 // determineTaskType uses heuristics to classify the task type
@@ -417,17 +454,86 @@ func (s *Service) analyzeSkillRequirementsWithLLM(ctx context.Context, input *Ta
Str("model", s.config.SkillAnalysisModel).
Msg("Using LLM for skill analysis")
}
// TODO: Implement LLM-based skill analysis
// This would make API calls to the configured LLM model
// For now, fall back to heuristics if failsafe is enabled
if s.config.FeatureFlags.EnableFailsafeFallback {
log.Warn().Msg("LLM skill analysis not yet implemented, falling back to heuristics")
return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
// Create skill analysis prompt
prompt := s.ollamaClient.BuildSkillAnalysisPrompt(input, classification)
// Set timeout for LLM operation
llmCtx, cancel := context.WithTimeout(ctx, time.Duration(s.config.AnalysisTimeoutSecs)*time.Second)
defer cancel()
// Call Ollama API (use skill analysis model if different from classification model)
skillModel := s.config.SkillAnalysisModel
if skillModel != s.ollamaClient.model {
// Create a temporary client with the skill analysis model
skillClient := NewOllamaClient(skillModel)
response, err := skillClient.Generate(llmCtx, prompt)
if err != nil {
if s.config.FeatureFlags.EnableFailsafeFallback {
log.Warn().
Err(err).
Msg("LLM skill analysis failed, falling back to heuristics")
return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
}
return nil, fmt.Errorf("LLM skill analysis failed: %w", err)
}
// Parse LLM response
skillRequirements, err := s.ollamaClient.ParseSkillRequirementsResponse(response)
if err != nil {
if s.config.FeatureFlags.EnableFailsafeFallback {
log.Warn().
Err(err).
Str("response", response).
Msg("Failed to parse LLM skill analysis response, falling back to heuristics")
return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
}
return nil, fmt.Errorf("failed to parse LLM skill analysis: %w", err)
}
if s.config.FeatureFlags.EnableAnalysisLogging {
log.Info().
Int("critical_skills", len(skillRequirements.CriticalSkills)).
Int("desirable_skills", len(skillRequirements.DesirableSkills)).
Msg("Skills analyzed with LLM")
}
return skillRequirements, nil
}
return nil, fmt.Errorf("LLM skill analysis not implemented")
// Use the same client if models are the same
response, err := s.ollamaClient.Generate(llmCtx, prompt)
if err != nil {
if s.config.FeatureFlags.EnableFailsafeFallback {
log.Warn().
Err(err).
Msg("LLM skill analysis failed, falling back to heuristics")
return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
}
return nil, fmt.Errorf("LLM skill analysis failed: %w", err)
}
// Parse LLM response
skillRequirements, err := s.ollamaClient.ParseSkillRequirementsResponse(response)
if err != nil {
if s.config.FeatureFlags.EnableFailsafeFallback {
log.Warn().
Err(err).
Str("response", response).
Msg("Failed to parse LLM skill analysis response, falling back to heuristics")
return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
}
return nil, fmt.Errorf("failed to parse LLM skill analysis: %w", err)
}
if s.config.FeatureFlags.EnableAnalysisLogging {
log.Info().
Int("critical_skills", len(skillRequirements.CriticalSkills)).
Int("desirable_skills", len(skillRequirements.DesirableSkills)).
Msg("Skills analyzed with LLM")
}
return skillRequirements, nil
}
// getAvailableAgents retrieves agents that are available for assignment