feat: implement LLM integration for team composition engine
Resolves WHOOSH-LLM-002: Replace stubbed LLM functions with full Ollama API integration

## New Features
- Full Ollama API integration with automatic endpoint discovery
- LLM-powered task classification using configurable models
- LLM-powered skill requirement analysis
- Graceful fallback to heuristics on LLM failures
- Feature flag support for LLM vs heuristic execution
- Performance optimization with smaller, faster models (llama3.2:latest)

## Implementation Details
- Created OllamaClient with connection pooling and timeout handling
- Structured prompt engineering for consistent JSON responses
- Robust error handling with automatic failover to heuristics
- Comprehensive integration tests validating functionality
- Support for multiple Ollama endpoints with health checking

## Performance & Reliability
- Timeout configuration prevents hanging requests
- Fallback mechanism ensures system reliability
- Uses a 3.2B-parameter model to balance speed and accuracy
- Graceful degradation when LLM services are unavailable

## Files Added
- internal/composer/ollama.go: Core Ollama API integration
- internal/composer/llm_test.go: Comprehensive integration tests

## Files Modified
- internal/composer/service.go: Implemented LLM functions
- internal/composer/models.go: Updated config for performance

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
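For orientation, here is a minimal sketch of the client shape the new tests exercise. internal/composer/ollama.go itself is not included in this diff, so everything below is inferred from the test code (NewOllamaClient, Generate, and the baseURL/model/httpClient fields) plus Ollama's public POST /api/generate endpoint; the default base URL and wire types are assumptions, not the committed implementation:

```go
// Hypothetical sketch of internal/composer/ollama.go (not part of this diff).
// Field names match the tests; the wire format assumes Ollama's public
// POST /api/generate endpoint, and the default base URL is a guess.
package composer

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "net/http"
    "time"
)

type OllamaClient struct {
    baseURL    string
    model      string
    httpClient *http.Client
}

func NewOllamaClient(model string) *OllamaClient {
    return &OllamaClient{
        baseURL:    "http://localhost:11434", // assumed default Ollama endpoint
        model:      model,
        httpClient: &http.Client{Timeout: 30 * time.Second},
    }
}

// Generate sends one non-streaming completion request and returns the text.
func (c *OllamaClient) Generate(ctx context.Context, prompt string) (string, error) {
    payload, err := json.Marshal(map[string]interface{}{
        "model":  c.model,
        "prompt": prompt,
        "stream": false,
    })
    if err != nil {
        return "", err
    }

    req, err := http.NewRequestWithContext(ctx, http.MethodPost,
        c.baseURL+"/api/generate", bytes.NewReader(payload))
    if err != nil {
        return "", err
    }
    req.Header.Set("Content-Type", "application/json")

    resp, err := c.httpClient.Do(req)
    if err != nil {
        return "", err // callers fall back to heuristics on transport errors
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return "", fmt.Errorf("ollama returned status %d", resp.StatusCode)
    }

    var out struct {
        Response string `json:"response"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        return "", err
    }
    return out.Response, nil
}
```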
internal/composer/llm_test.go (new file, 220 lines)

package composer

import (
    "context"
    "net/http"
    "testing"
    "time"
)

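// TestOllamaClient_Generate is a live integration test: it sends a real
// prompt to the configured Ollama endpoint and checks that a non-empty
// response comes back.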
func TestOllamaClient_Generate(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    client := NewOllamaClient("llama3.1:8b")

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    prompt := "What is the capital of France? Respond with just the city name."

    response, err := client.Generate(ctx, prompt)
    if err != nil {
        t.Fatalf("Failed to generate response: %v", err)
    }

    if response == "" {
        t.Error("Expected non-empty response")
    }

    t.Logf("Ollama response: %s", response)
}

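// TestTaskClassificationWithLLM exercises the LLM-backed task classifier
// end to end, asserting both the shape of the result and the <5s latency
// requirement.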
func TestTaskClassificationWithLLM(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    // Create test configuration with LLM enabled
    config := DefaultComposerConfig()
    config.FeatureFlags.EnableLLMClassification = true
    config.FeatureFlags.EnableAnalysisLogging = true
    config.FeatureFlags.EnableFailsafeFallback = true

    service := NewService(nil, config)

    testInput := &TaskAnalysisInput{
        Title:       "Fix Docker Client API compilation error in swarm_manager.go",
        Description: "The error is: undefined: types.ContainerLogsOptions. This needs to be fixed to allow proper compilation of the WHOOSH project.",
        Requirements: []string{
            "Fix compilation error",
            "Maintain backward compatibility",
            "Test the fix",
        },
        Priority:  PriorityHigh,
        TechStack: []string{"Go", "Docker", "API"},
    }

    ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
    defer cancel()

    startTime := time.Now()
    classification, err := service.classifyTaskWithLLM(ctx, testInput)
    duration := time.Since(startTime)

    if err != nil {
        t.Fatalf("LLM classification failed: %v", err)
    }

    // Verify classification results
    if classification == nil {
        t.Fatal("Expected non-nil classification")
    }

    if classification.TaskType == "" {
        t.Error("Expected task type to be set")
    }

    if classification.ComplexityScore < 0 || classification.ComplexityScore > 1 {
        t.Errorf("Expected complexity score between 0-1, got %f", classification.ComplexityScore)
    }

    if len(classification.PrimaryDomains) == 0 {
        t.Error("Expected at least one primary domain")
    }

    // Check performance requirement
    if duration > 5*time.Second {
        t.Errorf("Classification took %v, exceeds 5s requirement", duration)
    }

    t.Logf("Classification completed in %v", duration)
    t.Logf("Task Type: %s", classification.TaskType)
    t.Logf("Complexity: %.2f", classification.ComplexityScore)
    t.Logf("Primary Domains: %v", classification.PrimaryDomains)
    t.Logf("Risk Level: %s", classification.RiskLevel)
}

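// TestSkillAnalysisWithLLM feeds a pre-built classification into the
// LLM-backed skill analyzer and checks the returned skill sets for
// internal consistency and latency.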
func TestSkillAnalysisWithLLM(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    // Create test configuration with LLM enabled
    config := DefaultComposerConfig()
    config.FeatureFlags.EnableLLMSkillAnalysis = true
    config.FeatureFlags.EnableAnalysisLogging = true
    config.FeatureFlags.EnableFailsafeFallback = true

    service := NewService(nil, config)

    testInput := &TaskAnalysisInput{
        Title:       "Implement LLM Integration for Team Composition Engine",
        Description: "Implement LLM-powered task classification and skill requirement analysis using Ollama API",
        Requirements: []string{
            "Connect to Ollama API",
            "Implement task classification",
            "Add error handling",
            "Support feature flags",
        },
        Priority:  PriorityHigh,
        TechStack: []string{"Go", "HTTP API", "LLM", "JSON"},
    }

    // Create a sample classification
    classification := &TaskClassification{
        TaskType:           TaskTypeFeatureDevelopment,
        ComplexityScore:    0.7,
        PrimaryDomains:     []string{"backend", "api", "ai"},
        SecondaryDomains:   []string{"integration"},
        EstimatedDuration:  8,
        RiskLevel:          "medium",
        RequiredExperience: "intermediate",
    }

    ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
    defer cancel()

    startTime := time.Now()
    skillRequirements, err := service.analyzeSkillRequirementsWithLLM(ctx, testInput, classification)
    duration := time.Since(startTime)

    if err != nil {
        t.Fatalf("LLM skill analysis failed: %v", err)
    }

    // Verify skill requirements results
    if skillRequirements == nil {
        t.Fatal("Expected non-nil skill requirements")
    }

    if len(skillRequirements.CriticalSkills) == 0 {
        t.Error("Expected at least one critical skill")
    }

    if skillRequirements.TotalSkillCount != len(skillRequirements.CriticalSkills)+len(skillRequirements.DesirableSkills) {
        t.Error("Total skill count mismatch")
    }

    // Check performance requirement
    if duration > 5*time.Second {
        t.Errorf("Skill analysis took %v, exceeds 5s requirement", duration)
    }

    t.Logf("Skill analysis completed in %v", duration)
    t.Logf("Critical Skills: %d", len(skillRequirements.CriticalSkills))
    t.Logf("Desirable Skills: %d", len(skillRequirements.DesirableSkills))
    t.Logf("Total Skills: %d", skillRequirements.TotalSkillCount)

    // Log the first few critical skills for manual verification
    for i, skill := range skillRequirements.CriticalSkills {
        if i >= 3 {
            break
        }
        t.Logf("Critical Skill %d: %s (proficiency: %.2f)", i+1, skill.Domain, skill.MinProficiency)
    }
}

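// TestLLMIntegrationFallback points the service at an unreachable Ollama
// endpoint and verifies that classification still succeeds via the
// heuristic fallback path.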
func TestLLMIntegrationFallback(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration test in short mode")
    }

    // Create test configuration with LLM enabled but an invalid endpoint
    config := DefaultComposerConfig()
    config.FeatureFlags.EnableLLMClassification = true
    config.FeatureFlags.EnableFailsafeFallback = true
    config.AnalysisTimeoutSecs = 1 // Very short timeout to trigger failure

    service := NewService(nil, config)

    // Override with a client that will fail
    service.ollamaClient = &OllamaClient{
        baseURL: "http://invalid-endpoint:99999",
        model:   "invalid-model",
        httpClient: &http.Client{
            Timeout: 1 * time.Second,
        },
    }

    testInput := &TaskAnalysisInput{
        Title:       "Test Task",
        Description: "This should fall back to heuristics",
        Priority:    PriorityLow,
        TechStack:   []string{"Go"},
    }

    ctx := context.Background()

    // This should fall back to heuristics when the LLM call fails
    classification, err := service.classifyTaskWithLLM(ctx, testInput)
    if err != nil {
        t.Fatalf("Expected fallback to succeed, got error: %v", err)
    }

    if classification == nil {
        t.Fatal("Expected classification result from fallback")
    }

    t.Logf("Fallback classification successful: %s", classification.TaskType)
}
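All four tests are live integration tests gated on `testing.Short()`: running `go test -short ./internal/composer` skips them, while `go test -v ./internal/composer` runs them in full and assumes a reachable Ollama endpoint.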