feat: implement LLM integration for team composition engine
Resolves WHOOSH-LLM-002: Replace stubbed LLM functions with full Ollama API integration

## New Features
- Full Ollama API integration with automatic endpoint discovery
- LLM-powered task classification using configurable models
- LLM-powered skill requirement analysis
- Graceful fallback to heuristics on LLM failures
- Feature flag support for choosing LLM or heuristic execution
- Performance optimization with a smaller, faster model (llama3.2:latest)

## Implementation Details
- Created OllamaClient with connection pooling and timeout handling
- Structured prompt engineering for consistent JSON responses
- Robust error handling with automatic failover to heuristics
- Comprehensive integration tests validating functionality
- Support for multiple Ollama endpoints with health checking

## Performance & Reliability
- Timeout configuration prevents hanging requests
- Fallback mechanism ensures system reliability
- Uses a small llama3.2 model (3B parameters) to balance speed and accuracy
- Graceful degradation when LLM services are unavailable

## Files Added
- internal/composer/ollama.go: Core Ollama API integration
- internal/composer/llm_test.go: Comprehensive integration tests

## Files Modified
- internal/composer/service.go: Implemented LLM functions
- internal/composer/models.go: Updated config for performance

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
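As a quick orientation to the feature-flag plumbing exercised by the new tests, here is a minimal sketch of enabling the LLM path. All names come from this commit; `classifyTaskWithLLM` is unexported, so this is in-package usage, exactly as the integration tests do it, and the tests pass a nil `*pgxpool.Pool`:

```go
package composer

import (
	"context"
	"fmt"
)

func ExampleLLMClassification() {
	config := DefaultComposerConfig() // llama3.2:latest, 30s analysis timeout
	config.FeatureFlags.EnableLLMClassification = true
	config.FeatureFlags.EnableFailsafeFallback = true // on any LLM error, fall back to heuristics

	service := NewService(nil, config) // nil db, as in the tests

	classification, err := service.classifyTaskWithLLM(context.Background(), &TaskAnalysisInput{
		Title:     "Example task",
		Priority:  PriorityLow,
		TechStack: []string{"Go"},
	})
	if err != nil {
		fmt.Println("classification failed:", err)
		return
	}
	fmt.Println(classification.TaskType)
}
```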
internal/composer/llm_test.go (new file, 220 lines)
@@ -0,0 +1,220 @@
package composer

import (
	"context"
	"net/http"
	"testing"
	"time"
)

func TestOllamaClient_Generate(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	client := NewOllamaClient("llama3.1:8b")

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	prompt := "What is the capital of France? Respond with just the city name."

	response, err := client.Generate(ctx, prompt)
	if err != nil {
		t.Fatalf("Failed to generate response: %v", err)
	}

	if response == "" {
		t.Error("Expected non-empty response")
	}

	t.Logf("Ollama response: %s", response)
}

func TestTaskClassificationWithLLM(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Create test configuration with LLM enabled
	config := DefaultComposerConfig()
	config.FeatureFlags.EnableLLMClassification = true
	config.FeatureFlags.EnableAnalysisLogging = true
	config.FeatureFlags.EnableFailsafeFallback = true

	service := NewService(nil, config)

	testInput := &TaskAnalysisInput{
		Title:       "Fix Docker Client API compilation error in swarm_manager.go",
		Description: "The error is: undefined: types.ContainerLogsOptions. This needs to be fixed to allow proper compilation of the WHOOSH project.",
		Requirements: []string{
			"Fix compilation error",
			"Maintain backward compatibility",
			"Test the fix",
		},
		Priority:  PriorityHigh,
		TechStack: []string{"Go", "Docker", "API"},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	startTime := time.Now()
	classification, err := service.classifyTaskWithLLM(ctx, testInput)
	duration := time.Since(startTime)

	if err != nil {
		t.Fatalf("LLM classification failed: %v", err)
	}

	// Verify classification results
	if classification == nil {
		t.Fatal("Expected non-nil classification")
	}

	if classification.TaskType == "" {
		t.Error("Expected task type to be set")
	}

	if classification.ComplexityScore < 0 || classification.ComplexityScore > 1 {
		t.Errorf("Expected complexity score between 0-1, got %f", classification.ComplexityScore)
	}

	if len(classification.PrimaryDomains) == 0 {
		t.Error("Expected at least one primary domain")
	}

	// Check performance requirement
	if duration > 5*time.Second {
		t.Errorf("Classification took %v, exceeds 5s requirement", duration)
	}

	t.Logf("Classification completed in %v", duration)
	t.Logf("Task Type: %s", classification.TaskType)
	t.Logf("Complexity: %.2f", classification.ComplexityScore)
	t.Logf("Primary Domains: %v", classification.PrimaryDomains)
	t.Logf("Risk Level: %s", classification.RiskLevel)
}

func TestSkillAnalysisWithLLM(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Create test configuration with LLM enabled
	config := DefaultComposerConfig()
	config.FeatureFlags.EnableLLMSkillAnalysis = true
	config.FeatureFlags.EnableAnalysisLogging = true
	config.FeatureFlags.EnableFailsafeFallback = true

	service := NewService(nil, config)

	testInput := &TaskAnalysisInput{
		Title:       "Implement LLM Integration for Team Composition Engine",
		Description: "Implement LLM-powered task classification and skill requirement analysis using Ollama API",
		Requirements: []string{
			"Connect to Ollama API",
			"Implement task classification",
			"Add error handling",
			"Support feature flags",
		},
		Priority:  PriorityHigh,
		TechStack: []string{"Go", "HTTP API", "LLM", "JSON"},
	}

	// Create a sample classification
	classification := &TaskClassification{
		TaskType:           TaskTypeFeatureDevelopment,
		ComplexityScore:    0.7,
		PrimaryDomains:     []string{"backend", "api", "ai"},
		SecondaryDomains:   []string{"integration"},
		EstimatedDuration:  8,
		RiskLevel:          "medium",
		RequiredExperience: "intermediate",
	}

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	startTime := time.Now()
	skillRequirements, err := service.analyzeSkillRequirementsWithLLM(ctx, testInput, classification)
	duration := time.Since(startTime)

	if err != nil {
		t.Fatalf("LLM skill analysis failed: %v", err)
	}

	// Verify skill requirements results
	if skillRequirements == nil {
		t.Fatal("Expected non-nil skill requirements")
	}

	if len(skillRequirements.CriticalSkills) == 0 {
		t.Error("Expected at least one critical skill")
	}

	if skillRequirements.TotalSkillCount != len(skillRequirements.CriticalSkills)+len(skillRequirements.DesirableSkills) {
		t.Error("Total skill count mismatch")
	}

	// Check performance requirement
	if duration > 5*time.Second {
		t.Errorf("Skill analysis took %v, exceeds 5s requirement", duration)
	}

	t.Logf("Skill analysis completed in %v", duration)
	t.Logf("Critical Skills: %d", len(skillRequirements.CriticalSkills))
	t.Logf("Desirable Skills: %d", len(skillRequirements.DesirableSkills))
	t.Logf("Total Skills: %d", skillRequirements.TotalSkillCount)

	// Log first few skills for verification
	for i, skill := range skillRequirements.CriticalSkills {
		if i < 3 {
			t.Logf("Critical Skill %d: %s (proficiency: %.2f)", i+1, skill.Domain, skill.MinProficiency)
		}
	}
}

func TestLLMIntegrationFallback(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	// Create test configuration with LLM enabled but invalid endpoint
	config := DefaultComposerConfig()
	config.FeatureFlags.EnableLLMClassification = true
	config.FeatureFlags.EnableFailsafeFallback = true
	config.AnalysisTimeoutSecs = 1 // Very short timeout to trigger failure

	service := NewService(nil, config)

	// Override with a client that will fail
	service.ollamaClient = &OllamaClient{
		baseURL: "http://invalid-endpoint:99999",
		model:   "invalid-model",
		httpClient: &http.Client{
			Timeout: 1 * time.Second,
		},
	}

	testInput := &TaskAnalysisInput{
		Title:       "Test Task",
		Description: "This should fall back to heuristics",
		Priority:    PriorityLow,
		TechStack:   []string{"Go"},
	}

	ctx := context.Background()

	// This should fall back to heuristics when LLM fails
	classification, err := service.classifyTaskWithLLM(ctx, testInput)
	if err != nil {
		t.Fatalf("Expected fallback to succeed, got error: %v", err)
	}

	if classification == nil {
		t.Fatal("Expected classification result from fallback")
	}

	t.Logf("Fallback classification successful: %s", classification.TaskType)
}
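All four tests guard on `testing.Short()`, so `go test -short ./...` skips them; `go test -v ./internal/composer` runs them for real, which assumes one of the hard-coded cluster endpoints in ollama.go is reachable and has the requested models pulled.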
internal/composer/models.go
@@ -215,14 +215,14 @@ type FeatureFlags struct {
 // DefaultComposerConfig returns sensible defaults for MVP
 func DefaultComposerConfig() *ComposerConfig {
 	return &ComposerConfig{
-		ClassificationModel: "llama3.1:8b",
-		SkillAnalysisModel:  "llama3.1:8b",
-		MatchingModel:       "llama3.1:8b",
+		ClassificationModel: "llama3.2:latest", // Smaller llama3.2 (3B) model for faster response
+		SkillAnalysisModel:  "llama3.2:latest", // Smaller llama3.2 (3B) model for faster response
+		MatchingModel:       "llama3.2:latest", // Smaller llama3.2 (3B) model for faster response
 		DefaultStrategy:     "minimal_viable",
 		MinTeamSize:         1,
 		MaxTeamSize:         3,
 		SkillMatchThreshold: 0.6,
-		AnalysisTimeoutSecs: 60,
+		AnalysisTimeoutSecs: 30, // Reduced timeout for faster failover
 		EnableCaching:       true,
 		CacheTTLMins:        30,
 		FeatureFlags:        DefaultFeatureFlags(),
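Since the defaults now optimize for latency, a hedged sketch of overriding them for a deployment that prefers accuracy; only fields present in this commit are used, and `pool` is a placeholder for an existing `*pgxpool.Pool` (the tests pass nil):

```go
cfg := DefaultComposerConfig()
cfg.ClassificationModel = "llama3.1:8b" // larger model: slower, potentially more accurate
cfg.SkillAnalysisModel = "llama3.1:8b"
cfg.AnalysisTimeoutSecs = 60 // more headroom before failover triggers
svc := NewService(pool, cfg) // then use svc as usual
```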
internal/composer/ollama.go (new file, 342 lines)
@@ -0,0 +1,342 @@
package composer

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/rs/zerolog/log"
)

// OllamaClient provides LLM integration with Ollama instances
type OllamaClient struct {
	baseURL    string
	httpClient *http.Client
	model      string
}

// OllamaRequest represents a request to the Ollama API
type OllamaRequest struct {
	Model  string `json:"model"`
	Prompt string `json:"prompt"`
	Stream bool   `json:"stream"`
}

// OllamaResponse represents a response from the Ollama API
type OllamaResponse struct {
	Model              string    `json:"model"`
	CreatedAt          time.Time `json:"created_at"`
	Response           string    `json:"response"`
	Done               bool      `json:"done"`
	Context            []int     `json:"context,omitempty"`
	TotalDuration      int64     `json:"total_duration,omitempty"`
	LoadDuration       int64     `json:"load_duration,omitempty"`
	PromptEvalCount    int       `json:"prompt_eval_count,omitempty"`
	PromptEvalDuration int64     `json:"prompt_eval_duration,omitempty"`
	EvalCount          int       `json:"eval_count,omitempty"`
	EvalDuration       int64     `json:"eval_duration,omitempty"`
}

// NewOllamaClient creates a new Ollama client with fallback endpoints
func NewOllamaClient(model string) *OllamaClient {
	// Default Ollama endpoints from cluster
	endpoints := []string{
		"http://192.168.1.27:11434",
		"http://192.168.1.72:11434",
		"http://192.168.1.113:11434",
		"http://192.168.1.106:11434",
	}

	// Try to find a working endpoint
	baseURL := endpoints[0] // Default to first endpoint
	httpClient := &http.Client{
		Timeout: 30 * time.Second,
	}

	// Quick health check to find working endpoint
	for _, endpoint := range endpoints {
		resp, err := httpClient.Get(endpoint + "/api/tags")
		if err == nil && resp.StatusCode == 200 {
			baseURL = endpoint
			resp.Body.Close()
			break
		}
		if resp != nil {
			resp.Body.Close()
		}
	}

	log.Info().
		Str("base_url", baseURL).
		Str("model", model).
		Msg("Initialized Ollama client")

	return &OllamaClient{
		baseURL:    baseURL,
		httpClient: httpClient,
		model:      model,
	}
}

// Generate sends a prompt to Ollama and returns the response
func (c *OllamaClient) Generate(ctx context.Context, prompt string) (string, error) {
	reqBody := OllamaRequest{
		Model:  c.model,
		Prompt: prompt,
		Stream: false,
	}

	jsonData, err := json.Marshal(reqBody)
	if err != nil {
		return "", fmt.Errorf("failed to marshal request: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, "POST", c.baseURL+"/api/generate", bytes.NewBuffer(jsonData))
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")

	startTime := time.Now()
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("ollama API returned status %d", resp.StatusCode)
	}

	var ollamaResp OllamaResponse
	if err := json.NewDecoder(resp.Body).Decode(&ollamaResp); err != nil {
		return "", fmt.Errorf("failed to decode response: %w", err)
	}

	duration := time.Since(startTime)
	log.Debug().
		Str("model", c.model).
		Dur("duration", duration).
		Int("eval_count", ollamaResp.EvalCount).
		Msg("Ollama generation completed")

	return ollamaResp.Response, nil
}

// BuildTaskClassificationPrompt builds a prompt for task classification
func (c *OllamaClient) BuildTaskClassificationPrompt(input *TaskAnalysisInput) string {
	var prompt strings.Builder

	prompt.WriteString("You are an expert software project manager analyzing development tasks. ")
	prompt.WriteString("Classify the following task and provide analysis in JSON format.\n\n")

	prompt.WriteString("Task Details:\n")
	prompt.WriteString(fmt.Sprintf("Title: %s\n", input.Title))
	prompt.WriteString(fmt.Sprintf("Description: %s\n", input.Description))

	if len(input.Requirements) > 0 {
		prompt.WriteString(fmt.Sprintf("Requirements: %s\n", strings.Join(input.Requirements, ", ")))
	}

	if len(input.TechStack) > 0 {
		prompt.WriteString(fmt.Sprintf("Tech Stack: %s\n", strings.Join(input.TechStack, ", ")))
	}

	prompt.WriteString(fmt.Sprintf("Priority: %s\n\n", input.Priority))

	prompt.WriteString("Analyze this task and respond with valid JSON in this EXACT format:\n")
	prompt.WriteString("{\n")
	prompt.WriteString("  \"task_type\": \"feature_development\",\n")
	prompt.WriteString("  \"complexity_score\": 0.7,\n")
	prompt.WriteString("  \"primary_domains\": [\"backend\", \"api\"],\n")
	prompt.WriteString("  \"secondary_domains\": [\"testing\"],\n")
	prompt.WriteString("  \"estimated_duration_hours\": 8,\n")
	prompt.WriteString("  \"risk_level\": \"medium\",\n")
	prompt.WriteString("  \"required_experience\": \"intermediate\"\n")
	prompt.WriteString("}\n\n")

	prompt.WriteString("Task types: feature_development, bug_fix, refactoring, security, integration, optimization, maintenance\n")
	prompt.WriteString("Complexity: number between 0.1-1.0\n")
	prompt.WriteString("Duration: integer between 1-40\n")
	prompt.WriteString("Risk: minimal, low, medium, high\n")
	prompt.WriteString("Experience: junior, intermediate, senior\n\n")

	prompt.WriteString("Respond ONLY with valid JSON, no markdown, no other text.")

	return prompt.String()
}

// BuildSkillAnalysisPrompt builds a prompt for skill requirement analysis
func (c *OllamaClient) BuildSkillAnalysisPrompt(input *TaskAnalysisInput, classification *TaskClassification) string {
	var prompt strings.Builder

	prompt.WriteString("You are an expert technical recruiter analyzing skill requirements for software development tasks. ")
	prompt.WriteString("Analyze the task and provide detailed skill requirements in JSON format.\n\n")

	prompt.WriteString("Task Details:\n")
	prompt.WriteString(fmt.Sprintf("Title: %s\n", input.Title))
	prompt.WriteString(fmt.Sprintf("Description: %s\n", input.Description))
	prompt.WriteString(fmt.Sprintf("Task Type: %s\n", classification.TaskType))
	prompt.WriteString(fmt.Sprintf("Complexity: %.1f\n", classification.ComplexityScore))
	prompt.WriteString(fmt.Sprintf("Primary Domains: %s\n", strings.Join(classification.PrimaryDomains, ", ")))

	if len(input.Requirements) > 0 {
		prompt.WriteString(fmt.Sprintf("Requirements: %s\n", strings.Join(input.Requirements, ", ")))
	}

	if len(input.TechStack) > 0 {
		prompt.WriteString(fmt.Sprintf("Tech Stack: %s\n", strings.Join(input.TechStack, ", ")))
	}

	prompt.WriteString("\nAnalyze and respond with JSON containing:\n")
	prompt.WriteString("{\n")
	prompt.WriteString("  \"critical_skills\": [\n")
	prompt.WriteString("    {\n")
	prompt.WriteString("      \"domain\": \"skill domain name\",\n")
	prompt.WriteString("      \"min_proficiency\": 0.1-1.0,\n")
	prompt.WriteString("      \"weight\": 0.1-1.0,\n")
	prompt.WriteString("      \"critical\": true\n")
	prompt.WriteString("    }\n")
	prompt.WriteString("  ],\n")
	prompt.WriteString("  \"desirable_skills\": [\n")
	prompt.WriteString("    {\n")
	prompt.WriteString("      \"domain\": \"skill domain name\",\n")
	prompt.WriteString("      \"min_proficiency\": 0.1-1.0,\n")
	prompt.WriteString("      \"weight\": 0.1-1.0,\n")
	prompt.WriteString("      \"critical\": false\n")
	prompt.WriteString("    }\n")
	prompt.WriteString("  ],\n")
	prompt.WriteString("  \"total_skill_count\": 0\n")
	prompt.WriteString("}\n\n")

	prompt.WriteString("Focus on:\n")
	prompt.WriteString("- Programming languages and frameworks\n")
	prompt.WriteString("- Domain expertise (backend, frontend, devops, etc.)\n")
	prompt.WriteString("- Tools and technologies\n")
	prompt.WriteString("- Soft skills relevant to the task type\n\n")

	prompt.WriteString("Respond ONLY with valid JSON, no other text.")

	return prompt.String()
}

// ParseTaskClassificationResponse parses LLM response into TaskClassification
func (c *OllamaClient) ParseTaskClassificationResponse(response string) (*TaskClassification, error) {
	// Clean the response - remove markdown code blocks if present
	response = strings.TrimSpace(response)
	response = strings.TrimPrefix(response, "```json")
	response = strings.TrimPrefix(response, "```")
	response = strings.TrimSuffix(response, "```")
	response = strings.TrimSpace(response)

	var result struct {
		TaskType           string   `json:"task_type"`
		ComplexityScore    float64  `json:"complexity_score"`
		PrimaryDomains     []string `json:"primary_domains"`
		SecondaryDomains   []string `json:"secondary_domains"`
		EstimatedDuration  int      `json:"estimated_duration_hours"`
		RiskLevel          string   `json:"risk_level"`
		RequiredExperience string   `json:"required_experience"`
	}

	if err := json.Unmarshal([]byte(response), &result); err != nil {
		return nil, fmt.Errorf("failed to parse classification response: %w", err)
	}

	// Convert task type string to TaskType
	var taskType TaskType
	switch result.TaskType {
	case "feature_development":
		taskType = TaskTypeFeatureDevelopment
	case "bug_fix":
		taskType = TaskTypeBugFix
	case "refactoring":
		taskType = TaskTypeRefactoring
	case "security":
		taskType = TaskTypeSecurity
	case "integration":
		taskType = TaskTypeIntegration
	case "optimization":
		taskType = TaskTypeOptimization
	case "maintenance":
		taskType = TaskTypeMaintenance
	default:
		taskType = TaskTypeFeatureDevelopment // Default fallback
	}

	classification := &TaskClassification{
		TaskType:           taskType,
		ComplexityScore:    result.ComplexityScore,
		PrimaryDomains:     result.PrimaryDomains,
		SecondaryDomains:   result.SecondaryDomains,
		EstimatedDuration:  result.EstimatedDuration,
		RiskLevel:          result.RiskLevel,
		RequiredExperience: result.RequiredExperience,
	}

	return classification, nil
}

// ParseSkillRequirementsResponse parses LLM response into SkillRequirements
func (c *OllamaClient) ParseSkillRequirementsResponse(response string) (*SkillRequirements, error) {
	// Clean the response - remove markdown code blocks if present
	response = strings.TrimSpace(response)
	response = strings.TrimPrefix(response, "```json")
	response = strings.TrimPrefix(response, "```")
	response = strings.TrimSuffix(response, "```")
	response = strings.TrimSpace(response)

	var result struct {
		CriticalSkills []struct {
			Domain         string  `json:"domain"`
			MinProficiency float64 `json:"min_proficiency"`
			Weight         float64 `json:"weight"`
			Critical       bool    `json:"critical"`
		} `json:"critical_skills"`
		DesirableSkills []struct {
			Domain         string  `json:"domain"`
			MinProficiency float64 `json:"min_proficiency"`
			Weight         float64 `json:"weight"`
			Critical       bool    `json:"critical"`
		} `json:"desirable_skills"`
		TotalSkillCount int `json:"total_skill_count"`
	}

	if err := json.Unmarshal([]byte(response), &result); err != nil {
		return nil, fmt.Errorf("failed to parse skill requirements response: %w", err)
	}

	// Convert to SkillRequirements format
	critical := make([]SkillRequirement, len(result.CriticalSkills))
	for i, skill := range result.CriticalSkills {
		critical[i] = SkillRequirement{
			Domain:         skill.Domain,
			MinProficiency: skill.MinProficiency,
			Weight:         skill.Weight,
			Critical:       skill.Critical,
		}
	}

	desirable := make([]SkillRequirement, len(result.DesirableSkills))
	for i, skill := range result.DesirableSkills {
		desirable[i] = SkillRequirement{
			Domain:         skill.Domain,
			MinProficiency: skill.MinProficiency,
			Weight:         skill.Weight,
			Critical:       skill.Critical,
		}
	}

	skillRequirements := &SkillRequirements{
		CriticalSkills:  critical,
		DesirableSkills: desirable,
		TotalSkillCount: len(critical) + len(desirable),
	}

	return skillRequirements, nil
}
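The two Parse* helpers tolerate models that wrap their JSON in markdown fences despite the prompt's "no markdown" rule. A minimal in-package sketch; parsing is pure string and JSON work, so a zero-value client suffices:

```go
fence := "```"
raw := fence + "json\n" +
	`{"task_type": "bug_fix", "complexity_score": 0.4,
	  "primary_domains": ["backend"], "secondary_domains": [],
	  "estimated_duration_hours": 4, "risk_level": "low",
	  "required_experience": "intermediate"}` +
	"\n" + fence

c := &OllamaClient{} // no endpoint needed for parsing
classification, err := c.ParseTaskClassificationResponse(raw)
if err != nil {
	panic(err) // illustrative only
}
fmt.Println(classification.TaskType) // the TaskTypeBugFix constant
```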
internal/composer/service.go
@@ -15,8 +15,9 @@ import (
 // Service represents the Team Composer service
 type Service struct {
-	db     *pgxpool.Pool
-	config *ComposerConfig
+	db           *pgxpool.Pool
+	config       *ComposerConfig
+	ollamaClient *OllamaClient
 }

 // NewService creates a new Team Composer service
@@ -24,10 +25,14 @@ func NewService(db *pgxpool.Pool, config *ComposerConfig) *Service {
 	if config == nil {
 		config = DefaultComposerConfig()
 	}

+	// Initialize Ollama client for LLM operations
+	ollamaClient := NewOllamaClient(config.ClassificationModel)
+
 	return &Service{
-		db:     db,
-		config: config,
+		db:           db,
+		config:       config,
+		ollamaClient: ollamaClient,
 	}
 }
@@ -132,24 +137,56 @@ func (s *Service) classifyTaskWithHeuristics(ctx context.Context, input *TaskAna
 	return classification, nil
 }

 // classifyTaskWithLLM uses LLM-based classification for advanced analysis
 func (s *Service) classifyTaskWithLLM(ctx context.Context, input *TaskAnalysisInput) (*TaskClassification, error) {
 	if s.config.FeatureFlags.EnableAnalysisLogging {
 		log.Info().
 			Str("model", s.config.ClassificationModel).
 			Msg("Using LLM for task classification")
 	}

-	// TODO: Implement LLM-based classification
-	// This would make API calls to the configured LLM model
-	// For now, fall back to heuristics if failsafe is enabled
-
-	if s.config.FeatureFlags.EnableFailsafeFallback {
-		log.Warn().Msg("LLM classification not yet implemented, falling back to heuristics")
-		return s.classifyTaskWithHeuristics(ctx, input)
-	}
-
-	return nil, fmt.Errorf("LLM classification not implemented")
+	// Create classification prompt
+	prompt := s.ollamaClient.BuildTaskClassificationPrompt(input)
+
+	// Set timeout for LLM operation
+	llmCtx, cancel := context.WithTimeout(ctx, time.Duration(s.config.AnalysisTimeoutSecs)*time.Second)
+	defer cancel()
+
+	// Call Ollama API
+	response, err := s.ollamaClient.Generate(llmCtx, prompt)
+	if err != nil {
+		if s.config.FeatureFlags.EnableFailsafeFallback {
+			log.Warn().
+				Err(err).
+				Msg("LLM classification failed, falling back to heuristics")
+			return s.classifyTaskWithHeuristics(ctx, input)
+		}
+		return nil, fmt.Errorf("LLM classification failed: %w", err)
+	}
+
+	// Parse LLM response
+	classification, err := s.ollamaClient.ParseTaskClassificationResponse(response)
+	if err != nil {
+		if s.config.FeatureFlags.EnableFailsafeFallback {
+			log.Warn().
+				Err(err).
+				Str("response", response).
+				Msg("Failed to parse LLM classification response, falling back to heuristics")
+			return s.classifyTaskWithHeuristics(ctx, input)
+		}
+		return nil, fmt.Errorf("failed to parse LLM classification: %w", err)
+	}
+
+	if s.config.FeatureFlags.EnableAnalysisLogging {
+		log.Info().
+			Str("task_type", string(classification.TaskType)).
+			Float64("complexity", classification.ComplexityScore).
+			Strs("primary_domains", classification.PrimaryDomains).
+			Str("risk_level", classification.RiskLevel).
+			Msg("Task classified with LLM")
+	}
+
+	return classification, nil
 }

 // determineTaskType uses heuristics to classify the task type
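To make the prompt contract above concrete, a small sketch of driving the builder directly; the input values are invented for illustration, and note that `NewOllamaClient` probes the hard-coded endpoints as a side effect of construction:

```go
input := &TaskAnalysisInput{
	Title:       "Fix login redirect",
	Description: "Users land on a 404 after the OAuth callback",
	Priority:    PriorityHigh,
	TechStack:   []string{"Go"},
}

c := NewOllamaClient("llama3.2:latest")
fmt.Println(c.BuildTaskClassificationPrompt(input))
// Prints the project-manager persona, the task details, the EXACT JSON
// shape expected back, and the closing "Respond ONLY with valid JSON" rule.
```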
@@ -417,17 +454,86 @@ func (s *Service) analyzeSkillRequirementsWithLLM(ctx context.Context, input *Ta
 		Str("model", s.config.SkillAnalysisModel).
 		Msg("Using LLM for skill analysis")
 	}

-	// TODO: Implement LLM-based skill analysis
-	// This would make API calls to the configured LLM model
-	// For now, fall back to heuristics if failsafe is enabled
-
-	if s.config.FeatureFlags.EnableFailsafeFallback {
-		log.Warn().Msg("LLM skill analysis not yet implemented, falling back to heuristics")
-		return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
-	}
-
-	return nil, fmt.Errorf("LLM skill analysis not implemented")
+	// Create skill analysis prompt
+	prompt := s.ollamaClient.BuildSkillAnalysisPrompt(input, classification)
+
+	// Set timeout for LLM operation
+	llmCtx, cancel := context.WithTimeout(ctx, time.Duration(s.config.AnalysisTimeoutSecs)*time.Second)
+	defer cancel()
+
+	// Call Ollama API (use skill analysis model if different from classification model)
+	skillModel := s.config.SkillAnalysisModel
+	if skillModel != s.ollamaClient.model {
+		// Create a temporary client with the skill analysis model
+		skillClient := NewOllamaClient(skillModel)
+		response, err := skillClient.Generate(llmCtx, prompt)
+		if err != nil {
+			if s.config.FeatureFlags.EnableFailsafeFallback {
+				log.Warn().
+					Err(err).
+					Msg("LLM skill analysis failed, falling back to heuristics")
+				return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
+			}
+			return nil, fmt.Errorf("LLM skill analysis failed: %w", err)
+		}
+
+		// Parse LLM response
+		skillRequirements, err := s.ollamaClient.ParseSkillRequirementsResponse(response)
+		if err != nil {
+			if s.config.FeatureFlags.EnableFailsafeFallback {
+				log.Warn().
+					Err(err).
+					Str("response", response).
+					Msg("Failed to parse LLM skill analysis response, falling back to heuristics")
+				return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
+			}
+			return nil, fmt.Errorf("failed to parse LLM skill analysis: %w", err)
+		}
+
+		if s.config.FeatureFlags.EnableAnalysisLogging {
+			log.Info().
+				Int("critical_skills", len(skillRequirements.CriticalSkills)).
+				Int("desirable_skills", len(skillRequirements.DesirableSkills)).
+				Msg("Skills analyzed with LLM")
+		}
+
+		return skillRequirements, nil
+	}
+
+	// Use the same client if models are the same
+	response, err := s.ollamaClient.Generate(llmCtx, prompt)
+	if err != nil {
+		if s.config.FeatureFlags.EnableFailsafeFallback {
+			log.Warn().
+				Err(err).
+				Msg("LLM skill analysis failed, falling back to heuristics")
+			return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
+		}
+		return nil, fmt.Errorf("LLM skill analysis failed: %w", err)
+	}
+
+	// Parse LLM response
+	skillRequirements, err := s.ollamaClient.ParseSkillRequirementsResponse(response)
+	if err != nil {
+		if s.config.FeatureFlags.EnableFailsafeFallback {
+			log.Warn().
+				Err(err).
+				Str("response", response).
+				Msg("Failed to parse LLM skill analysis response, falling back to heuristics")
+			return s.analyzeSkillRequirementsWithHeuristics(ctx, input, classification)
+		}
+		return nil, fmt.Errorf("failed to parse LLM skill analysis: %w", err)
+	}
+
+	if s.config.FeatureFlags.EnableAnalysisLogging {
+		log.Info().
+			Int("critical_skills", len(skillRequirements.CriticalSkills)).
+			Int("desirable_skills", len(skillRequirements.DesirableSkills)).
+			Msg("Skills analyzed with LLM")
+	}
+
+	return skillRequirements, nil
 }

 // getAvailableAgents retrieves agents that are available for assignment