Files
WHOOSH/internal/composer/ollama.go
Claude Code 55dd5951ea
Some checks failed
WHOOSH CI / speclint (push) Has been cancelled
WHOOSH CI / contracts (push) Has been cancelled
WHOOSH CI / speclint (pull_request) Has been cancelled
WHOOSH CI / contracts (pull_request) Has been cancelled
feat: implement LLM integration for team composition engine
Resolves WHOOSH-LLM-002: Replace stubbed LLM functions with full Ollama API integration

## New Features
- Full Ollama API integration with automatic endpoint discovery
- LLM-powered task classification using configurable models
- LLM-powered skill requirement analysis
- Graceful fallback to heuristics on LLM failures
- Feature flag support for LLM vs heuristic execution
- Performance optimization with smaller, faster models (llama3.2:latest)

## Implementation Details
- Created OllamaClient with connection pooling and timeout handling
- Structured prompt engineering for consistent JSON responses
- Robust error handling with automatic failover to heuristics
- Comprehensive integration tests validating functionality
- Support for multiple Ollama endpoints with health checking

## Performance & Reliability
- Timeout configuration prevents hanging requests
- Fallback mechanism ensures system reliability
- Uses the 3B-parameter llama3.2 model for a balance of speed and accuracy
- Graceful degradation when LLM services unavailable

## Files Added
- internal/composer/ollama.go: Core Ollama API integration
- internal/composer/llm_test.go: Comprehensive integration tests

## Files Modified
- internal/composer/service.go: Implemented LLM functions
- internal/composer/models.go: Updated config for performance

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-21 21:57:16 +10:00

342 lines
12 KiB
Go

package composer
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/rs/zerolog/log"
)
// OllamaClient provides LLM integration with Ollama instances.
type OllamaClient struct {
	baseURL    string       // base URL of the selected Ollama endpoint, e.g. "http://host:11434"
	httpClient *http.Client // shared client with a 30s timeout (set in NewOllamaClient)
	model      string       // model name included in every generate request
}
// OllamaRequest represents a request to the Ollama API (/api/generate).
type OllamaRequest struct {
	Model  string `json:"model"`  // model to run, e.g. "llama3.2:latest"
	Prompt string `json:"prompt"` // full prompt text sent to the model
	Stream bool   `json:"stream"` // false = Ollama returns one complete response payload
}
// OllamaResponse represents a response from the Ollama API (/api/generate).
// NOTE(review): the *_duration fields appear to be raw int64 timings from
// Ollama (presumably nanoseconds, per the Ollama API) — confirm against the
// deployed server version before doing arithmetic on them.
type OllamaResponse struct {
	Model              string    `json:"model"`
	CreatedAt          time.Time `json:"created_at"`
	Response           string    `json:"response"` // generated completion text
	Done               bool      `json:"done"`     // true when generation has finished
	Context            []int     `json:"context,omitempty"` // opaque context tokens for follow-up requests
	TotalDuration      int64     `json:"total_duration,omitempty"`
	LoadDuration       int64     `json:"load_duration,omitempty"`
	PromptEvalCount    int       `json:"prompt_eval_count,omitempty"`
	PromptEvalDuration int64     `json:"prompt_eval_duration,omitempty"`
	EvalCount          int       `json:"eval_count,omitempty"` // tokens generated (logged by Generate)
	EvalDuration       int64     `json:"eval_duration,omitempty"`
}
// NewOllamaClient creates a new Ollama client, probing a set of known cluster
// endpoints and selecting the first healthy one. If no endpoint responds, the
// first endpoint is kept as a best-effort default and request errors surface
// on the first Generate call.
//
// The endpoint list is cluster-specific and hard-coded here; consider making
// it configurable (env var / config struct) in a follow-up.
func NewOllamaClient(model string) *OllamaClient {
	// Default Ollama endpoints from cluster.
	endpoints := []string{
		"http://192.168.1.27:11434",
		"http://192.168.1.72:11434",
		"http://192.168.1.113:11434",
		"http://192.168.1.106:11434",
	}

	// Client used for actual generation requests. Generation can be slow, so
	// this timeout is intentionally generous; health probing uses its own,
	// much shorter timeout (see findHealthyEndpoint).
	httpClient := &http.Client{
		Timeout: 30 * time.Second,
	}

	baseURL := findHealthyEndpoint(endpoints)

	log.Info().
		Str("base_url", baseURL).
		Str("model", model).
		Msg("Initialized Ollama client")

	return &OllamaClient{
		baseURL:    baseURL,
		httpClient: httpClient,
		model:      model,
	}
}

// findHealthyEndpoint returns the first endpoint whose /api/tags route answers
// 200 OK, falling back to the first endpoint when none respond. Probes use a
// short dedicated timeout so client construction is not delayed by the 30s
// generation timeout when hosts are unreachable.
func findHealthyEndpoint(endpoints []string) string {
	probe := &http.Client{Timeout: 2 * time.Second}
	for _, endpoint := range endpoints {
		resp, err := probe.Get(endpoint + "/api/tags")
		if err != nil {
			continue
		}
		// Close immediately; we only care about the status code.
		resp.Body.Close()
		if resp.StatusCode == http.StatusOK {
			return endpoint
		}
	}
	return endpoints[0]
}
// Generate sends a prompt to Ollama and returns the model's response text.
// The request is non-streaming, so the whole completion arrives in a single
// JSON payload. The supplied ctx bounds the request in addition to the
// client's own timeout.
func (c *OllamaClient) Generate(ctx context.Context, prompt string) (string, error) {
	reqBody := OllamaRequest{
		Model:  c.model,
		Prompt: prompt,
		Stream: false, // request one complete payload rather than a token stream
	}

	jsonData, err := json.Marshal(reqBody)
	if err != nil {
		return "", fmt.Errorf("failed to marshal request: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.baseURL+"/api/generate", bytes.NewReader(jsonData))
	if err != nil {
		return "", fmt.Errorf("failed to create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	startTime := time.Now()
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Read a short excerpt of the body for the error message (Ollama
		// returns a JSON error description); this also drains the body so
		// the transport can reuse the connection.
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 512))
		return "", fmt.Errorf("ollama API returned status %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))
	}

	var ollamaResp OllamaResponse
	if err := json.NewDecoder(resp.Body).Decode(&ollamaResp); err != nil {
		return "", fmt.Errorf("failed to decode response: %w", err)
	}

	log.Debug().
		Str("model", c.model).
		Dur("duration", time.Since(startTime)).
		Int("eval_count", ollamaResp.EvalCount).
		Msg("Ollama generation completed")

	return ollamaResp.Response, nil
}
// BuildTaskClassificationPrompt builds a prompt asking the model to classify
// a development task and answer with a strict JSON object (no markdown).
func (c *OllamaClient) BuildTaskClassificationPrompt(input *TaskAnalysisInput) string {
	var b strings.Builder

	b.WriteString("You are an expert software project manager analyzing development tasks. ")
	b.WriteString("Classify the following task and provide analysis in JSON format.\n\n")

	// Dynamic task-context section; optional fields are included only when set.
	b.WriteString("Task Details:\n")
	fmt.Fprintf(&b, "Title: %s\n", input.Title)
	fmt.Fprintf(&b, "Description: %s\n", input.Description)
	if len(input.Requirements) > 0 {
		fmt.Fprintf(&b, "Requirements: %s\n", strings.Join(input.Requirements, ", "))
	}
	if len(input.TechStack) > 0 {
		fmt.Fprintf(&b, "Tech Stack: %s\n", strings.Join(input.TechStack, ", "))
	}
	fmt.Fprintf(&b, "Priority: %s\n\n", input.Priority)

	// Static instruction block: expected JSON shape and allowed value ranges.
	b.WriteString(`Analyze this task and respond with valid JSON in this EXACT format:
{
 "task_type": "feature_development",
 "complexity_score": 0.7,
 "primary_domains": ["backend", "api"],
 "secondary_domains": ["testing"],
 "estimated_duration_hours": 8,
 "risk_level": "medium",
 "required_experience": "intermediate"
}

Task types: feature_development, bug_fix, refactoring, security, integration, optimization, maintenance
Complexity: number between 0.1-1.0
Duration: integer between 1-40
Risk: minimal, low, medium, high
Experience: junior, intermediate, senior

Respond ONLY with valid JSON, no markdown, no other text.`)

	return b.String()
}
// BuildSkillAnalysisPrompt builds a prompt asking the model for detailed
// skill requirements, given the task details and its prior classification.
func (c *OllamaClient) BuildSkillAnalysisPrompt(input *TaskAnalysisInput, classification *TaskClassification) string {
	var b strings.Builder

	b.WriteString("You are an expert technical recruiter analyzing skill requirements for software development tasks. ")
	b.WriteString("Analyze the task and provide detailed skill requirements in JSON format.\n\n")

	// Dynamic task-context section, enriched with the earlier classification.
	b.WriteString("Task Details:\n")
	fmt.Fprintf(&b, "Title: %s\n", input.Title)
	fmt.Fprintf(&b, "Description: %s\n", input.Description)
	fmt.Fprintf(&b, "Task Type: %s\n", classification.TaskType)
	fmt.Fprintf(&b, "Complexity: %.1f\n", classification.ComplexityScore)
	fmt.Fprintf(&b, "Primary Domains: %s\n", strings.Join(classification.PrimaryDomains, ", "))
	if len(input.Requirements) > 0 {
		fmt.Fprintf(&b, "Requirements: %s\n", strings.Join(input.Requirements, ", "))
	}
	if len(input.TechStack) > 0 {
		fmt.Fprintf(&b, "Tech Stack: %s\n", strings.Join(input.TechStack, ", "))
	}

	// Static instruction block: expected JSON shape and analysis focus areas.
	b.WriteString(`
Analyze and respond with JSON containing:
{
 "critical_skills": [
 {
 "domain": "skill domain name",
 "min_proficiency": 0.1-1.0,
 "weight": 0.1-1.0,
 "critical": true
 }
 ],
 "desirable_skills": [
 {
 "domain": "skill domain name",
 "min_proficiency": 0.1-1.0,
 "weight": 0.1-1.0,
 "critical": false
 }
 ],
 "total_skill_count": 0
}

Focus on:
- Programming languages and frameworks
- Domain expertise (backend, frontend, devops, etc.)
- Tools and technologies
- Soft skills relevant to the task type

Respond ONLY with valid JSON, no other text.`)

	return b.String()
}
// ParseTaskClassificationResponse parses an LLM response into a
// TaskClassification. Surrounding markdown code fences are stripped first,
// since models often wrap JSON output despite instructions not to. An
// unrecognized task_type value falls back to feature development.
func (c *OllamaClient) ParseTaskClassificationResponse(response string) (*TaskClassification, error) {
	// Strip optional ```json ... ``` fencing around the payload.
	cleaned := strings.TrimSpace(response)
	cleaned = strings.TrimPrefix(cleaned, "```json")
	cleaned = strings.TrimPrefix(cleaned, "```")
	cleaned = strings.TrimSuffix(cleaned, "```")
	cleaned = strings.TrimSpace(cleaned)

	var parsed struct {
		TaskType           string   `json:"task_type"`
		ComplexityScore    float64  `json:"complexity_score"`
		PrimaryDomains     []string `json:"primary_domains"`
		SecondaryDomains   []string `json:"secondary_domains"`
		EstimatedDuration  int      `json:"estimated_duration_hours"`
		RiskLevel          string   `json:"risk_level"`
		RequiredExperience string   `json:"required_experience"`
	}
	if err := json.Unmarshal([]byte(cleaned), &parsed); err != nil {
		return nil, fmt.Errorf("failed to parse classification response: %w", err)
	}

	// Map the task-type string onto the TaskType enum; unknown values default
	// to feature development.
	taskTypes := map[string]TaskType{
		"feature_development": TaskTypeFeatureDevelopment,
		"bug_fix":             TaskTypeBugFix,
		"refactoring":         TaskTypeRefactoring,
		"security":            TaskTypeSecurity,
		"integration":         TaskTypeIntegration,
		"optimization":        TaskTypeOptimization,
		"maintenance":         TaskTypeMaintenance,
	}
	taskType, known := taskTypes[parsed.TaskType]
	if !known {
		taskType = TaskTypeFeatureDevelopment
	}

	return &TaskClassification{
		TaskType:           taskType,
		ComplexityScore:    parsed.ComplexityScore,
		PrimaryDomains:     parsed.PrimaryDomains,
		SecondaryDomains:   parsed.SecondaryDomains,
		EstimatedDuration:  parsed.EstimatedDuration,
		RiskLevel:          parsed.RiskLevel,
		RequiredExperience: parsed.RequiredExperience,
	}, nil
}
// ParseSkillRequirementsResponse parses an LLM response into SkillRequirements.
// Surrounding markdown code fences are stripped before decoding, since models
// frequently wrap JSON output despite instructions not to.
func (c *OllamaClient) ParseSkillRequirementsResponse(response string) (*SkillRequirements, error) {
	// Clean the response - remove markdown code blocks if present.
	response = strings.TrimSpace(response)
	response = strings.TrimPrefix(response, "```json")
	response = strings.TrimPrefix(response, "```")
	response = strings.TrimSuffix(response, "```")
	response = strings.TrimSpace(response)

	// llmSkill mirrors the skill-entry shape requested in the prompt; one
	// named type replaces the two identical anonymous structs used before.
	type llmSkill struct {
		Domain         string  `json:"domain"`
		MinProficiency float64 `json:"min_proficiency"`
		Weight         float64 `json:"weight"`
		Critical       bool    `json:"critical"`
	}

	var result struct {
		CriticalSkills  []llmSkill `json:"critical_skills"`
		DesirableSkills []llmSkill `json:"desirable_skills"`
		// TotalSkillCount is accepted but deliberately ignored: the count is
		// recomputed below so it can never disagree with the parsed lists.
		TotalSkillCount int `json:"total_skill_count"`
	}
	if err := json.Unmarshal([]byte(response), &result); err != nil {
		return nil, fmt.Errorf("failed to parse skill requirements response: %w", err)
	}

	// convert maps the wire format onto the domain SkillRequirement type.
	convert := func(skills []llmSkill) []SkillRequirement {
		out := make([]SkillRequirement, len(skills))
		for i, s := range skills {
			out[i] = SkillRequirement{
				Domain:         s.Domain,
				MinProficiency: s.MinProficiency,
				Weight:         s.Weight,
				Critical:       s.Critical,
			}
		}
		return out
	}

	critical := convert(result.CriticalSkills)
	desirable := convert(result.DesirableSkills)

	return &SkillRequirements{
		CriticalSkills:  critical,
		DesirableSkills: desirable,
		TotalSkillCount: len(critical) + len(desirable),
	}, nil
}