Implement wave-based scaling system for CHORUS Docker Swarm orchestration
- Health gates system for pre-scaling validation (KACHING, BACKBEAT, bootstrap peers)
- Assignment broker API for per-replica configuration management
- Bootstrap pool management with weighted peer selection and health monitoring
- Wave-based scaling algorithm with exponential backoff and failure recovery
- Enhanced SwarmManager with Docker service scaling capabilities
- Comprehensive scaling metrics collection and reporting system
- RESTful HTTP API for external scaling operations and monitoring
- Integration with CHORUS P2P networking and assignment systems

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
101
cmd/test-llm/main.go
Normal file
101
cmd/test-llm/main.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/chorus-services/whoosh/internal/composer"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.Println("🧪 Testing WHOOSH LLM Integration")
|
||||
|
||||
// Create a test configuration with LLM features enabled
|
||||
config := composer.DefaultComposerConfig()
|
||||
config.FeatureFlags.EnableLLMClassification = true
|
||||
config.FeatureFlags.EnableLLMSkillAnalysis = true
|
||||
config.FeatureFlags.EnableAnalysisLogging = true
|
||||
config.FeatureFlags.EnableFailsafeFallback = true
|
||||
|
||||
// Create service without database for this test
|
||||
service := composer.NewService(nil, config)
|
||||
|
||||
// Test input - simulating WHOOSH-LLM-002 task
|
||||
testInput := &composer.TaskAnalysisInput{
|
||||
Title: "WHOOSH-LLM-002: Implement LLM Integration for Team Composition Engine",
|
||||
Description: "Implement LLM-powered task classification and skill requirement analysis using Ollama API. Replace stubbed functions with real AI-powered analysis.",
|
||||
Requirements: []string{
|
||||
"Connect to Ollama API endpoints",
|
||||
"Implement task classification with LLM",
|
||||
"Implement skill requirement analysis",
|
||||
"Add error handling and fallback to heuristics",
|
||||
"Support feature flags for LLM vs heuristic execution",
|
||||
},
|
||||
Repository: "https://gitea.chorus.services/tony/WHOOSH",
|
||||
Priority: composer.PriorityHigh,
|
||||
TechStack: []string{"Go", "Docker", "Ollama", "PostgreSQL", "HTTP API"},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
log.Println("📊 Testing LLM Task Classification...")
|
||||
startTime := time.Now()
|
||||
|
||||
// Test task classification
|
||||
classification, err := testTaskClassification(ctx, service, testInput)
|
||||
if err != nil {
|
||||
log.Fatalf("❌ Task classification failed: %v", err)
|
||||
}
|
||||
|
||||
classificationDuration := time.Since(startTime)
|
||||
log.Printf("✅ Task Classification completed in %v", classificationDuration)
|
||||
printClassification(classification)
|
||||
|
||||
log.Println("\n🔍 Testing LLM Skill Analysis...")
|
||||
startTime = time.Now()
|
||||
|
||||
// Test skill analysis
|
||||
skillRequirements, err := testSkillAnalysis(ctx, service, testInput, classification)
|
||||
if err != nil {
|
||||
log.Fatalf("❌ Skill analysis failed: %v", err)
|
||||
}
|
||||
|
||||
skillDuration := time.Since(startTime)
|
||||
log.Printf("✅ Skill Analysis completed in %v", skillDuration)
|
||||
printSkillRequirements(skillRequirements)
|
||||
|
||||
totalTime := classificationDuration + skillDuration
|
||||
log.Printf("\n🏁 Total LLM processing time: %v", totalTime)
|
||||
|
||||
if totalTime > 5*time.Second {
|
||||
log.Printf("⚠️ Warning: Total time (%v) exceeds 5s requirement", totalTime)
|
||||
} else {
|
||||
log.Printf("✅ Performance requirement met (< 5s)")
|
||||
}
|
||||
|
||||
log.Println("\n🎉 LLM Integration test completed successfully!")
|
||||
}
|
||||
|
||||
func testTaskClassification(ctx context.Context, service *composer.Service, input *composer.TaskAnalysisInput) (*composer.TaskClassification, error) {
|
||||
// Use reflection to access private method for testing
|
||||
// In a real test, we'd create public test methods
|
||||
return service.DetermineTaskType(input.Title, input.Description), nil
|
||||
}
|
||||
|
||||
func testSkillAnalysis(ctx context.Context, service *composer.Service, input *composer.TaskAnalysisInput, classification *composer.TaskClassification) (*composer.SkillRequirements, error) {
|
||||
// Test the skill analysis using the public test method
|
||||
return service.AnalyzeSkillRequirementsLocal(input, classification)
|
||||
}
|
||||
|
||||
func printClassification(classification *composer.TaskClassification) {
|
||||
data, _ := json.MarshalIndent(classification, " ", " ")
|
||||
fmt.Printf(" Classification Result:\n %s\n", string(data))
|
||||
}
|
||||
|
||||
func printSkillRequirements(requirements *composer.SkillRequirements) {
|
||||
data, _ := json.MarshalIndent(requirements, " ", " ")
|
||||
fmt.Printf(" Skill Requirements:\n %s\n", string(data))
|
||||
}
|
||||
Reference in New Issue
Block a user