package server

import (
	"bytes"
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/chorus-services/whoosh/internal/agents"
	"github.com/chorus-services/whoosh/internal/auth"
	"github.com/chorus-services/whoosh/internal/backbeat"
	"github.com/chorus-services/whoosh/internal/composer"
	"github.com/chorus-services/whoosh/internal/config"
	"github.com/chorus-services/whoosh/internal/council"
	"github.com/chorus-services/whoosh/internal/database"
	"github.com/chorus-services/whoosh/internal/gitea"
	"github.com/chorus-services/whoosh/internal/monitor"
	"github.com/chorus-services/whoosh/internal/orchestrator"
	"github.com/chorus-services/whoosh/internal/p2p"
	"github.com/chorus-services/whoosh/internal/tasks"
	"github.com/chorus-services/whoosh/internal/tracing"
	"github.com/chorus-services/whoosh/internal/validation"
	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"github.com/go-chi/cors"
	"github.com/go-chi/render"
	"github.com/google/uuid"
	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgconn"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"go.opentelemetry.io/otel/attribute"
)

// Global version variable set by main package
var version = "development"

var ucxlIdentSanitizer = regexp.MustCompile(`[^A-Za-z0-9_.-]`)
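
// sanitizeUCXLIdentifier lowercases the input and replaces every character
// outside [a-z0-9_.-] with "-". For example, "Backend Dev #2" becomes
// "backend-dev--2".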
func sanitizeUCXLIdentifier(input string) string {
	lowered := strings.ToLower(input)
	return ucxlIdentSanitizer.ReplaceAllString(lowered, "-")
}
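
// truncateString caps input at limit bytes; for limits above three it keeps
// limit-3 bytes and appends "..." so the result never exceeds the limit.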
func truncateString(input string, limit int) string {
	if limit <= 0 || len(input) <= limit {
		return input
	}
	if limit <= 3 {
		return input[:limit]
	}
	return input[:limit-3] + "..."
}

// SetVersion sets the global version variable
func SetVersion(v string) {
	version = v
}

type Server struct {
	config            *config.Config
	db                *database.DB
	httpServer        *http.Server
	router            chi.Router
	giteaClient       *gitea.Client
	webhookHandler    *gitea.WebhookHandler
	authMiddleware    *auth.Middleware
	rateLimiter       *auth.RateLimiter
	p2pDiscovery      *p2p.Discovery
	p2pBroadcaster    *p2p.Broadcaster
	agentRegistry     *agents.Registry
	backbeat          *backbeat.Integration
	teamComposer      *composer.Service
	councilComposer   *council.CouncilComposer
	taskService       *tasks.Service
	giteaIntegration  *tasks.GiteaIntegration
	repoMonitor       *monitor.Monitor
	swarmManager      *orchestrator.SwarmManager
	agentDeployer     *orchestrator.AgentDeployer
	scalingController *orchestrator.ScalingController
	healthGates       *orchestrator.HealthGates
	assignmentBroker  *orchestrator.AssignmentBroker
	bootstrapManager  *orchestrator.BootstrapPoolManager
	metricsCollector  *orchestrator.ScalingMetricsCollector
	scalingAPI        *orchestrator.ScalingAPI
	validator         *validation.Validator
	constraintMu      sync.Mutex
	rebroadcastMu     sync.Mutex
	activeBroadcasts  map[uuid.UUID]context.CancelFunc
	roleProfiles      map[string]RoleProfile
	startTime         time.Time
}
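
// NewServer wires up the WHOOSH HTTP server: core task and Gitea services,
// P2P discovery and the agent registry, the team and council composers, the
// optional Docker Swarm orchestration/scaling stack, the repository monitor,
// and (when enabled) BACKBEAT integration, then builds the router and routes.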
func NewServer(cfg *config.Config, db *database.DB) (*Server, error) {
	// Initialize core services
	taskService := tasks.NewService(db.Pool)
	giteaIntegration := tasks.NewGiteaIntegration(taskService, gitea.NewClient(cfg.GITEA), nil)

	// Initialize P2P discovery and agent registry
	p2pDiscovery := p2p.NewDiscovery()
	p2pBroadcaster := p2p.NewBroadcaster(p2pDiscovery)
	agentRegistry := agents.NewRegistry(db.Pool, p2pDiscovery)

	// Initialize team composer
	teamComposer := composer.NewService(db.Pool, nil) // Use default config

	// Initialize council composer for project kickoffs
	councilComposer := council.NewCouncilComposer(db.Pool)

	// Initialize Docker Swarm orchestrator services conditionally
	var swarmManager *orchestrator.SwarmManager
	var agentDeployer *orchestrator.AgentDeployer
	var scalingController *orchestrator.ScalingController
	var healthGates *orchestrator.HealthGates
	var assignmentBroker *orchestrator.AssignmentBroker
	var bootstrapManager *orchestrator.BootstrapPoolManager
	var metricsCollector *orchestrator.ScalingMetricsCollector
	var scalingAPI *orchestrator.ScalingAPI

	if cfg.Docker.Enabled {
		var err error
		swarmManager, err = orchestrator.NewSwarmManager("", "registry.home.deepblack.cloud")
		if err != nil {
			return nil, fmt.Errorf("failed to create swarm manager: %w", err)
		}

		agentDeployer = orchestrator.NewAgentDeployer(swarmManager, db.Pool, "registry.home.deepblack.cloud")

		// Initialize scaling system components
		log.Info().Msg("🌊 Initializing wave-based scaling system")

		// Initialize health gates for scaling decisions
		healthGates = orchestrator.NewHealthGates(
			"http://localhost:8081", // KACHING URL - will be configurable
			"http://localhost:8082", // BACKBEAT URL - will be configurable
			"http://localhost:8080", // Self for CHORUS health
		)

		// Initialize bootstrap pool manager
		bootstrapConfig := orchestrator.BootstrapPoolConfig{
			MinPoolSize:         5,
			MaxPoolSize:         30,
			HealthCheckInterval: 2 * time.Minute,
			StaleThreshold:      10 * time.Minute,
			PreferredRoles:      []string{"admin", "coordinator", "stable"},
		}
		bootstrapManager = orchestrator.NewBootstrapPoolManager(bootstrapConfig)

		// Initialize assignment broker
		assignmentBroker = orchestrator.NewAssignmentBroker(bootstrapManager)

		// Initialize metrics collector
		metricsCollector = orchestrator.NewScalingMetricsCollector(1000) // Keep 1000 operations

		// Initialize scaling controller
		scalingController = orchestrator.NewScalingController(
			swarmManager,
			healthGates,
			assignmentBroker,
			bootstrapManager,
			metricsCollector,
		)

		// Initialize scaling API
		scalingAPI = orchestrator.NewScalingAPI(scalingController, metricsCollector)

		log.Info().Msg("✅ Wave-based scaling system initialized")
	} else {
		log.Warn().Msg("🐳 Docker integration disabled - scaling system and council agent deployment unavailable")
	}

	// Initialize repository monitor with team composer, council composer, and agent deployer
	repoMonitor := monitor.NewMonitor(db.Pool, cfg.GITEA, teamComposer, councilComposer, agentDeployer)

	s := &Server{
		config:            cfg,
		db:                db,
		giteaClient:       gitea.NewClient(cfg.GITEA),
		webhookHandler:    gitea.NewWebhookHandler(cfg.GITEA.WebhookToken),
		authMiddleware:    auth.NewMiddleware(cfg.Auth.JWTSecret, cfg.Auth.ServiceTokens),
		rateLimiter:       auth.NewRateLimiter(100, time.Minute),
		p2pDiscovery:      p2pDiscovery,
		p2pBroadcaster:    p2pBroadcaster,
		agentRegistry:     agentRegistry,
		teamComposer:      teamComposer,
		councilComposer:   councilComposer,
		taskService:       taskService,
		giteaIntegration:  giteaIntegration,
		repoMonitor:       repoMonitor,
		swarmManager:      swarmManager,
		agentDeployer:     agentDeployer,
		scalingController: scalingController,
		healthGates:       healthGates,
		assignmentBroker:  assignmentBroker,
		bootstrapManager:  bootstrapManager,
		metricsCollector:  metricsCollector,
		scalingAPI:        scalingAPI,
		validator:         validation.NewValidator(),
		roleProfiles:      defaultRoleProfiles(),
	}
	s.activeBroadcasts = make(map[uuid.UUID]context.CancelFunc)
	s.startTime = time.Now()

	// Initialize BACKBEAT integration if enabled
	if cfg.BACKBEAT.Enabled {
		backbeatIntegration, err := backbeat.NewIntegration(&cfg.BACKBEAT)
		if err != nil {
			return nil, fmt.Errorf("failed to create BACKBEAT integration: %w", err)
		}
		s.backbeat = backbeatIntegration
	}

	s.setupRouter()
	s.setupRoutes()

	s.httpServer = &http.Server{
		Addr:         cfg.Server.ListenAddr,
		Handler:      s.router,
		ReadTimeout:  cfg.Server.ReadTimeout,
		WriteTimeout: cfg.Server.WriteTimeout,
	}

	return s, nil
}

func (s *Server) setupRouter() {
	r := chi.NewRouter()

	// Middleware
	r.Use(middleware.RequestID)
	r.Use(middleware.RealIP)
	r.Use(middleware.Logger)
	r.Use(middleware.Recoverer)
	r.Use(middleware.Timeout(30 * time.Second))
	r.Use(validation.SecurityHeaders)
	r.Use(s.rateLimiter.RateLimitMiddleware)

	// CORS configuration - restrict origins to configured values
	r.Use(cors.Handler(cors.Options{
		AllowedOrigins:   s.config.Server.AllowedOrigins,
		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
		AllowedHeaders:   []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token", "X-Gitea-Signature"},
		ExposedHeaders:   []string{"Link"},
		AllowCredentials: true,
		MaxAge:           300,
	}))

	// Content-Type handling
	r.Use(render.SetContentType(render.ContentTypeJSON))

	s.router = r
}

func (s *Server) setupRoutes() {
	// Static file serving for UI assets
	uiDir := resolveUIDir()
	log.Info().Str("ui_dir", uiDir).Msg("📦 WHOOSH serving UI static files")
	s.router.Handle("/ui/*", http.StripPrefix("/ui/", http.FileServer(http.Dir(uiDir))))

	// Root-path static files to avoid '/ui' prefix in URLs
	s.router.Get("/styles.css", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, filepath.Join(resolveUIDir(), "styles.css"))
	})
	s.router.Get("/script.js", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, filepath.Join(resolveUIDir(), "script.js"))
	})
	// Optional: serve assets at root as well
	s.router.Handle("/assets/*", http.StripPrefix("/", http.FileServer(http.Dir(resolveUIDir()))))

	// Root route - serve basic dashboard
	s.router.Get("/", s.dashboardHandler)

	// Health check endpoints
	s.router.Get("/health", s.healthHandler)
	s.router.Get("/health/ready", s.readinessHandler)
	// Metrics endpoint (Prometheus text format)
	s.router.Get("/metrics", s.metricsHandler)

	// Back-compat alias for admin health details under /api
	// Some UIs or external monitors may call /api/admin/health/details
	// even though the canonical route is /admin/health/details
	s.router.Get("/api/admin/health/details", s.healthDetailsHandler)

	// Admin health endpoint with detailed information
	s.router.Get("/admin/health/details", s.healthDetailsHandler)

	// API v1 routes
	s.router.Route("/api/v1", func(r chi.Router) {
		// MVP endpoints - minimal team management
		r.Route("/teams", func(r chi.Router) {
			r.Get("/", s.listTeamsHandler)
			r.With(s.authMiddleware.AdminRequired).Post("/", s.createTeamHandler)
			r.Get("/{teamID}", s.getTeamHandler)
			r.With(s.authMiddleware.AdminRequired).Put("/{teamID}/status", s.updateTeamStatusHandler)
			r.Post("/analyze", s.analyzeTeamCompositionHandler)
		})

		// Task ingestion from GITEA
		r.Route("/tasks", func(r chi.Router) {
			r.Get("/", s.listTasksHandler)
			r.With(s.authMiddleware.ServiceTokenRequired).Post("/ingest", s.ingestTaskHandler)
			r.Get("/{taskID}", s.getTaskHandler)
		})

		// Project management endpoints
		r.Route("/projects", func(r chi.Router) {
			r.Get("/", s.listProjectsHandler)
			r.Post("/", s.createProjectHandler)

			r.Route("/{projectID}", func(r chi.Router) {
				r.Get("/", s.getProjectHandler)
				r.Delete("/", s.deleteProjectHandler)
				r.Get("/tasks", s.listProjectTasksHandler)
				r.Get("/tasks/available", s.listAvailableTasksHandler)
				r.Get("/repository", s.getProjectRepositoryHandler)
				r.Post("/analyze", s.analyzeProjectHandler)

				r.Route("/tasks/{taskNumber}", func(r chi.Router) {
					r.Get("/", s.getProjectTaskHandler)
					r.Post("/claim", s.claimTaskHandler)
					r.Put("/status", s.updateTaskStatusHandler)
					r.Post("/complete", s.completeTaskHandler)
				})
			})
		})

		// Agent registration endpoints
		r.Route("/agents", func(r chi.Router) {
			r.Get("/", s.listAgentsHandler)
			r.Post("/register", s.registerAgentHandler)
			r.Put("/{agentID}/status", s.updateAgentStatusHandler)
		})

		// SLURP proxy endpoints
		r.Route("/slurp", func(r chi.Router) {
			r.Post("/submit", s.slurpSubmitHandler)
			r.Get("/artifacts/{ucxlAddr}", s.slurpRetrieveHandler)
		})

		// Repository monitoring endpoints
		r.Route("/repositories", func(r chi.Router) {
			r.Get("/", s.listRepositoriesHandler)
			r.With(s.authMiddleware.AdminRequired).Post("/", s.createRepositoryHandler)
			r.Get("/{repoID}", s.getRepositoryHandler)
			r.With(s.authMiddleware.AdminRequired).Put("/{repoID}", s.updateRepositoryHandler)
			r.With(s.authMiddleware.AdminRequired).Delete("/{repoID}", s.deleteRepositoryHandler)
			r.With(s.authMiddleware.AdminRequired).Post("/{repoID}/sync", s.syncRepositoryHandler)
			r.With(s.authMiddleware.AdminRequired).Post("/{repoID}/ensure-labels", s.ensureRepositoryLabelsHandler)
			r.Get("/{repoID}/logs", s.getRepositorySyncLogsHandler)
		})

		// Council management endpoints
		r.Route("/councils", func(r chi.Router) {
			r.Get("/", s.listCouncilsHandler)
			r.Get("/{councilID}", s.getCouncilHandler)

			r.Route("/{councilID}/artifacts", func(r chi.Router) {
				r.Get("/", s.getCouncilArtifactsHandler)
				r.With(s.authMiddleware.AdminRequired).Post("/", s.createCouncilArtifactHandler)
			})

			// Agent role claiming endpoint
			r.Post("/{councilID}/claims", s.handleCouncilRoleClaim)

			// Persona status acknowledgment from CHORUS agents
			r.Post("/{councilID}/roles/{roleName}/personas", s.handleCouncilPersonaAck)
		})

		// Scaling system endpoints
		if s.scalingAPI != nil {
			log.Info().Msg("🌊 Registering wave-based scaling API routes")
			s.scalingAPI.RegisterRoutes(r)
		}

		// Assignment broker endpoints (if Docker enabled)
		if s.assignmentBroker != nil {
			r.Route("/assignments", func(r chi.Router) {
				s.assignmentBroker.RegisterRoutes(r)
			})
		}

		// BACKBEAT monitoring endpoints
		r.Route("/backbeat", func(r chi.Router) {
			r.Get("/status", s.backbeatStatusHandler)
		})
	})

	// GITEA webhook endpoint
	s.router.Post(s.config.GITEA.WebhookPath, s.giteaWebhookHandler)
}
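
// Start launches BACKBEAT (when configured), the bootstrap pool manager, P2P
// discovery, the agent registry, and the repository monitor before blocking
// on the HTTP listener.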
func (s *Server) Start(ctx context.Context) error {
	// Start BACKBEAT integration if enabled
	if s.backbeat != nil {
		if err := s.backbeat.Start(ctx); err != nil {
			return fmt.Errorf("failed to start BACKBEAT integration: %w", err)
		}
	}

	// Start bootstrap pool manager if available
	if s.bootstrapManager != nil {
		log.Info().Msg("🔄 Starting bootstrap pool manager")
		go s.bootstrapManager.Start(ctx)
	}

	// Start P2P discovery service
	if err := s.p2pDiscovery.Start(); err != nil {
		return fmt.Errorf("failed to start P2P discovery: %w", err)
	}

	// Start agent registry service
	if err := s.agentRegistry.Start(); err != nil {
		return fmt.Errorf("failed to start agent registry: %w", err)
	}

	// Start repository monitoring service
	if s.repoMonitor != nil {
		go func() {
			if err := s.repoMonitor.Start(ctx); err != nil && err != context.Canceled {
				log.Error().Err(err).Msg("Repository monitoring service failed")
			}
		}()
		log.Info().Msg("🔍 Repository monitoring service started")
	}

	log.Info().
		Str("addr", s.httpServer.Addr).
		Msg("HTTP server starting")

	if err := s.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		return fmt.Errorf("server failed to start: %w", err)
	}

	return nil
}
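
// Shutdown stops the background services started in Start (BACKBEAT, agent
// registry, P2P discovery, repository monitor, scaling controller) and then
// gracefully shuts down the HTTP server.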
func (s *Server) Shutdown(ctx context.Context) error {
	log.Info().Msg("HTTP server shutting down")

	// Stop BACKBEAT integration
	if s.backbeat != nil {
		if err := s.backbeat.Stop(); err != nil {
			log.Error().Err(err).Msg("Failed to stop BACKBEAT integration")
		}
	}

	// Stop agent registry service
	if err := s.agentRegistry.Stop(); err != nil {
		log.Error().Err(err).Msg("Failed to stop agent registry service")
	}

	// Stop P2P discovery service
	if err := s.p2pDiscovery.Stop(); err != nil {
		log.Error().Err(err).Msg("Failed to stop P2P discovery service")
	}

	// Stop repository monitoring service
	if s.repoMonitor != nil {
		s.repoMonitor.Stop()
		log.Info().Msg("🛑 Repository monitoring service stopped")
	}

	// Stop scaling controller and related services
	if s.scalingController != nil {
		if err := s.scalingController.Close(); err != nil {
			log.Error().Err(err).Msg("Failed to stop scaling controller")
		} else {
			log.Info().Msg("🌊 Wave-based scaling controller stopped")
		}
	}

	if err := s.httpServer.Shutdown(ctx); err != nil {
		return fmt.Errorf("server shutdown failed: %w", err)
	}

	return nil
}

// Health check handlers
func (s *Server) healthHandler(w http.ResponseWriter, r *http.Request) {
	response := map[string]interface{}{
		"status":  "ok",
		"service": "whoosh",
		"version": version,
	}

	// Include BACKBEAT health information if available
	if s.backbeat != nil {
		response["backbeat"] = s.backbeat.GetHealth()
	}

	render.JSON(w, r, response)
}

func (s *Server) readinessHandler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
	defer cancel()

	// Check database connection
	if err := s.db.Health(ctx); err != nil {
		log.Error().Err(err).Msg("Database health check failed")
		render.Status(r, http.StatusServiceUnavailable)
		render.JSON(w, r, map[string]string{
			"status": "unavailable",
			"error":  "database connection failed",
		})
		return
	}

	render.JSON(w, r, map[string]string{
		"status":   "ready",
		"database": "connected",
	})
}

// metricsHandler exposes a minimal Prometheus text format so the UI
// dashboard can render metrics and external scrapers can poll it.
func (s *Server) metricsHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8")

	uptime := time.Since(s.startTime).Seconds()

	ctx, cancel := context.WithTimeout(r.Context(), 500*time.Millisecond)
	defer cancel()
	dbHealthy := 0
	if err := s.db.Health(ctx); err == nil {
		dbHealthy = 1
	}

	var buf bytes.Buffer
	// Service info
	fmt.Fprintf(&buf, "# HELP whoosh_info Service information.\n")
	fmt.Fprintf(&buf, "# TYPE whoosh_info gauge\n")
	fmt.Fprintf(&buf, "whoosh_info{version=\"%s\"} 1\n", version)

	// Uptime
	fmt.Fprintf(&buf, "# HELP whoosh_uptime_seconds Uptime of the WHOOSH server.\n")
	fmt.Fprintf(&buf, "# TYPE whoosh_uptime_seconds counter\n")
	fmt.Fprintf(&buf, "whoosh_uptime_seconds %.0f\n", uptime)

	// Database health
	fmt.Fprintf(&buf, "# HELP whoosh_database_healthy Database health status (1=healthy,0=unhealthy).\n")
	fmt.Fprintf(&buf, "# TYPE whoosh_database_healthy gauge\n")
	fmt.Fprintf(&buf, "whoosh_database_healthy %d\n", dbHealthy)

	_, _ = w.Write(buf.Bytes())
}
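
// A scrape of /metrics returns plain text along these lines (illustrative
// values; the version label and uptime depend on the running build):
//
//	whoosh_info{version="development"} 1
//	whoosh_uptime_seconds 3600
//	whoosh_database_healthy 1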

// healthDetailsHandler provides comprehensive system health information
func (s *Server) healthDetailsHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartSpan(r.Context(), "health_check_details")
	defer span.End()

	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	response := map[string]interface{}{
		"service":    "whoosh",
		"version":    version,
		"timestamp":  time.Now().Unix(),
		"uptime":     time.Since(s.startTime).Seconds(),
		"status":     "healthy",
		"components": make(map[string]interface{}),
	}

	overallHealthy := true
	components := make(map[string]interface{})

	// Database Health Check
	dbHealth := map[string]interface{}{
		"name": "database",
		"type": "postgresql",
	}

	if err := s.db.Health(ctx); err != nil {
		dbHealth["status"] = "unhealthy"
		dbHealth["error"] = err.Error()
		dbHealth["last_checked"] = time.Now().Unix()
		overallHealthy = false
		span.SetAttributes(attribute.Bool("health.database.healthy", false))
	} else {
		dbHealth["status"] = "healthy"
		dbHealth["last_checked"] = time.Now().Unix()

		// Get database statistics
		var dbStats map[string]interface{}
		if stats := s.db.Pool.Stat(); stats != nil {
			dbStats = map[string]interface{}{
				"max_conns":          stats.MaxConns(),
				"acquired_conns":     stats.AcquiredConns(),
				"idle_conns":         stats.IdleConns(),
				"constructing_conns": stats.ConstructingConns(),
			}
		}
		dbHealth["statistics"] = dbStats
		span.SetAttributes(attribute.Bool("health.database.healthy", true))
	}
	components["database"] = dbHealth

	// Gitea Health Check
	giteaHealth := map[string]interface{}{
		"name": "gitea",
		"type": "external_service",
	}

	if s.giteaClient != nil {
		if err := s.giteaClient.TestConnection(ctx); err != nil {
			giteaHealth["status"] = "unhealthy"
			giteaHealth["error"] = err.Error()
			giteaHealth["endpoint"] = s.config.GITEA.BaseURL
			overallHealthy = false
			span.SetAttributes(attribute.Bool("health.gitea.healthy", false))
		} else {
			giteaHealth["status"] = "healthy"
			giteaHealth["endpoint"] = s.config.GITEA.BaseURL
			giteaHealth["webhook_path"] = s.config.GITEA.WebhookPath
			span.SetAttributes(attribute.Bool("health.gitea.healthy", true))
		}
	} else {
		giteaHealth["status"] = "not_configured"
		span.SetAttributes(attribute.Bool("health.gitea.healthy", false))
	}
	giteaHealth["last_checked"] = time.Now().Unix()
	components["gitea"] = giteaHealth

	// BackBeat Health Check
	backbeatHealth := map[string]interface{}{
		"name": "backbeat",
		"type": "internal_service",
	}

	if s.backbeat != nil {
		bbHealth := s.backbeat.GetHealth()
		if connected, ok := bbHealth["connected"].(bool); ok && connected {
			backbeatHealth["status"] = "healthy"
			backbeatHealth["details"] = bbHealth
			span.SetAttributes(attribute.Bool("health.backbeat.healthy", true))
		} else {
			backbeatHealth["status"] = "unhealthy"
			backbeatHealth["details"] = bbHealth
			backbeatHealth["error"] = "not connected to NATS cluster"
			overallHealthy = false
			span.SetAttributes(attribute.Bool("health.backbeat.healthy", false))
		}
	} else {
		backbeatHealth["status"] = "not_configured"
		span.SetAttributes(attribute.Bool("health.backbeat.healthy", false))
	}
	backbeatHealth["last_checked"] = time.Now().Unix()
	components["backbeat"] = backbeatHealth

	// Docker Swarm Health Check (if enabled)
	swarmHealth := map[string]interface{}{
		"name": "docker_swarm",
		"type": "orchestration",
	}

	if s.config.Docker.Enabled {
		// Basic Docker connection check - actual swarm health would need Docker client
		swarmHealth["status"] = "unknown"
		swarmHealth["note"] = "Docker integration enabled but health check not implemented"
		swarmHealth["socket_path"] = s.config.Docker.Host
	} else {
		swarmHealth["status"] = "disabled"
	}
	swarmHealth["last_checked"] = time.Now().Unix()
	components["docker_swarm"] = swarmHealth

	// Repository Monitor Health
	monitorHealth := map[string]interface{}{
		"name": "repository_monitor",
		"type": "internal_service",
	}

	if s.repoMonitor != nil {
		// Get repository monitoring statistics
		query := `SELECT
			COUNT(*) as total_repos,
			COUNT(*) FILTER (WHERE sync_status = 'active') as active_repos,
			COUNT(*) FILTER (WHERE sync_status = 'error') as error_repos,
			COUNT(*) FILTER (WHERE monitor_issues = true) as monitored_repos
		FROM repositories`

		var totalRepos, activeRepos, errorRepos, monitoredRepos int
		err := s.db.Pool.QueryRow(ctx, query).Scan(&totalRepos, &activeRepos, &errorRepos, &monitoredRepos)

		if err != nil {
			monitorHealth["status"] = "unhealthy"
			monitorHealth["error"] = err.Error()
			overallHealthy = false
		} else {
			monitorHealth["status"] = "healthy"
			monitorHealth["statistics"] = map[string]interface{}{
				"total_repositories":     totalRepos,
				"active_repositories":    activeRepos,
				"error_repositories":     errorRepos,
				"monitored_repositories": monitoredRepos,
			}
		}
		span.SetAttributes(attribute.Bool("health.repository_monitor.healthy", err == nil))
	} else {
		monitorHealth["status"] = "not_configured"
		span.SetAttributes(attribute.Bool("health.repository_monitor.healthy", false))
	}
	monitorHealth["last_checked"] = time.Now().Unix()
	components["repository_monitor"] = monitorHealth

	// Overall system status
	if !overallHealthy {
		response["status"] = "unhealthy"
		span.SetAttributes(
			attribute.String("health.overall_status", "unhealthy"),
			attribute.Bool("health.overall_healthy", false),
		)
	} else {
		span.SetAttributes(
			attribute.String("health.overall_status", "healthy"),
			attribute.Bool("health.overall_healthy", true),
		)
	}

	response["components"] = components
	response["healthy"] = overallHealthy

	// Set appropriate HTTP status
	if !overallHealthy {
		render.Status(r, http.StatusServiceUnavailable)
	}

	render.JSON(w, r, response)
}

// MVP handlers for team and task management
func (s *Server) listTeamsHandler(w http.ResponseWriter, r *http.Request) {
	// Parse pagination parameters
	limitStr := r.URL.Query().Get("limit")
	offsetStr := r.URL.Query().Get("offset")

	limit := 20 // Default limit
	offset := 0 // Default offset

	if limitStr != "" {
		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 100 {
			limit = l
		}
	}

	if offsetStr != "" {
		if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 {
			offset = o
		}
	}

	// Get teams from database
	teams, total, err := s.teamComposer.ListTeams(r.Context(), limit, offset)
	if err != nil {
		log.Error().Err(err).Msg("Failed to list teams")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to retrieve teams"})
		return
	}

	render.JSON(w, r, map[string]interface{}{
		"teams":  teams,
		"total":  total,
		"limit":  limit,
		"offset": offset,
	})
}
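
// createTeamHandler decodes a composer.TaskAnalysisInput from the request
// body, requires Title and Description, defaults Priority to medium, runs the
// composer's analysis, and persists the resulting team, answering 201 with
// both the team and the composition result.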
func (s *Server) createTeamHandler(w http.ResponseWriter, r *http.Request) {
	var taskInput composer.TaskAnalysisInput

	if err := json.NewDecoder(r.Body).Decode(&taskInput); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate required fields
	if taskInput.Title == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "title is required"})
		return
	}

	if taskInput.Description == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "description is required"})
		return
	}

	// Set defaults if not provided
	if taskInput.Priority == "" {
		taskInput.Priority = composer.PriorityMedium
	}

	log.Info().
		Str("task_title", taskInput.Title).
		Str("priority", string(taskInput.Priority)).
		Msg("Starting team composition for new task")

	// Analyze task and compose team
	result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), &taskInput)
	if err != nil {
		log.Error().Err(err).Msg("Team composition failed")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "team composition failed"})
		return
	}

	// Create the team in database
	team, err := s.teamComposer.CreateTeam(r.Context(), result.TeamComposition, &taskInput)
	if err != nil {
		log.Error().Err(err).Msg("Failed to create team")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to create team"})
		return
	}

	log.Info().
		Str("team_id", team.ID.String()).
		Str("team_name", team.Name).
		Float64("confidence_score", result.TeamComposition.ConfidenceScore).
		Msg("Team created successfully")

	// Return both the team and the composition analysis
	response := map[string]interface{}{
		"team":               team,
		"composition_result": result,
		"message":            "Team created successfully",
	}

	render.Status(r, http.StatusCreated)
	render.JSON(w, r, response)
}

func (s *Server) getTeamHandler(w http.ResponseWriter, r *http.Request) {
	teamIDStr := chi.URLParam(r, "teamID")
	teamID, err := uuid.Parse(teamIDStr)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid team ID"})
		return
	}

	team, assignments, err := s.teamComposer.GetTeam(r.Context(), teamID)
	if err != nil {
		if err.Error() == "team not found" {
			render.Status(r, http.StatusNotFound)
			render.JSON(w, r, map[string]string{"error": "team not found"})
			return
		}

		log.Error().Err(err).Str("team_id", teamIDStr).Msg("Failed to get team")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to retrieve team"})
		return
	}

	response := map[string]interface{}{
		"team":        team,
		"assignments": assignments,
	}

	render.JSON(w, r, response)
}

func (s *Server) updateTeamStatusHandler(w http.ResponseWriter, r *http.Request) {
	teamIDStr := chi.URLParam(r, "teamID")
	teamID, err := uuid.Parse(teamIDStr)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid team ID"})
		return
	}

	var statusUpdate struct {
		Status string `json:"status"`
		Reason string `json:"reason,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&statusUpdate); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate status values
	validStatuses := map[string]bool{
		"forming":   true,
		"active":    true,
		"completed": true,
		"disbanded": true,
	}

	if !validStatuses[statusUpdate.Status] {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid status. Valid values: forming, active, completed, disbanded"})
		return
	}

	// Update team status in database
	updateQuery := `UPDATE teams SET status = $1, updated_at = $2 WHERE id = $3`

	if statusUpdate.Status == "completed" {
		updateQuery = `UPDATE teams SET status = $1, updated_at = $2, completed_at = $2 WHERE id = $3`
	}

	_, err = s.db.Pool.Exec(r.Context(), updateQuery, statusUpdate.Status, time.Now(), teamID)
	if err != nil {
		log.Error().Err(err).
			Str("team_id", teamIDStr).
			Str("status", statusUpdate.Status).
			Msg("Failed to update team status")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to update team status"})
		return
	}

	log.Info().
		Str("team_id", teamIDStr).
		Str("status", statusUpdate.Status).
		Str("reason", statusUpdate.Reason).
		Msg("Team status updated")

	render.JSON(w, r, map[string]interface{}{
		"team_id": teamIDStr,
		"status":  statusUpdate.Status,
		"message": "Team status updated successfully",
	})
}

func (s *Server) analyzeTeamCompositionHandler(w http.ResponseWriter, r *http.Request) {
	var taskInput composer.TaskAnalysisInput

	if err := json.NewDecoder(r.Body).Decode(&taskInput); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate required fields
	if taskInput.Title == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "title is required"})
		return
	}

	if taskInput.Description == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "description is required"})
		return
	}

	// Set defaults if not provided
	if taskInput.Priority == "" {
		taskInput.Priority = composer.PriorityMedium
	}

	log.Info().
		Str("task_title", taskInput.Title).
		Str("priority", string(taskInput.Priority)).
		Msg("Analyzing team composition requirements")

	// Analyze task and compose team (without creating it)
	result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), &taskInput)
	if err != nil {
		log.Error().Err(err).Msg("Team composition analysis failed")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "team composition analysis failed"})
		return
	}

	log.Info().
		Str("analysis_id", result.AnalysisID.String()).
		Float64("confidence_score", result.TeamComposition.ConfidenceScore).
		Int("recommended_team_size", result.TeamComposition.EstimatedSize).
		Msg("Team composition analysis completed")

	render.JSON(w, r, result)
}

func (s *Server) listTasksHandler(w http.ResponseWriter, r *http.Request) {
	// Parse query parameters
	statusParam := r.URL.Query().Get("status")
	priorityParam := r.URL.Query().Get("priority")
	repositoryParam := r.URL.Query().Get("repository")
	limitStr := r.URL.Query().Get("limit")
	offsetStr := r.URL.Query().Get("offset")

	// Build filter
	filter := &tasks.TaskFilter{}

	if statusParam != "" && statusParam != "all" {
		filter.Status = []tasks.TaskStatus{tasks.TaskStatus(statusParam)}
	}

	if priorityParam != "" {
		filter.Priority = []tasks.TaskPriority{tasks.TaskPriority(priorityParam)}
	}

	if repositoryParam != "" {
		filter.Repository = repositoryParam
	}

	if limitStr != "" {
		if limit, err := strconv.Atoi(limitStr); err == nil && limit > 0 && limit <= 100 {
			filter.Limit = limit
		}
	}

	if offsetStr != "" {
		if offset, err := strconv.Atoi(offsetStr); err == nil && offset >= 0 {
			filter.Offset = offset
		}
	}

	// Get tasks from database
	taskList, total, err := s.taskService.ListTasks(r.Context(), filter)
	if err != nil {
		log.Error().Err(err).Msg("Failed to list tasks")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to retrieve tasks"})
		return
	}

	render.JSON(w, r, map[string]interface{}{
		"tasks":  taskList,
		"total":  total,
		"filter": filter,
	})
}
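
// ingestTaskHandler accepts a task from Gitea, a webhook, or a manual client
// (the route sits behind the service-token middleware), persists it, and then
// either composes a team synchronously (simple tasks, 201) or queues async
// composition and answers 202 (complex tasks). An illustrative request body,
// using the JSON fields this handler decodes (values are made up):
//
//	{
//	  "title": "Fix login redirect",
//	  "description": "Users land on a 404 after OAuth login",
//	  "repository": "example/web-ui",
//	  "priority": "high",
//	  "labels": ["bug", "auth"],
//	  "source": "manual"
//	}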
func (s *Server) ingestTaskHandler(w http.ResponseWriter, r *http.Request) {
	var taskData struct {
		Title       string   `json:"title"`
		Description string   `json:"description"`
		Repository  string   `json:"repository"`
		IssueURL    string   `json:"issue_url,omitempty"`
		Priority    string   `json:"priority,omitempty"`
		Labels      []string `json:"labels,omitempty"`
		Source      string   `json:"source,omitempty"` // "manual", "gitea", "webhook"
	}

	if err := json.NewDecoder(r.Body).Decode(&taskData); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate required fields
	if taskData.Title == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "title is required"})
		return
	}

	if taskData.Description == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "description is required"})
		return
	}

	// Set defaults
	if taskData.Priority == "" {
		taskData.Priority = "medium"
	}

	if taskData.Source == "" {
		taskData.Source = "manual"
	}

	// Create task ID
	taskID := uuid.New().String()

	log.Info().
		Str("task_id", taskID).
		Str("title", taskData.Title).
		Str("repository", taskData.Repository).
		Str("source", taskData.Source).
		Msg("Ingesting new task")

	// For MVP, we'll create the task record and attempt team composition
	// In production, this would persist to a tasks table and queue for processing

	// Create task in database first
	createInput := &tasks.CreateTaskInput{
		ExternalID:  taskID, // Use generated ID as external ID for manual tasks
		ExternalURL: taskData.IssueURL,
		SourceType:  tasks.SourceType(taskData.Source),
		Title:       taskData.Title,
		Description: taskData.Description,
		Priority:    tasks.TaskPriority(taskData.Priority),
		Repository:  taskData.Repository,
		Labels:      taskData.Labels,
	}

	createdTask, err := s.taskService.CreateTask(r.Context(), createInput)
	if err != nil {
		log.Error().Err(err).Str("task_id", taskID).Msg("Failed to create task")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to create task"})
		return
	}

	// Convert to TaskAnalysisInput for team composition
	taskInput := &composer.TaskAnalysisInput{
		Title:        createdTask.Title,
		Description:  createdTask.Description,
		Repository:   createdTask.Repository,
		Requirements: createdTask.Requirements,
		Priority:     composer.TaskPriority(createdTask.Priority),
		TechStack:    createdTask.TechStack,
		Metadata: map[string]interface{}{
			"task_id":   createdTask.ID.String(),
			"source":    taskData.Source,
			"issue_url": taskData.IssueURL,
			"labels":    createdTask.Labels,
		},
	}

	// Start team composition analysis in background for complex tasks
	// For simple tasks, we can process synchronously
	isComplex := len(taskData.Description) > 200 ||
		len(taskData.Labels) > 3 ||
		taskData.Priority == "high" ||
		taskData.Priority == "critical"

	if isComplex {
		// For complex tasks, start async team composition
		go s.processTaskAsync(taskID, taskInput)

		// Return immediate response
		render.Status(r, http.StatusAccepted)
		render.JSON(w, r, map[string]interface{}{
			"task_id": taskID,
			"status":  "queued",
			"message": "Task queued for team composition analysis",
		})
	} else {
		// For simple tasks, process synchronously
		result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), taskInput)
		if err != nil {
			log.Error().Err(err).Str("task_id", taskID).Msg("Task analysis failed")
			render.Status(r, http.StatusInternalServerError)
			render.JSON(w, r, map[string]string{"error": "task analysis failed"})
			return
		}

		// Create the team
		team, err := s.teamComposer.CreateTeam(r.Context(), result.TeamComposition, taskInput)
		if err != nil {
			log.Error().Err(err).Str("task_id", taskID).Msg("Team creation failed")
			render.Status(r, http.StatusInternalServerError)
			render.JSON(w, r, map[string]string{"error": "team creation failed"})
			return
		}

		log.Info().
			Str("task_id", taskID).
			Str("team_id", team.ID.String()).
			Msg("Task ingested and team created")

		render.Status(r, http.StatusCreated)
		render.JSON(w, r, map[string]interface{}{
			"task_id":            taskID,
			"team":               team,
			"composition_result": result,
			"status":             "completed",
			"message":            "Task ingested and team created successfully",
		})
	}
}

func (s *Server) getTaskHandler(w http.ResponseWriter, r *http.Request) {
	taskIDStr := chi.URLParam(r, "taskID")
	taskID, err := uuid.Parse(taskIDStr)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid task ID format"})
		return
	}

	// Get task from database
	task, err := s.taskService.GetTask(r.Context(), taskID)
	if err != nil {
		if strings.Contains(err.Error(), "not found") {
			render.Status(r, http.StatusNotFound)
			render.JSON(w, r, map[string]string{"error": "task not found"})
			return
		}

		log.Error().Err(err).Str("task_id", taskIDStr).Msg("Failed to get task")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to retrieve task"})
		return
	}

	render.JSON(w, r, map[string]interface{}{
		"task": task,
	})
}
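
// slurpSubmitHandler records a team artifact submission and mints a UCXL
// address of the form ucxl://{team_id}/{artifact_type}/{unix_timestamp}. In
// MVP mode only the metadata is stored locally; nothing is proxied to SLURP
// yet. Illustrative request body (field names come from the decoded struct,
// values and placeholders are made up):
//
//	{
//	  "team_id": "<team uuid>",
//	  "artifact_type": "design_doc",
//	  "content": {"summary": "initial architecture sketch"},
//	  "metadata": {"author": "agent-7"}
//	}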
func (s *Server) slurpSubmitHandler(w http.ResponseWriter, r *http.Request) {
	// Parse the submission request
	var submission struct {
		TeamID       string                 `json:"team_id"`
		ArtifactType string                 `json:"artifact_type"`
		Content      map[string]interface{} `json:"content"`
		Metadata     map[string]interface{} `json:"metadata,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&submission); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate required fields
	if submission.TeamID == "" || submission.ArtifactType == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "team_id and artifact_type are required"})
		return
	}

	// Generate UCXL address for the submission
	ucxlAddr := fmt.Sprintf("ucxl://%s/%s/%d",
		submission.TeamID,
		submission.ArtifactType,
		time.Now().Unix())

	// For MVP, we'll store basic metadata in the database
	// In production, this would proxy to actual SLURP service
	teamUUID, err := uuid.Parse(submission.TeamID)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid team_id format"})
		return
	}

	// Store submission record
	submissionID := uuid.New()
	metadataJSON, _ := json.Marshal(submission.Metadata)

	insertQuery := `
		INSERT INTO slurp_submissions (id, team_id, ucxl_address, artifact_type, metadata, submitted_at, status)
		VALUES ($1, $2, $3, $4, $5, $6, $7)
	`

	_, err = s.db.Pool.Exec(r.Context(), insertQuery,
		submissionID, teamUUID, ucxlAddr, submission.ArtifactType,
		metadataJSON, time.Now(), "submitted")

	if err != nil {
		log.Error().Err(err).
			Str("team_id", submission.TeamID).
			Str("artifact_type", submission.ArtifactType).
			Msg("Failed to store SLURP submission")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to store submission"})
		return
	}

	log.Info().
		Str("team_id", submission.TeamID).
		Str("artifact_type", submission.ArtifactType).
		Str("ucxl_address", ucxlAddr).
		Msg("SLURP submission stored")

	render.Status(r, http.StatusCreated)
	render.JSON(w, r, map[string]interface{}{
		"submission_id": submissionID,
		"ucxl_address":  ucxlAddr,
		"status":        "submitted",
		"message":       "Artifact submitted to SLURP successfully (MVP mode)",
	})
}
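
// slurpRetrieveHandler looks up a stored submission by its UCXL address. Note
// that it reads the address from the ucxl_address query parameter rather than
// from the {ucxlAddr} path segment declared on the route, and in MVP mode it
// returns stored metadata only.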
func (s *Server) slurpRetrieveHandler(w http.ResponseWriter, r *http.Request) {
	ucxlAddress := r.URL.Query().Get("ucxl_address")
	if ucxlAddress == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "ucxl_address query parameter is required"})
		return
	}

	log.Info().
		Str("ucxl_address", ucxlAddress).
		Msg("Retrieving SLURP submission")

	// Query the submission from database
	query := `
		SELECT id, team_id, ucxl_address, artifact_type, metadata, submitted_at, status
		FROM slurp_submissions
		WHERE ucxl_address = $1
	`

	row := s.db.Pool.QueryRow(r.Context(), query, ucxlAddress)

	var (
		id            uuid.UUID
		teamID        uuid.UUID
		retrievedAddr string
		artifactType  string
		metadataJSON  []byte
		submittedAt   time.Time
		status        string
	)

	err := row.Scan(&id, &teamID, &retrievedAddr, &artifactType, &metadataJSON, &submittedAt, &status)
	if err != nil {
		if err.Error() == "no rows in result set" {
			render.Status(r, http.StatusNotFound)
			render.JSON(w, r, map[string]string{"error": "SLURP submission not found"})
			return
		}

		log.Error().Err(err).
			Str("ucxl_address", ucxlAddress).
			Msg("Failed to retrieve SLURP submission")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to retrieve submission"})
		return
	}

	// Parse metadata
	var metadata map[string]interface{}
	if len(metadataJSON) > 0 {
		json.Unmarshal(metadataJSON, &metadata)
	}

	submission := map[string]interface{}{
		"id":            id,
		"team_id":       teamID,
		"ucxl_address":  retrievedAddr,
		"artifact_type": artifactType,
		"metadata":      metadata,
		"submitted_at":  submittedAt.Format(time.RFC3339),
		"status":        status,
	}

	// For MVP, we return the metadata. In production, this would
	// proxy to SLURP service to retrieve actual artifact content
	render.JSON(w, r, map[string]interface{}{
		"submission": submission,
		"message":    "SLURP submission retrieved (MVP mode - metadata only)",
	})
}

// CHORUS Integration Handlers

func (s *Server) listProjectTasksHandler(w http.ResponseWriter, r *http.Request) {
	projectID := chi.URLParam(r, "projectID")

	log.Info().
		Str("project_id", projectID).
		Msg("Listing tasks for project")

	// For MVP, return mock tasks associated with the project
	// In production, this would query actual tasks from database
	tasks := []map[string]interface{}{
		{
			"id":            "task-001",
			"project_id":    projectID,
			"title":         "Setup project infrastructure",
			"description":   "Initialize Docker, CI/CD, and database setup",
			"status":        "completed",
			"priority":      "high",
			"assigned_team": nil,
			"created_at":    time.Now().Add(-48 * time.Hour).Format(time.RFC3339),
			"completed_at":  time.Now().Add(-12 * time.Hour).Format(time.RFC3339),
		},
		{
			"id":            "task-002",
			"project_id":    projectID,
			"title":         "Implement authentication system",
			"description":   "JWT-based authentication with user management",
			"status":        "active",
			"priority":      "high",
			"assigned_team": "team-001",
			"created_at":    time.Now().Add(-24 * time.Hour).Format(time.RFC3339),
			"updated_at":    time.Now().Add(-2 * time.Hour).Format(time.RFC3339),
		},
		{
			"id":            "task-003",
			"project_id":    projectID,
			"title":         "Create API documentation",
			"description":   "OpenAPI/Swagger documentation for all endpoints",
			"status":        "queued",
			"priority":      "medium",
			"assigned_team": nil,
			"created_at":    time.Now().Add(-6 * time.Hour).Format(time.RFC3339),
		},
	}

	render.JSON(w, r, map[string]interface{}{
		"project_id": projectID,
		"tasks":      tasks,
		"total":      len(tasks),
		"message":    "Project tasks retrieved (MVP mock data)",
	})
}

func (s *Server) listAvailableTasksHandler(w http.ResponseWriter, r *http.Request) {
	// Get query parameters for filtering
	skillFilter := r.URL.Query().Get("skills")
	priorityFilter := r.URL.Query().Get("priority")

	log.Info().
		Str("skill_filter", skillFilter).
		Str("priority_filter", priorityFilter).
		Msg("Listing available tasks")

	// For MVP, return mock available tasks that agents can claim
	// In production, this would query unassigned tasks from database
	availableTasks := []map[string]interface{}{
		{
			"id":              "task-004",
			"title":           "Fix memory leak in user service",
			"description":     "Investigate and fix memory leak causing high memory usage",
			"status":          "available",
			"priority":        "high",
			"skills_required": []string{"go", "debugging", "performance"},
			"estimated_hours": 8,
			"repository":      "example/user-service",
			"created_at":      time.Now().Add(-3 * time.Hour).Format(time.RFC3339),
		},
		{
			"id":              "task-005",
			"title":           "Add rate limiting to API",
			"description":     "Implement rate limiting middleware for API endpoints",
			"status":          "available",
			"priority":        "medium",
			"skills_required": []string{"go", "middleware", "api"},
			"estimated_hours": 4,
			"repository":      "example/api-gateway",
			"created_at":      time.Now().Add(-1 * time.Hour).Format(time.RFC3339),
		},
		{
			"id":              "task-006",
			"title":           "Update React components",
			"description":     "Migrate legacy class components to functional components",
			"status":          "available",
			"priority":        "low",
			"skills_required": []string{"react", "javascript", "frontend"},
			"estimated_hours": 12,
			"repository":      "example/web-ui",
			"created_at":      time.Now().Add(-30 * time.Minute).Format(time.RFC3339),
		},
	}

	// Apply filtering if specified
	filteredTasks := availableTasks

	if priorityFilter != "" {
		filtered := []map[string]interface{}{}
		for _, task := range availableTasks {
			if task["priority"] == priorityFilter {
				filtered = append(filtered, task)
			}
		}
		filteredTasks = filtered
	}

	render.JSON(w, r, map[string]interface{}{
		"available_tasks": filteredTasks,
		"total":           len(filteredTasks),
		"filters": map[string]string{
			"skills":   skillFilter,
			"priority": priorityFilter,
		},
		"message": "Available tasks retrieved (MVP mock data)",
	})
}

func (s *Server) getProjectRepositoryHandler(w http.ResponseWriter, r *http.Request) {
	render.Status(r, http.StatusNotImplemented)
	render.JSON(w, r, map[string]string{"error": "not implemented"})
}

func (s *Server) getProjectTaskHandler(w http.ResponseWriter, r *http.Request) {
	render.Status(r, http.StatusNotImplemented)
	render.JSON(w, r, map[string]string{"error": "not implemented"})
}
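
// claimTaskHandler lets a team (optionally a specific agent) claim a task: it
// validates the team through the composer, parses the optional agent UUID,
// and records the assignment via the task service.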
func (s *Server) claimTaskHandler(w http.ResponseWriter, r *http.Request) {
	taskIDStr := chi.URLParam(r, "taskID")
	taskID, err := uuid.Parse(taskIDStr)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid task ID format"})
		return
	}

	var claimData struct {
		TeamID  string `json:"team_id"`
		AgentID string `json:"agent_id,omitempty"`
		Reason  string `json:"reason,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&claimData); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	if claimData.TeamID == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "team_id is required"})
		return
	}

	// Validate team exists
	teamUUID, err := uuid.Parse(claimData.TeamID)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid team_id format"})
		return
	}

	// Check if team exists
	_, _, err = s.teamComposer.GetTeam(r.Context(), teamUUID)
	if err != nil {
		if strings.Contains(err.Error(), "not found") {
			render.Status(r, http.StatusNotFound)
			render.JSON(w, r, map[string]string{"error": "team not found"})
			return
		}

		log.Error().Err(err).Str("team_id", claimData.TeamID).Msg("Failed to validate team")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to validate team"})
		return
	}

	// Parse agent ID if provided
	var agentUUID *uuid.UUID
	if claimData.AgentID != "" {
		agentID, err := uuid.Parse(claimData.AgentID)
		if err != nil {
			render.Status(r, http.StatusBadRequest)
			render.JSON(w, r, map[string]string{"error": "invalid agent_id format"})
			return
		}
		agentUUID = &agentID
	}

	// Assign task to team/agent
	assignment := &tasks.TaskAssignment{
		TaskID:  taskID,
		TeamID:  &teamUUID,
		AgentID: agentUUID,
		Reason:  claimData.Reason,
	}

	err = s.taskService.AssignTask(r.Context(), assignment)
	if err != nil {
		log.Error().Err(err).Str("task_id", taskIDStr).Msg("Failed to assign task")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to assign task"})
		return
	}

	log.Info().
		Str("task_id", taskIDStr).
		Str("team_id", claimData.TeamID).
		Str("agent_id", claimData.AgentID).
		Msg("Task assigned to team")

	render.JSON(w, r, map[string]interface{}{
		"task_id":    taskIDStr,
		"team_id":    claimData.TeamID,
		"agent_id":   claimData.AgentID,
		"status":     "claimed",
		"claimed_at": time.Now().Format(time.RFC3339),
		"message":    "Task assigned successfully",
	})
}

func (s *Server) updateTaskStatusHandler(w http.ResponseWriter, r *http.Request) {
	render.Status(r, http.StatusNotImplemented)
	render.JSON(w, r, map[string]string{"error": "not implemented"})
}

func (s *Server) completeTaskHandler(w http.ResponseWriter, r *http.Request) {
	render.Status(r, http.StatusNotImplemented)
	render.JSON(w, r, map[string]string{"error": "not implemented"})
}
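
// listAgentsHandler reports the CHORUS agents currently visible to P2P
// discovery, together with online/idle/offline counts; agents in the
// "working" state are counted as online.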
func (s *Server) listAgentsHandler(w http.ResponseWriter, r *http.Request) {
	// Get discovered CHORUS agents from P2P discovery
	discoveredAgents := s.p2pDiscovery.GetAgents()

	// Convert to API format
	agents := make([]map[string]interface{}, 0, len(discoveredAgents))
	onlineCount := 0
	idleCount := 0
	offlineCount := 0

	for _, agent := range discoveredAgents {
		agentData := map[string]interface{}{
			"id":              agent.ID,
			"name":            agent.Name,
			"status":          agent.Status,
			"capabilities":    agent.Capabilities,
			"model":           agent.Model,
			"endpoint":        agent.Endpoint,
			"last_seen":       agent.LastSeen.Format(time.RFC3339),
			"tasks_completed": agent.TasksCompleted,
			"p2p_addr":        agent.P2PAddr,
			"cluster_id":      agent.ClusterID,
		}

		// Add current team if present
		if agent.CurrentTeam != "" {
			agentData["current_team"] = agent.CurrentTeam
		} else {
			agentData["current_team"] = nil
		}

		agents = append(agents, agentData)

		// Count status
		switch agent.Status {
		case "online":
			onlineCount++
		case "idle":
			idleCount++
		case "working":
			onlineCount++ // Working agents are considered online
		default:
			offlineCount++
		}
	}

	render.JSON(w, r, map[string]interface{}{
		"agents":  agents,
		"total":   len(agents),
		"online":  onlineCount,
		"idle":    idleCount,
		"offline": offlineCount,
	})
}
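
// registerAgentHandler registers an external agent directly in the database
// (distinct from the P2P-discovered agents above) and marks it available.
// Illustrative request body using the fields this handler decodes (endpoint
// and capability values are made up):
//
//	{
//	  "name": "build-agent-01",
//	  "endpoint_url": "http://10.0.0.12:9000",
//	  "capabilities": {"languages": ["go"], "gpu": false}
//	}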
func (s *Server) registerAgentHandler(w http.ResponseWriter, r *http.Request) {
	var agentData struct {
		Name         string                 `json:"name"`
		EndpointURL  string                 `json:"endpoint_url"`
		Capabilities map[string]interface{} `json:"capabilities"`
	}

	if err := json.NewDecoder(r.Body).Decode(&agentData); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate required fields
	if agentData.Name == "" || agentData.EndpointURL == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "name and endpoint_url are required"})
		return
	}

	// Create agent record
	agent := &composer.Agent{
		ID:                 uuid.New(),
		Name:               agentData.Name,
		EndpointURL:        agentData.EndpointURL,
		Capabilities:       agentData.Capabilities,
		Status:             composer.AgentStatusAvailable,
		LastSeen:           time.Now(),
		PerformanceMetrics: make(map[string]interface{}),
		CreatedAt:          time.Now(),
		UpdatedAt:          time.Now(),
	}

	// Initialize empty capabilities if none provided
	if agent.Capabilities == nil {
		agent.Capabilities = make(map[string]interface{})
	}

	// Insert into database
	capabilitiesJSON, _ := json.Marshal(agent.Capabilities)
	metricsJSON, _ := json.Marshal(agent.PerformanceMetrics)

	query := `
		INSERT INTO agents (id, name, endpoint_url, capabilities, status, last_seen, performance_metrics, created_at, updated_at)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
	`

	_, err := s.db.Pool.Exec(r.Context(), query,
		agent.ID, agent.Name, agent.EndpointURL, capabilitiesJSON,
		agent.Status, agent.LastSeen, metricsJSON,
		agent.CreatedAt, agent.UpdatedAt)

	if err != nil {
		log.Error().Err(err).Str("agent_name", agent.Name).Msg("Failed to register agent")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to register agent"})
		return
	}

	log.Info().
		Str("agent_id", agent.ID.String()).
		Str("agent_name", agent.Name).
		Str("endpoint", agent.EndpointURL).
		Msg("Agent registered successfully")

	render.Status(r, http.StatusCreated)
	render.JSON(w, r, map[string]interface{}{
		"agent":   agent,
		"message": "Agent registered successfully",
	})
}

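// Example status-update payload for updateAgentStatusHandler below (illustrative
// only; "status" must be one of the values accepted by the validStatuses check
// inside the handler):
//
//	{
//	  "status": "busy",
//	  "performance_metrics": {"avg_task_seconds": 42},
//	  "reason": "picked up a council task"
//	}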
func (s *Server) updateAgentStatusHandler(w http.ResponseWriter, r *http.Request) {
	agentIDStr := chi.URLParam(r, "agentID")
	agentID, err := uuid.Parse(agentIDStr)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid agent ID"})
		return
	}

	var statusUpdate struct {
		Status             string                 `json:"status"`
		PerformanceMetrics map[string]interface{} `json:"performance_metrics,omitempty"`
		Reason             string                 `json:"reason,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&statusUpdate); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate status values
	validStatuses := map[string]bool{
		"available": true,
		"busy":      true,
		"idle":      true,
		"offline":   true,
	}

	if !validStatuses[statusUpdate.Status] {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid status. Valid values: available, busy, idle, offline"})
		return
	}

	// Update agent status and last_seen timestamp
	updateQuery := `
		UPDATE agents
		SET status = $1, last_seen = $2, updated_at = $2
		WHERE id = $3
	`

	_, err = s.db.Pool.Exec(r.Context(), updateQuery, statusUpdate.Status, time.Now(), agentID)
	if err != nil {
		log.Error().Err(err).
			Str("agent_id", agentIDStr).
			Str("status", statusUpdate.Status).
			Msg("Failed to update agent status")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to update agent status"})
		return
	}

	// Update performance metrics if provided
	if statusUpdate.PerformanceMetrics != nil {
		metricsJSON, _ := json.Marshal(statusUpdate.PerformanceMetrics)
		_, err = s.db.Pool.Exec(r.Context(),
			`UPDATE agents SET performance_metrics = $1 WHERE id = $2`,
			metricsJSON, agentID)

		if err != nil {
			log.Warn().Err(err).
				Str("agent_id", agentIDStr).
				Msg("Failed to update agent performance metrics")
		}
	}

	log.Info().
		Str("agent_id", agentIDStr).
		Str("status", statusUpdate.Status).
		Str("reason", statusUpdate.Reason).
		Msg("Agent status updated")

	render.JSON(w, r, map[string]interface{}{
		"agent_id": agentIDStr,
		"status":   statusUpdate.Status,
		"message":  "Agent status updated successfully",
	})
}

// Project Management Handlers

func (s *Server) listProjectsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Query councils table (which stores project data)
	rows, err := s.db.Pool.Query(ctx, `
		SELECT id, project_name, repository, project_brief, status, created_at, metadata
		FROM councils
		ORDER BY created_at DESC
	`)
	if err != nil {
		log.Error().Err(err).Msg("Failed to query councils")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to fetch projects"})
		return
	}
	defer rows.Close()

	projects := []map[string]interface{}{}
	for rows.Next() {
		var id string
		var name, repo, description, status string
		var createdAt time.Time
		var metadata []byte

		if err := rows.Scan(&id, &name, &repo, &description, &status, &createdAt, &metadata); err != nil {
			log.Error().Err(err).Msg("Failed to scan council row")
			continue
		}

		project := map[string]interface{}{
			"id":          id,
			"name":        name,
			"repo_url":    repo,
			"description": description,
			"status":      status,
			"created_at":  createdAt.Format(time.RFC3339),
		}

		// Parse metadata if available
		if metadata != nil {
			var meta map[string]interface{}
			if err := json.Unmarshal(metadata, &meta); err == nil {
				if lang, ok := meta["language"].(string); ok {
					project["language"] = lang
				}
				if owner, ok := meta["owner"].(string); ok {
					project["owner"] = owner
				}
			}
		}

		projects = append(projects, project)
	}

	render.JSON(w, r, map[string]interface{}{
		"projects": projects,
		"total":    len(projects),
	})
}

// createProjectHandler handles POST /api/projects requests to add new GITEA repositories
// for team composition analysis. This is the core MVP functionality that allows users
// to register repositories that will be analyzed by the N8N workflow.
//
// Implementation decision: We use an anonymous struct for the request payload rather than
// a named struct because this is a simple, internal API that doesn't need to be shared
// across packages. This reduces complexity while maintaining type safety.
//
// TODO: In production, this would persist to PostgreSQL database rather than just
// returning in-memory data. The database integration is prepared in the docker-compose
// but not yet implemented in the handlers.
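//
// Example request (illustrative; only repository_url is required):
//
//	POST /api/projects
//	{"repository_url": "https://gitea.chorus.services/owner/repo"}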
func (s *Server) createProjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Parse request - now only requires repository_url
	var reqData struct {
		RepositoryURL string `json:"repository_url"`
	}

	if err := s.validator.DecodeAndValidateJSON(r, &reqData); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid JSON payload"})
		return
	}

	// Validate repository URL
	if reqData.RepositoryURL == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "repository_url is required"})
		return
	}

	// Parse repository URL to extract owner and repo name
	// Expected format: https://gitea.chorus.services/owner/repo
	repoURL := validation.SanitizeString(reqData.RepositoryURL)
	parts := strings.Split(strings.TrimSuffix(repoURL, ".git"), "/")
	if len(parts) < 2 {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid repository URL format"})
		return
	}

	repoName := parts[len(parts)-1]
	owner := parts[len(parts)-2]

	// Fetch repository metadata from GITEA
	log.Info().
		Str("owner", owner).
		Str("repo", repoName).
		Msg("Fetching repository metadata from GITEA")

	repo, err := s.giteaClient.GetRepository(ctx, owner, repoName)
	if err != nil {
		log.Error().
			Err(err).
			Str("owner", owner).
			Str("repo", repoName).
			Msg("Failed to fetch repository from GITEA")
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": fmt.Sprintf("failed to fetch repository: %v", err)})
		return
	}

	log.Info().
		Str("repo_url", repoURL).
		Str("title", repo.Name).
		Str("description", repo.Description).
		Msg("Creating new council from GITEA repository")

	// Prepare metadata with owner and language info
	metadata := map[string]interface{}{
		"owner":    repo.Owner.Login,
		"language": repo.Language,
	}

	// Create council formation request
	formationRequest := &council.CouncilFormationRequest{
		ProjectName:  repo.Name,
		Repository:   repoURL,
		ProjectBrief: repo.Description,
		Metadata:     metadata,
	}

	log.Info().
		Str("project", repo.Name).
		Msg("🎭 Triggering council formation workflow")

	// Form council (this creates the council record and agents)
	composition, err := s.councilComposer.FormCouncil(ctx, formationRequest)
	if err != nil {
		log.Error().
			Err(err).
			Str("project", repo.Name).
			Msg("Failed to form council")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": fmt.Sprintf("failed to form council: %v", err)})
		return
	}

	log.Info().
		Str("council_id", composition.CouncilID.String()).
		Str("project", repo.Name).
		Msg("✅ Council formation completed successfully")

	// Broadcast council opportunity to CHORUS agents via P2P
	go func() {
		broadcastCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()

		// Prepare council roles for broadcasting
		coreRoles := make([]p2p.CouncilRole, len(composition.CoreAgents))
		for i, agent := range composition.CoreAgents {
			coreRoles[i] = p2p.CouncilRole{
				RoleName:    agent.RoleName,
				AgentName:   agent.AgentName,
				Required:    agent.Required,
				Description: fmt.Sprintf("Core council role: %s", agent.AgentName),
			}
		}

		optionalRoles := make([]p2p.CouncilRole, len(composition.OptionalAgents))
		for i, agent := range composition.OptionalAgents {
			optionalRoles[i] = p2p.CouncilRole{
				RoleName:    agent.RoleName,
				AgentName:   agent.AgentName,
				Required:    agent.Required,
				Description: fmt.Sprintf("Optional council role: %s", agent.AgentName),
			}
		}

		// Create opportunity broadcast
		opportunity := &p2p.CouncilOpportunity{
			CouncilID:         composition.CouncilID,
			ProjectName:       repo.Name,
			Repository:        repoURL,
			ProjectBrief:      repo.Description,
			CoreRoles:         coreRoles,
			OptionalRoles:     optionalRoles,
			UCXLAddress:       fmt.Sprintf("ucxl://team:council@project:%s:council/councils/%s", sanitizeUCXLIdentifier(repo.Name), composition.CouncilID.String()),
			FormationDeadline: time.Now().Add(24 * time.Hour), // 24 hours to form council
			CreatedAt:         composition.CreatedAt,
			Metadata:          metadata,
		}

		// Broadcast to all CHORUS agents
		err := s.p2pBroadcaster.BroadcastCouncilOpportunity(broadcastCtx, opportunity)
		if err != nil {
			log.Error().
				Err(err).
				Str("council_id", composition.CouncilID.String()).
				Msg("Failed to broadcast council opportunity to CHORUS agents")
		} else {
			log.Info().
				Str("council_id", composition.CouncilID.String()).
				Int("core_roles", len(coreRoles)).
				Int("optional_roles", len(optionalRoles)).
				Msg("📡 Successfully broadcast council opportunity to CHORUS agents")

			s.startCouncilRebroadcastMonitor(opportunity)
		}
	}()

	// Create response project object
	project := map[string]interface{}{
		"id":          composition.CouncilID.String(),
		"name":        repo.Name,
		"repo_url":    repoURL,
		"description": repo.Description,
		"owner":       repo.Owner.Login,
		"language":    repo.Language,
		"status":      "forming",
		"created_at":  time.Now().Format(time.RFC3339),
	}

	render.Status(r, http.StatusCreated)
	render.JSON(w, r, project)
}

// deleteProjectHandler handles DELETE /api/projects/{projectID} requests to remove
// repositories from management. This allows users to clean up their project list
// and stop monitoring repositories that are no longer relevant.
//
// Implementation decision: We use chi.URLParam to extract the project ID from the
// URL path rather than query parameters, following REST conventions where the
// resource identifier is part of the path structure.
func (s *Server) deleteProjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Extract project ID from URL path parameter
	projectID := chi.URLParam(r, "projectID")

	// Parse UUID
	councilID, err := uuid.Parse(projectID)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid project ID"})
		return
	}

	// Delete from councils table
	result, err := s.db.Pool.Exec(ctx, `
		DELETE FROM councils WHERE id = $1
	`, councilID)

	if err != nil {
		log.Error().
			Err(err).
			Str("council_id", councilID.String()).
			Msg("Failed to delete council from database")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to delete project"})
		return
	}

	// Check if council was found and deleted
	if result.RowsAffected() == 0 {
		render.Status(r, http.StatusNotFound)
		render.JSON(w, r, map[string]string{"error": "project not found"})
		return
	}

	log.Info().
		Str("council_id", councilID.String()).
		Msg("Deleted council")

	render.JSON(w, r, map[string]string{"message": "project deleted"})
}

// getProjectHandler handles GET /api/projects/{projectID} requests to retrieve
// detailed information about a specific project, including its analysis results
// and team formation recommendations from the N8N workflow.
func (s *Server) getProjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	projectID := chi.URLParam(r, "projectID")

	// Parse UUID
	councilID, err := uuid.Parse(projectID)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid project ID"})
		return
	}

	// Query councils table
	var id string
	var name, repo, description, status string
	var createdAt time.Time
	var metadata []byte

	err = s.db.Pool.QueryRow(ctx, `
		SELECT id, project_name, repository, project_brief, status, created_at, metadata
		FROM councils
		WHERE id = $1
	`, councilID).Scan(&id, &name, &repo, &description, &status, &createdAt, &metadata)

	if err != nil {
		if err == pgx.ErrNoRows {
			render.Status(r, http.StatusNotFound)
			render.JSON(w, r, map[string]string{"error": "project not found"})
			return
		}
		log.Error().Err(err).Str("council_id", councilID.String()).Msg("Failed to query council")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "internal server error"})
		return
	}

	// Build project response
	project := map[string]interface{}{
		"id":          id,
		"name":        name,
		"repo_url":    repo,
		"description": description,
		"status":      status,
		"created_at":  createdAt.Format(time.RFC3339),
	}

	// Parse metadata for additional fields
	if metadata != nil {
		var meta map[string]interface{}
		if err := json.Unmarshal(metadata, &meta); err == nil {
			if lang, ok := meta["language"].(string); ok {
				project["language"] = lang
			}
			if owner, ok := meta["owner"].(string); ok {
				project["owner"] = owner
			}
			if techStack, ok := meta["tech_stack"].([]interface{}); ok {
				project["tech_stack"] = techStack
			}
			if teamSize, ok := meta["team_size"].(float64); ok {
				project["team_size"] = int(teamSize)
			}
		}
	}

	render.JSON(w, r, project)
}

// analyzeProjectHandler handles POST /api/projects/{projectID}/analyze requests to
// trigger the N8N Team Formation Analysis workflow. This is the core integration point
// that connects WHOOSH to the AI-powered repository analysis system.
//
// Implementation decisions:
// 1. 60-second timeout for N8N requests because LLM analysis can be slow
// 2. Direct HTTP client rather than a service layer for simplicity in MVP
// 3. Graceful fallback to mock data when request body is empty
// 4. Comprehensive error handling with structured logging for debugging
//
// This handler represents the "missing link" that was identified as the core need:
// WHOOSH UI → N8N workflow → LLM analysis → team formation recommendations
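//
// Example webhook payload sent to N8N (illustrative; the field names are the
// ones assembled in analysisFunc below, and the webhook path is appended to the
// configured N8N base URL):
//
//	POST <N8N base URL>/webhook/team-formation
//	{"repo_url": "https://gitea.chorus.services/owner/repo", "project_name": "repo"}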
func (s *Server) analyzeProjectHandler(w http.ResponseWriter, r *http.Request) {
	projectID := chi.URLParam(r, "projectID")

	// Project data structure for N8N payload. In production, this would be fetched
	// from the database using the projectID, but for MVP we allow it to be provided
	// in the request body or fall back to predictable mock data.
	var projectData struct {
		RepoURL string `json:"repo_url"`
		Name    string `json:"name"`
	}

	// Handle both scenarios: explicit project data in request body (for testing)
	// and implicit data fetching (for production UI). This flexibility makes the
	// API easier to test manually while supporting the intended UI workflow.
	if r.Body != http.NoBody {
		if err := json.NewDecoder(r.Body).Decode(&projectData); err != nil {
			// Try to fetch from database first, fallback to mock data if not found
			if err := s.lookupProjectData(r.Context(), projectID, &projectData); err != nil {
				// Fallback to predictable mock data based on projectID for testing
				projectData.RepoURL = "https://gitea.chorus.services/tony/" + projectID
				projectData.Name = projectID
			}
		}
	} else {
		// No body provided - try database lookup first, fallback to mock data
		if err := s.lookupProjectData(r.Context(), projectID, &projectData); err != nil {
			// Fallback to mock data if database lookup fails
			projectData.RepoURL = "https://gitea.chorus.services/tony/" + projectID
			projectData.Name = projectID
		}
	}

	// Start BACKBEAT search tracking if available
	searchID := fmt.Sprintf("analyze-%s", projectID)
	if s.backbeat != nil {
		if err := s.backbeat.StartSearch(searchID, fmt.Sprintf("Analyzing project %s (%s)", projectID, projectData.RepoURL), 4); err != nil {
			log.Warn().Err(err).Msg("Failed to start BACKBEAT search tracking")
		}
	}

	// Log the analysis initiation for debugging and audit trails. Repository URL
	// is crucial for troubleshooting N8N workflow issues.
	log.Info().
		Str("project_id", projectID).
		Str("repo_url", projectData.RepoURL).
		Msg("🔍 Starting project analysis via N8N workflow with BACKBEAT tracking")

	// Execute analysis within BACKBEAT beat budget (4 beats = 2 minutes at 2 BPM)
	var analysisResult map[string]interface{}
	analysisFunc := func() error {
		// Update BACKBEAT phase to querying
		if s.backbeat != nil {
			s.backbeat.UpdateSearchPhase(searchID, backbeat.PhaseQuerying, 0)
		}

		// HTTP client with generous timeout because:
		// 1. N8N workflow fetches multiple files from repository
		// 2. LLM analysis (Ollama) can take 10-30 seconds depending on model size
		// 3. Network latency between services in Docker Swarm
		// 60 seconds provides buffer while still failing fast for real issues
		client := &http.Client{Timeout: 60 * time.Second}

		// Payload structure matches the N8N workflow webhook expectations.
		// The workflow expects these exact field names to properly route data
		// through the file fetching and analysis nodes.
		payload := map[string]interface{}{
			"repo_url":     projectData.RepoURL, // Primary input for file fetching
			"project_name": projectData.Name,    // Used in LLM analysis context
		}

		// JSON marshaling without error checking is acceptable here because we control
		// the payload structure and know it will always be valid JSON.
		payloadBytes, _ := json.Marshal(payload)

		// Call to configurable N8N instance for team formation workflow
		// The webhook URL is constructed from the base URL in configuration
		n8nWebhookURL := s.config.N8N.BaseURL + "/webhook/team-formation"
		resp, err := client.Post(
			n8nWebhookURL,
			"application/json",
			bytes.NewBuffer(payloadBytes),
		)

		// Network-level error handling (connection refused, timeout, DNS issues)
		if err != nil {
			log.Error().Err(err).Msg("Failed to trigger N8N workflow")
			return fmt.Errorf("failed to trigger N8N workflow: %w", err)
		}
		defer resp.Body.Close()

		// HTTP-level error handling (N8N returned an error status)
		if resp.StatusCode != http.StatusOK {
			log.Error().
				Int("status", resp.StatusCode).
				Msg("N8N workflow returned error")
			return fmt.Errorf("N8N workflow returned status %d", resp.StatusCode)
		}

		// Update BACKBEAT phase to ranking
		if s.backbeat != nil {
			s.backbeat.UpdateSearchPhase(searchID, backbeat.PhaseRanking, 0)
		}

		// Read the N8N workflow response, which contains the team formation analysis
		// results including detected technologies, complexity scores, and agent assignments.
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			log.Error().Err(err).Msg("Failed to read N8N response")
			return fmt.Errorf("failed to read N8N response: %w", err)
		}

		// Parse and return N8N response
		if err := json.Unmarshal(body, &analysisResult); err != nil {
			log.Error().Err(err).Msg("Failed to parse N8N response")
			return fmt.Errorf("failed to parse N8N response: %w", err)
		}

		return nil
	}

	// Execute analysis with BACKBEAT beat budget or fallback to direct execution
	var analysisErr error
	if s.backbeat != nil {
		analysisErr = s.backbeat.ExecuteWithBeatBudget(4, analysisFunc)
		if analysisErr != nil {
			s.backbeat.FailSearch(searchID, analysisErr.Error())
		}
	} else {
		analysisErr = analysisFunc()
	}

	if analysisErr != nil {
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": analysisErr.Error()})
		return
	}

	// Complete BACKBEAT search tracking
	if s.backbeat != nil {
		s.backbeat.CompleteSearch(searchID, 1)
	}

	log.Info().
		Str("project_id", projectID).
		Msg("🔍 Project analysis completed successfully with BACKBEAT tracking")

	render.JSON(w, r, analysisResult)
}

func (s *Server) giteaWebhookHandler(w http.ResponseWriter, r *http.Request) {
	ctx, span := tracing.StartWebhookSpan(r.Context(), "gitea_webhook", "gitea")
	defer span.End()

	// Parse webhook payload
	payload, err := s.webhookHandler.ParsePayload(r)
	if err != nil {
		tracing.SetSpanError(span, err)
		log.Error().Err(err).Msg("Failed to parse webhook payload")
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid payload"})
		return
	}

	span.SetAttributes(
		attribute.String("webhook.action", payload.Action),
		attribute.String("webhook.repository", payload.Repository.FullName),
		attribute.String("webhook.sender", payload.Sender.Login),
	)

	log.Info().
		Str("action", payload.Action).
		Str("repository", payload.Repository.FullName).
		Str("sender", payload.Sender.Login).
		Msg("Received GITEA webhook")

	// Process webhook event
	event := s.webhookHandler.ProcessWebhook(payload)

	// Handle task-related webhooks
	if event.TaskInfo != nil {
		span.SetAttributes(
			attribute.Bool("webhook.has_task_info", true),
			attribute.String("webhook.task_type", event.TaskInfo["task_type"].(string)),
		)

		log.Info().
			Interface("task_info", event.TaskInfo).
			Msg("Processing task issue")

		// MVP: Store basic task info for future team assignment
		// In full implementation, this would trigger Team Composer
		s.handleTaskWebhook(ctx, event)
	} else {
		span.SetAttributes(attribute.Bool("webhook.has_task_info", false))
	}

	span.SetAttributes(
		attribute.String("webhook.status", "processed"),
		attribute.Int64("webhook.timestamp", event.Timestamp),
	)

	render.JSON(w, r, map[string]interface{}{
		"status":    "received",
		"event_id":  event.Timestamp,
		"processed": event.TaskInfo != nil,
	})
}

func (s *Server) handleTaskWebhook(ctx context.Context, event *gitea.WebhookEvent) {
	// MVP implementation: Log task details
	// In full version, this would:
	// 1. Analyze task complexity
	// 2. Determine required team composition
	// 3. Create team and assign agents
	// 4. Set up P2P communication channels

	log.Info().
		Str("action", event.Action).
		Str("repository", event.Repository).
		Interface("task_info", event.TaskInfo).
		Msg("Task webhook received - MVP logging")

	// For MVP, we'll just acknowledge task detection
	if event.Action == "opened" || event.Action == "reopened" {
		taskType := event.TaskInfo["task_type"].(string)
		priority := event.TaskInfo["priority"].(string)

		log.Info().
			Str("task_type", taskType).
			Str("priority", priority).
			Msg("New task detected - ready for team assignment")
	}
}

// staticFileHandler serves static files from the UI directory

func (s *Server) dashboardHandler(w http.ResponseWriter, r *http.Request) {
	// Serve the external index.html file
	uiDir := resolveUIDir()
	indexPath := filepath.Join(uiDir, "index.html")

	// Check if the UI directory and index.html exist
	if _, err := os.Stat(indexPath); os.IsNotExist(err) {
		// Fallback to embedded HTML if external files don't exist
		log.Warn().Msg("External UI files not found, using fallback message")
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		w.Write([]byte(`<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>WHOOSH - Setup Required</title>
</head>
<body style="font-family: Arial, sans-serif; margin: 40px; background: #1a1a1a; color: #f0f0f0;">
<h1>WHOOSH Setup Required</h1>
<p>External UI files not found. Please ensure the ui/ directory is mounted and contains:</p>
<ul>
<li>index.html</li>
<li>styles.css</li>
<li>script.js</li>
</ul>
<p>Current working directory: ` + getCurrentDir() + `</p>
</body>
</html>`))
		return
	}

	// Serve the external index.html file
	http.ServeFile(w, r, indexPath)
}

// getCurrentDir returns the current working directory for debugging
func getCurrentDir() string {
	dir, err := os.Getwd()
	if err != nil {
		return "unknown"
	}
	return dir
}

// resolveUIDir determines the directory to serve the UI from.
// It uses WHOOSH_UI_DIR if set, otherwise falls back to ./ui.
func resolveUIDir() string {
	if v := strings.TrimSpace(os.Getenv("WHOOSH_UI_DIR")); v != "" {
		return v
	}
	return "./ui"
}

// backbeatStatusHandler provides real-time BACKBEAT pulse data
func (s *Server) backbeatStatusHandler(w http.ResponseWriter, r *http.Request) {
	now := time.Now()

	// Get real BACKBEAT data if integration is available and started
	if s.backbeat != nil {
		health := s.backbeat.GetHealth()

		// Extract real BACKBEAT data
		currentBeat := int64(0)
		if beatVal, ok := health["current_beat"]; ok {
			if beat, ok := beatVal.(int64); ok {
				currentBeat = beat
			}
		}

		currentTempo := 2 // Default fallback
		if tempoVal, ok := health["current_tempo"]; ok {
			if tempo, ok := tempoVal.(int); ok {
				currentTempo = tempo
			}
		}

		connected := false
		if connVal, ok := health["connected"]; ok {
			if conn, ok := connVal.(bool); ok {
				connected = conn
			}
		}

		// Determine phase based on BACKBEAT health
		phase := "normal"
		if degradationVal, ok := health["local_degradation"]; ok {
			if degraded, ok := degradationVal.(bool); ok && degraded {
				phase = "degraded"
			}
		}

		// Calculate average interval based on tempo (BPM to milliseconds)
		averageInterval := 60000 / currentTempo // Convert BPM to milliseconds between beats

		// Determine if current beat is a downbeat (every 4th beat)
		isDownbeat := currentBeat%4 == 1
		currentDownbeat := (currentBeat / 4) + 1
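		// Worked example (illustrative): at the default tempo of 2 BPM the interval
		// is 60000 / 2 = 30000 ms between beats; for beat 9, isDownbeat is true
		// because 9 % 4 == 1, and currentDownbeat = (9 / 4) + 1 = 3.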

		response := map[string]interface{}{
			"current_beat":     currentBeat,
			"current_downbeat": currentDownbeat,
			"average_interval": averageInterval,
			"phase":            phase,
			"is_downbeat":      isDownbeat,
			"tempo":            currentTempo,
			"connected":        connected,
			"timestamp":        now.Unix(),
			"status":           "live",
			"backbeat_health":  health,
		}

		w.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(w).Encode(response); err != nil {
			http.Error(w, "Failed to encode response", http.StatusInternalServerError)
			return
		}
		return
	}

	// Fallback to basic data if BACKBEAT integration is not available
	response := map[string]interface{}{
		"current_beat":     0,
		"current_downbeat": 0,
		"average_interval": 0,
		"phase":            "disconnected",
		"is_downbeat":      false,
		"tempo":            0,
		"connected":        false,
		"timestamp":        now.Unix(),
		"status":           "no_backbeat",
		"error":            "BACKBEAT integration not available",
	}

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(response); err != nil {
		http.Error(w, "Failed to encode response", http.StatusInternalServerError)
		return
	}
}

// Repository Management Handlers

// listRepositoriesHandler returns all monitored repositories
func (s *Server) listRepositoriesHandler(w http.ResponseWriter, r *http.Request) {
	log.Info().Msg("Listing monitored repositories")

	query := `
		SELECT id, name, owner, full_name, url, clone_url, ssh_url, source_type,
		       monitor_issues, monitor_pull_requests, enable_chorus_integration,
		       description, default_branch, is_private, language, topics,
		       last_sync_at, sync_status, sync_error, open_issues_count,
		       closed_issues_count, total_tasks_created, created_at, updated_at
		FROM repositories
		ORDER BY created_at DESC`

	rows, err := s.db.Pool.Query(context.Background(), query)
	if err != nil {
		log.Error().Err(err).Msg("Failed to query repositories")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to query repositories"})
		return
	}
	defer rows.Close()

	repositories := []map[string]interface{}{}
	for rows.Next() {
		var id, name, owner, fullName, url, sourceType, defaultBranch, syncStatus string
		var cloneURL, sshURL, description, syncError, language *string
		var monitorIssues, monitorPRs, enableChorus, isPrivate bool
		var topicsJSON []byte
		var lastSyncAt *time.Time
		var createdAt, updatedAt time.Time
		var openIssues, closedIssues, totalTasks int

		err := rows.Scan(&id, &name, &owner, &fullName, &url, &cloneURL, &sshURL, &sourceType,
			&monitorIssues, &monitorPRs, &enableChorus, &description, &defaultBranch,
			&isPrivate, &language, &topicsJSON, &lastSyncAt, &syncStatus, &syncError,
			&openIssues, &closedIssues, &totalTasks, &createdAt, &updatedAt)
		if err != nil {
			log.Error().Err(err).Msg("Failed to scan repository row")
			continue
		}

		// Parse topics from JSONB
		var topics []string
		if err := json.Unmarshal(topicsJSON, &topics); err != nil {
			log.Error().Err(err).Msg("Failed to unmarshal topics")
			topics = []string{} // Default to empty slice
		}

		// Handle nullable lastSyncAt
		var lastSyncFormatted *string
		if lastSyncAt != nil {
			formatted := lastSyncAt.Format(time.RFC3339)
			lastSyncFormatted = &formatted
		}

		repo := map[string]interface{}{
			"id":                        id,
			"name":                      name,
			"owner":                     owner,
			"full_name":                 fullName,
			"url":                       url,
			"clone_url":                 cloneURL,
			"ssh_url":                   sshURL,
			"source_type":               sourceType,
			"monitor_issues":            monitorIssues,
			"monitor_pull_requests":     monitorPRs,
			"enable_chorus_integration": enableChorus,
			"description":               description,
			"default_branch":            defaultBranch,
			"is_private":                isPrivate,
			"language":                  language,
			"topics":                    topics,
			"last_sync_at":              lastSyncFormatted,
			"sync_status":               syncStatus,
			"sync_error":                syncError,
			"open_issues_count":         openIssues,
			"closed_issues_count":       closedIssues,
			"total_tasks_created":       totalTasks,
			"created_at":                createdAt.Format(time.RFC3339),
			"updated_at":                updatedAt.Format(time.RFC3339),
		}
		repositories = append(repositories, repo)
	}

	render.JSON(w, r, map[string]interface{}{
		"repositories": repositories,
		"count":        len(repositories),
	})
}

// createRepositoryHandler adds a new repository to monitor
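//
// Example request body (illustrative; name, owner, and url are required, and the
// remaining fields fall back to the defaults set inside the handler):
//
//	{
//	  "name": "whoosh",
//	  "owner": "chorus",
//	  "url": "https://gitea.chorus.services/chorus/whoosh",
//	  "monitor_issues": true,
//	  "enable_chorus_integration": true
//	}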
func (s *Server) createRepositoryHandler(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Name                    string   `json:"name"`
		Owner                   string   `json:"owner"`
		URL                     string   `json:"url"`
		SourceType              string   `json:"source_type"`
		MonitorIssues           bool     `json:"monitor_issues"`
		MonitorPullRequests     bool     `json:"monitor_pull_requests"`
		EnableChorusIntegration bool     `json:"enable_chorus_integration"`
		Description             *string  `json:"description"`
		DefaultBranch           string   `json:"default_branch"`
		IsPrivate               bool     `json:"is_private"`
		Language                *string  `json:"language"`
		Topics                  []string `json:"topics"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate required fields
	if req.Name == "" || req.Owner == "" || req.URL == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "name, owner, and url are required"})
		return
	}

	// Set defaults
	if req.SourceType == "" {
		req.SourceType = "gitea"
	}
	if req.DefaultBranch == "" {
		req.DefaultBranch = "main"
	}
	if req.Topics == nil {
		req.Topics = []string{}
	}

	fullName := req.Owner + "/" + req.Name

	log.Info().
		Str("repository", fullName).
		Str("url", req.URL).
		Msg("Creating new repository monitor")

	query := `
		INSERT INTO repositories (
			name, owner, full_name, url, source_type, monitor_issues,
			monitor_pull_requests, enable_chorus_integration, description,
			default_branch, is_private, language, topics
		) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
		RETURNING id, created_at`

	// Convert topics slice to JSON for JSONB column
	topicsJSON, err := json.Marshal(req.Topics)
	if err != nil {
		log.Error().Err(err).Msg("Failed to marshal topics")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to process topics"})
		return
	}

	var id string
	var createdAt time.Time
	err = s.db.Pool.QueryRow(context.Background(), query,
		req.Name, req.Owner, fullName, req.URL, req.SourceType,
		req.MonitorIssues, req.MonitorPullRequests, req.EnableChorusIntegration,
		req.Description, req.DefaultBranch, req.IsPrivate, req.Language, topicsJSON).
		Scan(&id, &createdAt)

	if err != nil {
		log.Error().Err(err).Msg("Failed to create repository")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to create repository"})
		return
	}

	// Automatically create required labels in the Gitea repository
	// @goal: WHOOSH-LABELS-004 - Automatic label creation on repository addition
	// WHY: Ensures standardized ecosystem labels are available immediately for issue categorization
	if req.SourceType == "gitea" && s.repoMonitor != nil && s.repoMonitor.GetGiteaClient() != nil {
		log.Info().
			Str("repository", fullName).
			Msg("Creating required labels in Gitea repository")

		// @goal: WHOOSH-LABELS-004 - Apply standardized label set to new repository
		err := s.repoMonitor.GetGiteaClient().EnsureRequiredLabels(context.Background(), req.Owner, req.Name)
		if err != nil {
			log.Warn().
				Err(err).
				Str("repository", fullName).
				Msg("Failed to create labels in Gitea repository - repository monitoring will still work")
			// Don't fail the entire request if label creation fails
		} else {
			log.Info().
				Str("repository", fullName).
				Msg("Successfully created required labels in Gitea repository")
		}
	}

	render.Status(r, http.StatusCreated)
	render.JSON(w, r, map[string]interface{}{
		"id":         id,
		"full_name":  fullName,
		"created_at": createdAt.Format(time.RFC3339),
		"message":    "Repository monitor created successfully",
	})
}

// getRepositoryHandler returns a specific repository
func (s *Server) getRepositoryHandler(w http.ResponseWriter, r *http.Request) {
	repoID := chi.URLParam(r, "repoID")

	log.Info().Str("repository_id", repoID).Msg("Getting repository details")

	query := `
		SELECT id, name, owner, full_name, url, clone_url, ssh_url, source_type,
		       source_config, monitor_issues, monitor_pull_requests, monitor_releases,
		       enable_chorus_integration, chorus_task_labels, auto_assign_teams,
		       description, default_branch, is_private, language, topics,
		       last_sync_at, last_issue_sync, sync_status, sync_error,
		       open_issues_count, closed_issues_count, total_tasks_created,
		       created_at, updated_at
		FROM repositories WHERE id = $1`

	var repo struct {
		ID                      string     `json:"id"`
		Name                    string     `json:"name"`
		Owner                   string     `json:"owner"`
		FullName                string     `json:"full_name"`
		URL                     string     `json:"url"`
		CloneURL                *string    `json:"clone_url"`
		SSHURL                  *string    `json:"ssh_url"`
		SourceType              string     `json:"source_type"`
		SourceConfig            []byte     `json:"source_config"`
		MonitorIssues           bool       `json:"monitor_issues"`
		MonitorPullRequests     bool       `json:"monitor_pull_requests"`
		MonitorReleases         bool       `json:"monitor_releases"`
		EnableChorusIntegration bool       `json:"enable_chorus_integration"`
		ChorusTaskLabels        []string   `json:"chorus_task_labels"`
		AutoAssignTeams         bool       `json:"auto_assign_teams"`
		Description             *string    `json:"description"`
		DefaultBranch           string     `json:"default_branch"`
		IsPrivate               bool       `json:"is_private"`
		Language                *string    `json:"language"`
		Topics                  []string   `json:"topics"`
		LastSyncAt              *time.Time `json:"last_sync_at"`
		LastIssueSyncAt         *time.Time `json:"last_issue_sync"`
		SyncStatus              string     `json:"sync_status"`
		SyncError               *string    `json:"sync_error"`
		OpenIssuesCount         int        `json:"open_issues_count"`
		ClosedIssuesCount       int        `json:"closed_issues_count"`
		TotalTasksCreated       int        `json:"total_tasks_created"`
		CreatedAt               time.Time  `json:"created_at"`
		UpdatedAt               time.Time  `json:"updated_at"`
	}

	err := s.db.Pool.QueryRow(context.Background(), query, repoID).Scan(
		&repo.ID, &repo.Name, &repo.Owner, &repo.FullName, &repo.URL,
		&repo.CloneURL, &repo.SSHURL, &repo.SourceType, &repo.SourceConfig,
		&repo.MonitorIssues, &repo.MonitorPullRequests, &repo.MonitorReleases,
		&repo.EnableChorusIntegration, &repo.ChorusTaskLabels, &repo.AutoAssignTeams,
		&repo.Description, &repo.DefaultBranch, &repo.IsPrivate, &repo.Language,
		&repo.Topics, &repo.LastSyncAt, &repo.LastIssueSyncAt, &repo.SyncStatus,
		&repo.SyncError, &repo.OpenIssuesCount, &repo.ClosedIssuesCount,
		&repo.TotalTasksCreated, &repo.CreatedAt, &repo.UpdatedAt)

	if err != nil {
		if err.Error() == "no rows in result set" {
			render.Status(r, http.StatusNotFound)
			render.JSON(w, r, map[string]string{"error": "repository not found"})
			return
		}
		log.Error().Err(err).Msg("Failed to get repository")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to get repository"})
		return
	}

	render.JSON(w, r, repo)
}

// updateRepositoryHandler updates repository settings
func (s *Server) updateRepositoryHandler(w http.ResponseWriter, r *http.Request) {
	repoID := chi.URLParam(r, "repoID")

	var req struct {
		MonitorIssues           *bool    `json:"monitor_issues"`
		MonitorPullRequests     *bool    `json:"monitor_pull_requests"`
		MonitorReleases         *bool    `json:"monitor_releases"`
		EnableChorusIntegration *bool    `json:"enable_chorus_integration"`
		AutoAssignTeams         *bool    `json:"auto_assign_teams"`
		Description             *string  `json:"description"`
		DefaultBranch           *string  `json:"default_branch"`
		Language                *string  `json:"language"`
		Topics                  []string `json:"topics"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid request body"})
		return
	}

	log.Info().Str("repository_id", repoID).Msg("Updating repository settings")

	// Build dynamic update query
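	// Illustrative example: a request that sets only monitor_issues and language
	// produces the statement
	//   UPDATE repositories SET monitor_issues = $2, language = $3, updated_at = $4 WHERE id = $1
	// with args = [repoID, monitor_issues, language, time.Now()].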
updates := []string{}
|
||
args := []interface{}{repoID}
|
||
argIndex := 2
|
||
|
||
if req.MonitorIssues != nil {
|
||
updates = append(updates, fmt.Sprintf("monitor_issues = $%d", argIndex))
|
||
args = append(args, *req.MonitorIssues)
|
||
argIndex++
|
||
}
|
||
if req.MonitorPullRequests != nil {
|
||
updates = append(updates, fmt.Sprintf("monitor_pull_requests = $%d", argIndex))
|
||
args = append(args, *req.MonitorPullRequests)
|
||
argIndex++
|
||
}
|
||
if req.MonitorReleases != nil {
|
||
updates = append(updates, fmt.Sprintf("monitor_releases = $%d", argIndex))
|
||
args = append(args, *req.MonitorReleases)
|
||
argIndex++
|
||
}
|
||
if req.EnableChorusIntegration != nil {
|
||
updates = append(updates, fmt.Sprintf("enable_chorus_integration = $%d", argIndex))
|
||
args = append(args, *req.EnableChorusIntegration)
|
||
argIndex++
|
||
}
|
||
if req.AutoAssignTeams != nil {
|
||
updates = append(updates, fmt.Sprintf("auto_assign_teams = $%d", argIndex))
|
||
args = append(args, *req.AutoAssignTeams)
|
||
argIndex++
|
||
}
|
||
if req.Description != nil {
|
||
updates = append(updates, fmt.Sprintf("description = $%d", argIndex))
|
||
args = append(args, *req.Description)
|
||
argIndex++
|
||
}
|
||
if req.DefaultBranch != nil {
|
||
updates = append(updates, fmt.Sprintf("default_branch = $%d", argIndex))
|
||
args = append(args, *req.DefaultBranch)
|
||
argIndex++
|
||
}
|
||
if req.Language != nil {
|
||
updates = append(updates, fmt.Sprintf("language = $%d", argIndex))
|
||
args = append(args, *req.Language)
|
||
argIndex++
|
||
}
|
||
if req.Topics != nil {
|
||
updates = append(updates, fmt.Sprintf("topics = $%d", argIndex))
|
||
args = append(args, req.Topics)
|
||
argIndex++
|
||
}
|
||
|
||
if len(updates) == 0 {
|
||
render.Status(r, http.StatusBadRequest)
|
||
render.JSON(w, r, map[string]string{"error": "no fields to update"})
|
||
return
|
||
}
|
||
|
||
updates = append(updates, fmt.Sprintf("updated_at = $%d", argIndex))
|
||
args = append(args, time.Now())
|
||
|
||
query := fmt.Sprintf("UPDATE repositories SET %s WHERE id = $1", strings.Join(updates, ", "))
|
||
|
||
_, err := s.db.Pool.Exec(context.Background(), query, args...)
|
||
if err != nil {
|
||
log.Error().Err(err).Msg("Failed to update repository")
|
||
render.Status(r, http.StatusInternalServerError)
|
||
render.JSON(w, r, map[string]string{"error": "failed to update repository"})
|
||
return
|
||
}
|
||
|
||
render.JSON(w, r, map[string]string{"message": "Repository updated successfully"})
|
||
}
|
||
|
||
// deleteRepositoryHandler removes a repository from monitoring
|
||
func (s *Server) deleteRepositoryHandler(w http.ResponseWriter, r *http.Request) {
|
||
repoID := chi.URLParam(r, "repoID")
|
||
|
||
log.Info().Str("repository_id", repoID).Msg("Deleting repository monitor")
|
||
|
||
query := "DELETE FROM repositories WHERE id = $1"
|
||
result, err := s.db.Pool.Exec(context.Background(), query, repoID)
|
||
if err != nil {
|
||
log.Error().Err(err).Msg("Failed to delete repository")
|
||
render.Status(r, http.StatusInternalServerError)
|
||
render.JSON(w, r, map[string]string{"error": "failed to delete repository"})
|
||
return
|
||
}
|
||
|
||
if result.RowsAffected() == 0 {
|
||
render.Status(r, http.StatusNotFound)
|
||
render.JSON(w, r, map[string]string{"error": "repository not found"})
|
||
return
|
||
}
|
||
|
||
render.JSON(w, r, map[string]string{"message": "Repository deleted successfully"})
|
||
}
|
||
|
||
// syncRepositoryHandler triggers a manual sync of repository issues
|
||
func (s *Server) syncRepositoryHandler(w http.ResponseWriter, r *http.Request) {
|
||
repoID := chi.URLParam(r, "repoID")
|
||
|
||
log.Info().Str("repository_id", repoID).Msg("Manual repository sync triggered")
|
||
|
||
if s.repoMonitor == nil {
|
||
render.Status(r, http.StatusServiceUnavailable)
|
||
render.JSON(w, r, map[string]string{"error": "repository monitoring service not available"})
|
||
return
|
||
}
|
||
|
||
// Trigger repository sync in background
|
||
go func() {
|
||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||
defer cancel()
|
||
|
||
if err := s.repoMonitor.SyncRepository(ctx, repoID); err != nil {
|
||
log.Error().
|
||
Err(err).
|
||
Str("repository_id", repoID).
|
||
Msg("Manual repository sync failed")
|
||
}
|
||
}()
|
||
|
||
render.JSON(w, r, map[string]interface{}{
|
||
"message": "Repository sync triggered",
|
||
"repository_id": repoID,
|
||
"status": "started",
|
||
})
|
||
}
|
||
|
||
// ensureRepositoryLabelsHandler ensures required labels exist in the Gitea repository
|
||
func (s *Server) ensureRepositoryLabelsHandler(w http.ResponseWriter, r *http.Request) {
|
||
repoID := chi.URLParam(r, "repoID")
|
||
|
||
log.Info().Str("repository_id", repoID).Msg("Ensuring repository labels")
|
||
|
||
if s.repoMonitor == nil || s.repoMonitor.GetGiteaClient() == nil {
|
||
render.Status(r, http.StatusServiceUnavailable)
|
||
render.JSON(w, r, map[string]string{"error": "repository monitoring service not available"})
|
||
return
|
||
}
|
||
|
||
// Get repository details first
|
||
query := "SELECT owner, name FROM repositories WHERE id = $1"
|
||
var owner, name string
|
||
err := s.db.Pool.QueryRow(context.Background(), query, repoID).Scan(&owner, &name)
|
||
if err != nil {
|
||
if err.Error() == "no rows in result set" {
|
||
render.Status(r, http.StatusNotFound)
|
||
render.JSON(w, r, map[string]string{"error": "repository not found"})
|
||
return
|
||
}
|
||
log.Error().Err(err).Msg("Failed to get repository")
|
||
render.Status(r, http.StatusInternalServerError)
|
||
render.JSON(w, r, map[string]string{"error": "failed to get repository"})
|
||
return
|
||
}
|
||
|
||
// @goal: WHOOSH-LABELS-004 - Manual label synchronization endpoint
|
||
// WHY: Allows updating existing repositories to standardized label set
|
||
err = s.repoMonitor.GetGiteaClient().EnsureRequiredLabels(context.Background(), owner, name)
|
||
if err != nil {
|
||
log.Error().
|
||
Err(err).
|
||
Str("repository_id", repoID).
|
||
Str("owner", owner).
|
||
Str("name", name).
|
||
Msg("Failed to ensure repository labels")
|
||
render.Status(r, http.StatusInternalServerError)
|
||
render.JSON(w, r, map[string]string{"error": "failed to create labels: " + err.Error()})
|
||
return
|
||
}
|
||
|
||
log.Info().
|
||
Str("repository_id", repoID).
|
||
Str("owner", owner).
|
||
Str("name", name).
|
||
Msg("Successfully ensured repository labels")
|
||
|
||
render.JSON(w, r, map[string]interface{}{
|
||
"message": "Repository labels ensured successfully",
|
||
"repository_id": repoID,
|
||
"owner": owner,
|
||
"name": name,
|
||
})
|
||
}
|
||
|
||
// getRepositorySyncLogsHandler returns sync logs for a repository
|
||
func (s *Server) getRepositorySyncLogsHandler(w http.ResponseWriter, r *http.Request) {
|
||
repoID := chi.URLParam(r, "repoID")
|
||
limit := 50
|
||
|
||
if limitParam := r.URL.Query().Get("limit"); limitParam != "" {
|
||
if l, err := strconv.Atoi(limitParam); err == nil && l > 0 && l <= 1000 {
|
||
limit = l
|
||
}
|
||
}
|
||
|
||
log.Info().Str("repository_id", repoID).Int("limit", limit).Msg("Getting repository sync logs")
|
||
|
||
query := `
|
||
SELECT id, sync_type, operation, status, message, error_details,
|
||
items_processed, items_created, items_updated, duration_ms,
|
||
external_id, external_url, created_at
|
||
FROM repository_sync_logs
|
||
WHERE repository_id = $1
|
||
ORDER BY created_at DESC
|
||
LIMIT $2`
|
||
|
||
rows, err := s.db.Pool.Query(context.Background(), query, repoID, limit)
|
||
if err != nil {
|
||
log.Error().Err(err).Msg("Failed to query sync logs")
|
||
render.Status(r, http.StatusInternalServerError)
|
||
render.JSON(w, r, map[string]string{"error": "failed to query sync logs"})
|
||
return
|
||
}
|
||
defer rows.Close()
|
||
|
||
logs := []map[string]interface{}{}
|
||
for rows.Next() {
|
||
var id, syncType, operation, status, message string
|
||
var errorDetails []byte
|
||
var itemsProcessed, itemsCreated, itemsUpdated, durationMs int
|
||
var externalID, externalURL *string
|
||
var createdAt time.Time
|
||
|
||
err := rows.Scan(&id, &syncType, &operation, &status, &message, &errorDetails,
|
||
&itemsProcessed, &itemsCreated, &itemsUpdated, &durationMs,
|
||
&externalID, &externalURL, &createdAt)
|
||
if err != nil {
|
||
log.Error().Err(err).Msg("Failed to scan sync log row")
|
||
continue
|
||
}
|
||
|
||
logEntry := map[string]interface{}{
|
||
"id": id,
|
||
"sync_type": syncType,
|
||
"operation": operation,
|
||
"status": status,
|
||
"message": message,
|
||
"error_details": string(errorDetails),
|
||
"items_processed": itemsProcessed,
|
||
"items_created": itemsCreated,
|
||
"items_updated": itemsUpdated,
|
||
"duration_ms": durationMs,
|
||
"external_id": externalID,
|
||
"external_url": externalURL,
|
||
"created_at": createdAt.Format(time.RFC3339),
|
||
}
|
||
logs = append(logs, logEntry)
|
||
}
|
||
|
||
render.JSON(w, r, map[string]interface{}{
|
||
"logs": logs,
|
||
"count": len(logs),
|
||
})
|
||
}
|
||
|
||
// Council management handlers
|
||
|
||
func (s *Server) listCouncilsHandler(w http.ResponseWriter, r *http.Request) {
|
||
// Query all councils with basic info
|
||
query := `
|
||
SELECT c.id, c.project_name, c.repository, c.status, c.created_at,
|
||
COUNT(DISTINCT ca.id) as agent_count,
|
||
COUNT(DISTINCT car.id) as artifacts_count
|
||
FROM councils c
|
||
LEFT JOIN council_agents ca ON c.id = ca.council_id
|
||
LEFT JOIN council_artifacts car ON c.id = car.council_id
|
||
GROUP BY c.id, c.project_name, c.repository, c.status, c.created_at
|
||
ORDER BY c.created_at DESC
|
||
LIMIT 100
|
||
`
|
||
|
||
rows, err := s.db.Pool.Query(r.Context(), query)
|
||
if err != nil {
|
||
log.Error().Err(err).Msg("Failed to query councils")
|
||
render.Status(r, http.StatusInternalServerError)
|
||
render.JSON(w, r, map[string]string{"error": "failed to retrieve councils"})
|
||
return
|
||
}
|
||
defer rows.Close()
|
||
|
||
var councils []map[string]interface{}
|
||
for rows.Next() {
|
||
var id uuid.UUID
|
||
var projectName, repository, status string
|
||
var createdAt time.Time
|
||
var agentCount, artifactsCount int
|
||
|
||
err := rows.Scan(&id, &projectName, &repository, &status, &createdAt, &agentCount, &artifactsCount)
|
||
if err != nil {
|
||
log.Error().Err(err).Msg("Failed to scan council row")
|
||
continue
|
||
}
|
||
|
||
council := map[string]interface{}{
|
||
"id": id,
|
||
"project_name": projectName,
|
||
"repository": repository,
|
||
"status": status,
|
||
"created_at": createdAt.Format(time.RFC3339),
|
||
"agent_count": agentCount,
|
||
"artifacts_count": artifactsCount,
|
||
}
|
||
|
||
councils = append(councils, council)
|
||
}
|
||
|
||
if err = rows.Err(); err != nil {
|
||
log.Error().Err(err).Msg("Error iterating council rows")
|
||
render.Status(r, http.StatusInternalServerError)
|
||
render.JSON(w, r, map[string]string{"error": "failed to process councils"})
|
||
return
|
||
}
|
||
|
||
render.JSON(w, r, map[string]interface{}{
|
||
"councils": councils,
|
||
"total": len(councils),
|
||
})
|
||
}
|
||
|
||
func (s *Server) getCouncilHandler(w http.ResponseWriter, r *http.Request) {
|
||
councilIDStr := chi.URLParam(r, "councilID")
|
||
councilID, err := uuid.Parse(councilIDStr)
|
||
if err != nil {
|
||
render.Status(r, http.StatusBadRequest)
|
||
render.JSON(w, r, map[string]string{"error": "invalid council ID"})
|
||
return
|
||
}
|
||
|
||
composition, err := s.councilComposer.GetCouncilComposition(r.Context(), councilID)
|
||
if err != nil {
|
||
if strings.Contains(err.Error(), "no rows in result set") {
|
||
render.Status(r, http.StatusNotFound)
|
||
render.JSON(w, r, map[string]string{"error": "council not found"})
|
||
return
|
||
}
|
||
|
||
log.Error().Err(err).Str("council_id", councilIDStr).Msg("Failed to get council composition")
|
||
render.Status(r, http.StatusInternalServerError)
|
||
render.JSON(w, r, map[string]string{"error": "failed to retrieve council"})
|
||
return
|
||
}
|
||
|
||
render.JSON(w, r, composition)
|
||
}
|
||
|
func (s *Server) getCouncilArtifactsHandler(w http.ResponseWriter, r *http.Request) {
	councilIDStr := chi.URLParam(r, "councilID")
	councilID, err := uuid.Parse(councilIDStr)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid council ID"})
		return
	}

	// Query all artifacts for this council
	query := `
		SELECT id, artifact_type, artifact_name, content, content_json, produced_at, produced_by, status
		FROM council_artifacts
		WHERE council_id = $1
		ORDER BY produced_at DESC
	`

	rows, err := s.db.Pool.Query(r.Context(), query, councilID)
	if err != nil {
		log.Error().Err(err).Str("council_id", councilIDStr).Msg("Failed to query council artifacts")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to retrieve artifacts"})
		return
	}
	defer rows.Close()

	var artifacts []map[string]interface{}
	for rows.Next() {
		var id uuid.UUID
		var artifactType, artifactName, status string
		var content *string
		var contentJSON []byte
		var producedAt time.Time
		var producedBy *string

		err := rows.Scan(&id, &artifactType, &artifactName, &content, &contentJSON, &producedAt, &producedBy, &status)
		if err != nil {
			log.Error().Err(err).Msg("Failed to scan artifact row")
			continue
		}

		artifact := map[string]interface{}{
			"id":            id,
			"artifact_type": artifactType,
			"artifact_name": artifactName,
			"content":       content,
			"produced_at":   producedAt.Format(time.RFC3339),
			"produced_by":   producedBy,
			"status":        status,
		}

		// Parse JSON content if available
		if contentJSON != nil {
			var jsonData interface{}
			if err := json.Unmarshal(contentJSON, &jsonData); err == nil {
				artifact["content_json"] = jsonData
			}
		}

		artifacts = append(artifacts, artifact)
	}

	if err = rows.Err(); err != nil {
		log.Error().Err(err).Msg("Error iterating artifact rows")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to process artifacts"})
		return
	}

	render.JSON(w, r, map[string]interface{}{
		"council_id": councilID,
		"artifacts":  artifacts,
		"count":      len(artifacts),
	})
}

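// createCouncilArtifactHandler stores a new artifact for a council after
// validating the artifact type against the set allowed by the schema migration.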
func (s *Server) createCouncilArtifactHandler(w http.ResponseWriter, r *http.Request) {
	councilIDStr := chi.URLParam(r, "councilID")
	councilID, err := uuid.Parse(councilIDStr)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid council ID"})
		return
	}

	var req struct {
		ArtifactType string      `json:"artifact_type"`
		ArtifactName string      `json:"artifact_name"`
		Content      *string     `json:"content,omitempty"`
		ContentJSON  interface{} `json:"content_json,omitempty"`
		ProducedBy   *string     `json:"produced_by,omitempty"`
		Status       *string     `json:"status,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid JSON body"})
		return
	}

	if req.ArtifactType == "" || req.ArtifactName == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "artifact_type and artifact_name are required"})
		return
	}

	// Set default status if not provided
	status := "draft"
	if req.Status != nil {
		status = *req.Status
	}

	// Validate artifact type (based on the constraint in the migration)
	validTypes := map[string]bool{
		"kickoff_manifest": true,
		"seminal_dr":       true,
		"scaffold_plan":    true,
		"gate_tests":       true,
		"hmmm_thread":      true,
		"slurp_sources":    true,
		"shhh_policy":      true,
		"ucxl_root":        true,
	}

	if !validTypes[req.ArtifactType] {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid artifact_type"})
		return
	}

	// Prepare JSON content
	var contentJSONBytes []byte
	if req.ContentJSON != nil {
		contentJSONBytes, err = json.Marshal(req.ContentJSON)
		if err != nil {
			render.Status(r, http.StatusBadRequest)
			render.JSON(w, r, map[string]string{"error": "invalid content_json"})
			return
		}
	}

	// Insert artifact
	insertQuery := `
		INSERT INTO council_artifacts (council_id, artifact_type, artifact_name, content, content_json, produced_by, status)
		VALUES ($1, $2, $3, $4, $5, $6, $7)
		RETURNING id, produced_at
	`

	var artifactID uuid.UUID
	var producedAt time.Time

	err = s.db.Pool.QueryRow(r.Context(), insertQuery, councilID, req.ArtifactType, req.ArtifactName,
		req.Content, contentJSONBytes, req.ProducedBy, status).Scan(&artifactID, &producedAt)

	if err != nil {
		log.Error().Err(err).Str("council_id", councilIDStr).Msg("Failed to create council artifact")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to create artifact"})
		return
	}

	response := map[string]interface{}{
		"id":            artifactID,
		"council_id":    councilID,
		"artifact_type": req.ArtifactType,
		"artifact_name": req.ArtifactName,
		"content":       req.Content,
		"content_json":  req.ContentJSON,
		"produced_by":   req.ProducedBy,
		"status":        status,
		"produced_at":   producedAt.Format(time.RFC3339),
	}

	render.Status(r, http.StatusCreated)
	render.JSON(w, r, response)
}

// Helper methods for task processing

// lookupProjectData queries the repositories table to find project data by name
func (s *Server) lookupProjectData(ctx context.Context, projectID string, projectData *struct {
	RepoURL string `json:"repo_url"`
	Name    string `json:"name"`
}) error {
	// Query the repositories table to find the repository by name
	// We assume projectID corresponds to the repository name
	query := `
		SELECT name, url
		FROM repositories
		WHERE name = $1 OR full_name LIKE '%/' || $1
		LIMIT 1
	`

	var name, url string
	err := s.db.Pool.QueryRow(ctx, query, projectID).Scan(&name, &url)
	if err != nil {
		if strings.Contains(err.Error(), "no rows in result set") {
			return fmt.Errorf("project %s not found in repositories", projectID)
		}
		log.Error().Err(err).Str("project_id", projectID).Msg("Failed to query repository")
		return fmt.Errorf("database error: %w", err)
	}

	// Populate the project data
	projectData.Name = name
	projectData.RepoURL = url

	log.Info().
		Str("project_id", projectID).
		Str("name", name).
		Str("repo_url", url).
		Msg("Found project data in repositories table")

	return nil
}

// inferTechStackFromLabels extracts technology information from labels
func (s *Server) inferTechStackFromLabels(labels []string) []string {
	techMap := map[string]bool{
		"go":         true,
		"golang":     true,
		"javascript": true,
		"react":      true,
		"node":       true,
		"python":     true,
		"java":       true,
		"rust":       true,
		"docker":     true,
		"postgres":   true,
		"mysql":      true,
		"api":        true,
		"backend":    true,
		"frontend":   true,
		"database":   true,
	}

	var techStack []string
	for _, label := range labels {
		if techMap[strings.ToLower(label)] {
			techStack = append(techStack, strings.ToLower(label))
		}
	}

	return techStack
}

// processTaskAsync handles complex task processing in background
func (s *Server) processTaskAsync(taskID string, taskInput *composer.TaskAnalysisInput) {
	ctx := context.Background()

	log.Info().
		Str("task_id", taskID).
		Msg("Starting async task processing")

	result, err := s.teamComposer.AnalyzeAndComposeTeam(ctx, taskInput)
	if err != nil {
		log.Error().Err(err).
			Str("task_id", taskID).
			Msg("Async task analysis failed")
		return
	}

	team, err := s.teamComposer.CreateTeam(ctx, result.TeamComposition, taskInput)
	if err != nil {
		log.Error().Err(err).
			Str("task_id", taskID).
			Msg("Async team creation failed")
		return
	}

	log.Info().
		Str("task_id", taskID).
		Str("team_id", team.ID.String()).
		Float64("confidence", result.TeamComposition.ConfidenceScore).
		Msg("Async task processing completed")

	// In production, this would update task status in database
	// and potentially notify clients via websockets or webhooks
}

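// startCouncilRebroadcastMonitor starts (or restarts) the background loop that
// periodically re-broadcasts a council opportunity until its core roles are filled.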
func (s *Server) startCouncilRebroadcastMonitor(opportunity *p2p.CouncilOpportunity) {
	s.rebroadcastMu.Lock()
	if cancel, ok := s.activeBroadcasts[opportunity.CouncilID]; ok {
		delete(s.activeBroadcasts, opportunity.CouncilID)
		cancel()
	}
	ctx, cancel := context.WithCancel(context.Background())
	s.activeBroadcasts[opportunity.CouncilID] = cancel
	s.rebroadcastMu.Unlock()

	go s.councilRebroadcastLoop(ctx, opportunity)
}

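// stopCouncilRebroadcast cancels the rebroadcast loop for a council, if one is running.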
func (s *Server) stopCouncilRebroadcast(councilID uuid.UUID) {
	s.rebroadcastMu.Lock()
	cancel, ok := s.activeBroadcasts[councilID]
	if ok {
		delete(s.activeBroadcasts, councilID)
	}
	s.rebroadcastMu.Unlock()

	if ok {
		cancel()
	}
}

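// clearCouncilBroadcast removes a council's entry from the active broadcast map
// without cancelling; the rebroadcast loop defers it so the map stays tidy on exit.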
func (s *Server) clearCouncilBroadcast(councilID uuid.UUID) {
	s.rebroadcastMu.Lock()
	delete(s.activeBroadcasts, councilID)
	s.rebroadcastMu.Unlock()
}

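// councilRebroadcastLoop re-broadcasts the council opportunity on an exponential
// backoff (10s doubling up to 2m) until the context is cancelled or all core roles are claimed.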
func (s *Server) councilRebroadcastLoop(ctx context.Context, opportunity *p2p.CouncilOpportunity) {
	defer s.clearCouncilBroadcast(opportunity.CouncilID)

	interval := 10 * time.Second
	maxInterval := 2 * time.Minute

	for {
		select {
		case <-ctx.Done():
			log.Info().Str("council_id", opportunity.CouncilID.String()).Msg("🛑 Stopping council rebroadcast monitor")
			return
		case <-time.After(interval):
		}

		pending, err := s.hasUnfilledCoreRoles(ctx, opportunity.CouncilID)
		if err != nil {
			log.Warn().Err(err).Str("council_id", opportunity.CouncilID.String()).Msg("Failed to evaluate council staffing status")
			continue
		}

		if !pending {
			log.Info().Str("council_id", opportunity.CouncilID.String()).Msg("🎯 Council fully staffed; stopping rebroadcasts")
			return
		}

		log.Info().
			Str("council_id", opportunity.CouncilID.String()).
			Dur("interval", interval).
			Msg("📡 Re-broadcasting council opportunity to fill remaining roles")

		broadcastCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
		if err := s.p2pBroadcaster.BroadcastCouncilOpportunity(broadcastCtx, opportunity); err != nil {
			log.Warn().Err(err).Str("council_id", opportunity.CouncilID.String()).Msg("Council rebroadcast failed")
		}
		cancel()

		if interval < maxInterval {
			interval *= 2
			if interval > maxInterval {
				interval = maxInterval
			}
		}
	}
}

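// hasUnfilledCoreRoles reports whether any required council role is still
// undeployed or not yet in an assigned/active state.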
func (s *Server) hasUnfilledCoreRoles(ctx context.Context, councilID uuid.UUID) (bool, error) {
	query := `
		SELECT COUNT(*)
		FROM council_agents
		WHERE council_id = $1
		  AND required = true
		  AND (deployed = false OR status NOT IN ('assigned', 'active'))
	`

	var remaining int
	if err := s.db.Pool.QueryRow(ctx, query, councilID).Scan(&remaining); err != nil {
		return false, err
	}

	return remaining > 0, nil
}

// handleCouncilRoleClaim handles POST /api/v1/councils/{councilID}/claims
// This endpoint receives role claim requests from CHORUS agents
func (s *Server) handleCouncilRoleClaim(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	councilID := chi.URLParam(r, "councilID")
	if councilID == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "council_id is required"})
		return
	}

	// Parse UUID
	councilUUID, err := uuid.Parse(councilID)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid council_id format"})
		return
	}

	// Parse claim request
	var claim struct {
		AgentID      string   `json:"agent_id"`
		AgentName    string   `json:"agent_name"`
		RoleName     string   `json:"role_name"`
		Capabilities []string `json:"capabilities"`
		Confidence   float64  `json:"confidence"`
		Reasoning    string   `json:"reasoning"`
		Endpoint     string   `json:"endpoint"`
		P2PAddr      string   `json:"p2p_addr"`
	}

	if err := s.validator.DecodeAndValidateJSON(r, &claim); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid JSON payload"})
		return
	}

	log.Info().
		Str("council_id", councilID).
		Str("agent_id", claim.AgentID).
		Str("role_name", claim.RoleName).
		Float64("confidence", claim.Confidence).
		Msg("🤖 Agent claiming council role")

	// Verify council exists and get current state
	councilQuery := `
		SELECT id, project_name, status, project_brief, repository, external_url, issue_id, brief_dispatched_at, brief_owner_role
		FROM councils
		WHERE id = $1
	`

	var existingCouncil struct {
		ID                uuid.UUID
		ProjectName       string
		Status            string
		ProjectBrief      string
		Repository        string
		ExternalURL       sql.NullString
		IssueID           sql.NullInt64
		BriefDispatchedAt sql.NullTime
		BriefOwnerRole    sql.NullString
	}

	err = s.db.Pool.QueryRow(ctx, councilQuery, councilUUID).Scan(
		&existingCouncil.ID,
		&existingCouncil.ProjectName,
		&existingCouncil.Status,
		&existingCouncil.ProjectBrief,
		&existingCouncil.Repository,
		&existingCouncil.ExternalURL,
		&existingCouncil.IssueID,
		&existingCouncil.BriefDispatchedAt,
		&existingCouncil.BriefOwnerRole,
	)

	if err != nil {
		log.Error().
			Err(err).
			Str("council_id", councilID).
			Msg("Council not found")
		render.Status(r, http.StatusNotFound)
		render.JSON(w, r, map[string]string{"error": "council not found"})
		return
	}

	// Check if council is still forming (not already active/completed)
	if existingCouncil.Status != "forming" && existingCouncil.Status != "active" {
		render.Status(r, http.StatusConflict)
		render.JSON(w, r, map[string]string{"error": fmt.Sprintf("council is %s, not accepting claims", existingCouncil.Status)})
		return
	}

	// Check if role exists and is unclaimed
	roleCheckQuery := `
		SELECT agent_id, agent_name, role_name, required, deployed, status, persona_status, endpoint_url
		FROM council_agents
		WHERE council_id = $1 AND role_name = $2
	`

	var existingRole struct {
		AgentID       string
		AgentName     string
		RoleName      string
		Required      bool
		Deployed      bool
		Status        string
		PersonaStatus string
		EndpointURL   sql.NullString
	}

	err = s.db.Pool.QueryRow(ctx, roleCheckQuery, councilUUID, claim.RoleName).Scan(
		&existingRole.AgentID,
		&existingRole.AgentName,
		&existingRole.RoleName,
		&existingRole.Required,
		&existingRole.Deployed,
		&existingRole.Status,
		&existingRole.PersonaStatus,
		&existingRole.EndpointURL,
	)

	if err != nil {
		log.Error().
			Err(err).
			Str("role_name", claim.RoleName).
			Msg("Role not found in council")
		render.Status(r, http.StatusNotFound)
		render.JSON(w, r, map[string]string{"error": "role not found in council"})
		return
	}

	// Check if role is already claimed
	if existingRole.Deployed || existingRole.Status == "assigned" || existingRole.Status == "active" {
		log.Warn().
			Str("role_name", claim.RoleName).
			Str("current_agent", existingRole.AgentID).
			Msg("Role already claimed by another agent")
		render.Status(r, http.StatusConflict)
		render.JSON(w, r, map[string]string{"error": "role already claimed"})
		return
	}

	// Assign role to agent
	updateQuery := `
		UPDATE council_agents
		SET
			deployed = true,
			status = 'assigned',
			service_id = $1,
			endpoint_url = $2,
			persona_status = 'pending',
			persona_loaded_at = NULL,
			deployed_at = NOW(),
			updated_at = NOW()
		WHERE council_id = $3 AND role_name = $4
	`

	retry := false
	_, err = s.db.Pool.Exec(ctx, updateQuery, claim.AgentID, claim.Endpoint, councilUUID, claim.RoleName)
	if err != nil {
		if pgErr, ok := err.(*pgconn.PgError); ok && pgErr.Code == "23514" {
			retry = true
			log.Warn().
				Str("council_id", councilID).
				Str("role_name", claim.RoleName).
				Str("agent_id", claim.AgentID).
				Msg("Council role assignment hit legacy status constraint – attempting remediation")

			if ensureErr := s.ensureCouncilAgentStatusConstraint(ctx); ensureErr != nil {
				log.Error().
					Err(ensureErr).
					Str("council_id", councilID).
					Msg("Failed to reconcile council agent status constraint")
				// keep original error to return below
			} else {
				_, err = s.db.Pool.Exec(ctx, updateQuery, claim.AgentID, claim.Endpoint, councilUUID, claim.RoleName)
			}
		}
	}

	if err != nil {
		log.Error().
			Err(err).
			Str("council_id", councilID).
			Str("role_name", claim.RoleName).
			Msg("Failed to assign role to agent")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to assign role"})
		return
	}

	if retry {
		log.Info().
			Str("council_id", councilID).
			Str("role_name", claim.RoleName).
			Msg("Council agent status constraint updated to allow 'assigned'")
	}

	log.Info().
		Str("council_id", councilID).
		Str("agent_id", claim.AgentID).
		Str("role_name", claim.RoleName).
		Msg("✅ Successfully assigned council role to agent")

	roleProfile := s.lookupRoleProfile(claim.RoleName, existingRole.AgentName)
	ucxlCouncilAddress := fmt.Sprintf("ucxl://team:council@project:%s:council/councils/%s", sanitizeUCXLIdentifier(existingCouncil.ProjectName), councilID)

	briefSummary := map[string]interface{}{
		"project_name": existingCouncil.ProjectName,
		"repository":   existingCouncil.Repository,
		"ucxl_address": ucxlCouncilAddress,
	}

	if trimmed := strings.TrimSpace(existingCouncil.ProjectBrief); trimmed != "" {
		briefSummary["summary"] = truncateString(trimmed, 2000)
	}

	if existingCouncil.ExternalURL.Valid {
		briefSummary["external_url"] = existingCouncil.ExternalURL.String
	}

	if existingCouncil.IssueID.Valid {
		briefSummary["issue_id"] = existingCouncil.IssueID.Int64
	}

	if existingCouncil.BriefOwnerRole.Valid {
		briefSummary["brief_owner_role"] = existingCouncil.BriefOwnerRole.String
	}

	// Check if all core roles are now claimed
	roleCountsQuery := `
		SELECT
			COUNT(*) FILTER (WHERE required = true) AS total_core,
			COUNT(*) FILTER (WHERE required = true AND deployed = true AND status IN ('assigned', 'active')) AS claimed_core,
			COUNT(*) FILTER (WHERE required = false) AS total_optional,
			COUNT(*) FILTER (WHERE required = false AND deployed = true AND status IN ('assigned', 'active')) AS claimed_optional
		FROM council_agents
		WHERE council_id = $1
	`

	var roleCounts struct {
		TotalCore       int
		ClaimedCore     int
		TotalOptional   int
		ClaimedOptional int
	}

	err = s.db.Pool.QueryRow(ctx, roleCountsQuery, councilUUID).Scan(
		&roleCounts.TotalCore,
		&roleCounts.ClaimedCore,
		&roleCounts.TotalOptional,
		&roleCounts.ClaimedOptional,
	)
	if err != nil {
		log.Error().Err(err).Msg("Failed to check core role status")
	} else if roleCounts.TotalCore > 0 && roleCounts.ClaimedCore == roleCounts.TotalCore {
		// All core roles claimed - activate council
		_, err = s.db.Pool.Exec(ctx, "UPDATE councils SET status = 'active', updated_at = NOW() WHERE id = $1", councilUUID)
		if err == nil {
			log.Info().
				Str("council_id", councilID).
				Int("core_roles", roleCounts.TotalCore).
				Msg("🎉 All core roles claimed - Council activated!")
			// Stop any ongoing rebroadcast loop now that the council is fully staffed
			s.stopCouncilRebroadcast(councilUUID)

			go func() {
				broadcastCtx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
				defer cancel()

				statusUpdate, buildErr := s.buildCouncilStatusUpdate(broadcastCtx, councilUUID, "active", fmt.Sprintf("All %d core roles claimed", roleCounts.TotalCore))
				if buildErr != nil {
					log.Warn().Err(buildErr).Str("council_id", councilID).Msg("Failed to build council status update snapshot")
				} else {
					if err := s.p2pBroadcaster.BroadcastCouncilStatusUpdate(broadcastCtx, statusUpdate); err != nil {
						log.Warn().Err(err).Str("council_id", councilID).Msg("Failed to broadcast council status update")
					}
				}

				// Trigger team composition for the associated task once council is active
				if err := s.triggerTeamCompositionForCouncil(broadcastCtx, existingCouncil.ID); err != nil {
					log.Warn().Err(err).
						Str("council_id", councilID).
						Msg("Failed to trigger team composition after council activation")
				}
			}()
		}
	}

	// Return success response
	response := map[string]interface{}{
		"status":         "accepted",
		"council_id":     councilID,
		"role_name":      claim.RoleName,
		"ucxl_address":   fmt.Sprintf("%s/roles/%s", ucxlCouncilAddress, sanitizeUCXLIdentifier(claim.RoleName)),
		"assigned_at":    time.Now().Format(time.RFC3339),
		"role_profile":   roleProfile,
		"council_brief":  briefSummary,
		"persona_status": "pending",
	}

	render.Status(r, http.StatusCreated)
	render.JSON(w, r, response)
}

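// ensureCouncilAgentStatusConstraint rebuilds the council_agents status check
// constraint so databases created before the 'assigned' status accept the newer values.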
func (s *Server) ensureCouncilAgentStatusConstraint(ctx context.Context) error {
	s.constraintMu.Lock()
	defer s.constraintMu.Unlock()

	tx, err := s.db.Pool.BeginTx(ctx, pgx.TxOptions{})
	if err != nil {
		return fmt.Errorf("begin council agent status constraint update: %w", err)
	}

	dropStmt := `ALTER TABLE council_agents DROP CONSTRAINT IF EXISTS council_agents_status_check`
	if _, err := tx.Exec(ctx, dropStmt); err != nil {
		tx.Rollback(ctx)
		return fmt.Errorf("drop council agent status constraint: %w", err)
	}

	addStmt := `ALTER TABLE council_agents ADD CONSTRAINT council_agents_status_check CHECK (status IN ('pending', 'deploying', 'assigned', 'active', 'failed', 'removed'))`
	if _, err := tx.Exec(ctx, addStmt); err != nil {
		tx.Rollback(ctx)

		if pgErr, ok := err.(*pgconn.PgError); ok && pgErr.Code == "42710" {
			return nil
		}

		return fmt.Errorf("add council agent status constraint: %w", err)
	}

	if err := tx.Commit(ctx); err != nil {
		return fmt.Errorf("commit council agent status constraint update: %w", err)
	}

	return nil
}

// triggerTeamCompositionForCouncil starts team composition for the task linked to the council
func (s *Server) triggerTeamCompositionForCouncil(ctx context.Context, councilID uuid.UUID) error {
	// Look up the task associated with this council. For now we assume the task ID matches the council ID.
	// Future work could store an explicit mapping in the database.
	taskID := councilID.String()

	log := zerolog.Ctx(ctx).With().Str("council_id", councilID.String()).Str("task_id", taskID).Logger()
	log.Info().Msg("🔁 Triggering team composition for council-linked task")

	// Reuse the monitor's capability to run team composition if available.
	// During server initialization the monitor reference is optional; guard against nil.
	if s.repoMonitor == nil {
		return fmt.Errorf("monitor not initialized; cannot trigger team composition")
	}

	// Use the monitor's helper so the same logic runs as for bzzz-task issues.
	go s.repoMonitor.TriggerTeamCompositionForCouncil(ctx, taskID)

	return nil
}

// handleCouncilPersonaAck receives persona readiness status from CHORUS agents.
func (s *Server) handleCouncilPersonaAck(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	councilIDParam := chi.URLParam(r, "councilID")
	if councilIDParam == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "council_id is required"})
		return
	}

	roleName := chi.URLParam(r, "roleName")
	if roleName == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "roleName is required"})
		return
	}

	councilUUID, err := uuid.Parse(councilIDParam)
	if err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid council_id format"})
		return
	}

	type personaAckPayload struct {
		AgentID          string                 `json:"agent_id"`
		Status           string                 `json:"status"`
		ModelProvider    string                 `json:"model_provider"`
		ModelName        string                 `json:"model_name,omitempty"`
		SystemPromptHash string                 `json:"system_prompt_hash,omitempty"`
		Capabilities     []string               `json:"capabilities,omitempty"`
		Errors           []string               `json:"errors,omitempty"`
		Metadata         map[string]interface{} `json:"metadata,omitempty"`
		EndpointOverride string                 `json:"endpoint_override,omitempty"`
	}

	var payload personaAckPayload
	if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "invalid JSON payload"})
		return
	}

	status := strings.TrimSpace(strings.ToLower(payload.Status))
	if status == "" {
		status = "pending"
	}

	ackRecord := map[string]interface{}{
		"agent_id":           payload.AgentID,
		"status":             status,
		"model_provider":     payload.ModelProvider,
		"model_name":         payload.ModelName,
		"system_prompt_hash": payload.SystemPromptHash,
		"capabilities":       payload.Capabilities,
		"metadata":           payload.Metadata,
		"errors":             payload.Errors,
		"endpoint_override":  payload.EndpointOverride,
		"acknowledged_at":    time.Now().UTC().Format(time.RFC3339),
	}

	ackPayloadJSON, err := json.Marshal(ackRecord)
	if err != nil {
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to marshal persona ack payload"})
		return
	}

	updateQuery := `
		UPDATE council_agents
		SET
			persona_status = $1::text,
			persona_loaded_at = CASE WHEN $1::text = 'loaded' THEN NOW() ELSE persona_loaded_at END,
			persona_ack_payload = COALESCE($2::jsonb, persona_ack_payload),
			endpoint_url = COALESCE(NULLIF($3, '')::text, endpoint_url),
			updated_at = NOW()
		WHERE council_id = $4 AND role_name = $5
	`

	var ackJSON interface{}
	if len(ackPayloadJSON) > 0 {
		ackJSON = string(ackPayloadJSON)
	}

	commandTag, err := s.db.Pool.Exec(ctx, updateQuery, status, ackJSON, payload.EndpointOverride, councilUUID, roleName)
	if err != nil {
		log.Error().Err(err).
			Str("council_id", councilIDParam).
			Str("role_name", roleName).
			Msg("Failed to update persona status")
		render.Status(r, http.StatusInternalServerError)
		render.JSON(w, r, map[string]string{"error": "failed to update persona status"})
		return
	}

	if commandTag.RowsAffected() == 0 {
		render.Status(r, http.StatusNotFound)
		render.JSON(w, r, map[string]string{"error": "council role not found"})
		return
	}

	log.Info().
		Str("council_id", councilIDParam).
		Str("role_name", roleName).
		Str("agent_id", payload.AgentID).
		Str("status", status).
		Msg("📩 Persona status acknowledged")

	go s.onPersonaAcknowledged(context.Background(), councilUUID)

	render.Status(r, http.StatusAccepted)
	render.JSON(w, r, map[string]interface{}{
		"status":    status,
		"timestamp": time.Now().Unix(),
	})
}

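// onPersonaAcknowledged broadcasts a fresh council status snapshot after a persona
// ack and, once every core persona reports loaded, dispatches the design brief.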
func (s *Server) onPersonaAcknowledged(ctx context.Context, councilID uuid.UUID) {
	if ctx == nil {
		ctx = context.Background()
	}

	statusUpdate, err := s.buildCouncilStatusUpdate(ctx, councilID, "", "Persona status updated")
	if err != nil {
		log.Warn().Err(err).Str("council_id", councilID.String()).Msg("Failed to build council status snapshot after persona ack")
	} else if statusUpdate != nil && s.p2pBroadcaster != nil {
		txCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		if err := s.p2pBroadcaster.BroadcastCouncilStatusUpdate(txCtx, statusUpdate); err != nil {
			log.Warn().Err(err).Str("council_id", councilID.String()).Msg("Failed to broadcast persona status update")
		}
	}

	ready, err := s.allCorePersonasLoaded(ctx, councilID)
	if err != nil {
		log.Warn().Err(err).Str("council_id", councilID.String()).Msg("Failed to evaluate core persona readiness")
		return
	}

	if ready {
		if err := s.dispatchCouncilBrief(ctx, councilID); err != nil {
			log.Warn().Err(err).Str("council_id", councilID.String()).Msg("Failed to dispatch council design brief")
		}
	}
}

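// allCorePersonasLoaded reports whether every required council role has a persona
// in the 'loaded' state; it returns false when the council has no core roles at all.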
func (s *Server) allCorePersonasLoaded(ctx context.Context, councilID uuid.UUID) (bool, error) {
	query := `
		SELECT
			COUNT(*) FILTER (WHERE required = true) AS total_core,
			COUNT(*) FILTER (WHERE required = true AND persona_status = 'loaded') AS loaded_core
		FROM council_agents
		WHERE council_id = $1
	`

	var totals struct {
		TotalCore  int
		LoadedCore int
	}

	if err := s.db.Pool.QueryRow(ctx, query, councilID).Scan(&totals.TotalCore, &totals.LoadedCore); err != nil {
		return false, err
	}

	if totals.TotalCore == 0 {
		return false, nil
	}

	return totals.TotalCore == totals.LoadedCore, nil
}

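// buildCouncilStatusUpdate assembles a p2p.CouncilStatusUpdate snapshot from the
// councils and council_agents tables, optionally overriding the status and message.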
func (s *Server) buildCouncilStatusUpdate(ctx context.Context, councilID uuid.UUID, statusOverride, message string) (*p2p.CouncilStatusUpdate, error) {
	councilQuery := `
		SELECT project_name, status, brief_dispatched_at
		FROM councils
		WHERE id = $1
	`

	var councilRow struct {
		ProjectName       string
		Status            string
		BriefDispatchedAt sql.NullTime
	}

	if err := s.db.Pool.QueryRow(ctx, councilQuery, councilID).Scan(&councilRow.ProjectName, &councilRow.Status, &councilRow.BriefDispatchedAt); err != nil {
		return nil, err
	}

	rolesQuery := `
		SELECT required, deployed, status, persona_status
		FROM council_agents
		WHERE council_id = $1
	`

	rows, err := s.db.Pool.Query(ctx, rolesQuery, councilID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var (
		totalCore, claimedCore         int
		totalOptional, claimedOptional int
		totalPersonas, loadedPersonas  int
		corePersonasLoaded             int
	)

	for rows.Next() {
		var required, deployed bool
		var status, personaStatus string
		if err := rows.Scan(&required, &deployed, &status, &personaStatus); err != nil {
			return nil, err
		}

		if required {
			totalCore++
			if deployed && (status == "assigned" || status == "active") {
				claimedCore++
			}
		} else {
			totalOptional++
			if deployed && (status == "assigned" || status == "active") {
				claimedOptional++
			}
		}

		if deployed {
			totalPersonas++
			if personaStatus == "loaded" {
				loadedPersonas++
				if required {
					corePersonasLoaded++
				}
			}
		}
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	status := councilRow.Status
	if statusOverride != "" {
		status = statusOverride
	}

	if message == "" {
		message = fmt.Sprintf("Core roles %d/%d claimed", claimedCore, totalCore)
	}

	return &p2p.CouncilStatusUpdate{
		CouncilID:   councilID,
		ProjectName: councilRow.ProjectName,
		Status:      status,
		Message:     message,
		Timestamp:   time.Now(),
		CoreRoles: p2p.RoleCounts{
			Total:   totalCore,
			Claimed: claimedCore,
		},
		Optional: p2p.RoleCounts{
			Total:   totalOptional,
			Claimed: claimedOptional,
		},
		Personas: p2p.PersonaCounts{
			Total:      totalPersonas,
			Loaded:     loadedPersonas,
			CoreLoaded: corePersonasLoaded,
		},
		BriefDispatched: councilRow.BriefDispatchedAt.Valid,
	}, nil
}

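// dispatchCouncilBrief delivers the project brief to the council's project lead
// (the 'tpm' role) over HTTP, records the dispatch timestamp exactly once, and
// broadcasts an updated council status snapshot.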
func (s *Server) dispatchCouncilBrief(ctx context.Context, councilID uuid.UUID) error {
	const leadRole = "tpm"

	councilQuery := `
		SELECT project_name, project_brief, repository, external_url, issue_id, brief_dispatched_at
		FROM councils
		WHERE id = $1
	`

	var councilRow struct {
		ProjectName       string
		ProjectBrief      string
		Repository        string
		ExternalURL       sql.NullString
		IssueID           sql.NullInt64
		BriefDispatchedAt sql.NullTime
	}

	if err := s.db.Pool.QueryRow(ctx, councilQuery, councilID).Scan(
		&councilRow.ProjectName,
		&councilRow.ProjectBrief,
		&councilRow.Repository,
		&councilRow.ExternalURL,
		&councilRow.IssueID,
		&councilRow.BriefDispatchedAt,
	); err != nil {
		return err
	}

	if councilRow.BriefDispatchedAt.Valid {
		log.Debug().Str("council_id", councilID.String()).Msg("Council brief already dispatched")
		return nil
	}

	agentQuery := `
		SELECT agent_id, endpoint_url
		FROM council_agents
		WHERE council_id = $1 AND role_name = $2 AND deployed = true
	`

	var agentRow struct {
		AgentID  string
		Endpoint sql.NullString
	}

	if err := s.db.Pool.QueryRow(ctx, agentQuery, councilID, leadRole).Scan(&agentRow.AgentID, &agentRow.Endpoint); err != nil {
		return fmt.Errorf("failed to load project lead agent: %w", err)
	}

	if !agentRow.Endpoint.Valid || strings.TrimSpace(agentRow.Endpoint.String) == "" {
		return fmt.Errorf("project lead endpoint not available")
	}

	briefPayload := map[string]interface{}{
		"council_id":   councilID.String(),
		"project_name": councilRow.ProjectName,
		"repository":   councilRow.Repository,
		"ucxl_address": fmt.Sprintf("ucxl://team:council@project:%s:council/councils/%s", sanitizeUCXLIdentifier(councilRow.ProjectName), councilID.String()),
		"hmmm_topic":   fmt.Sprintf("council:%s", councilID.String()),
		"expected_artifacts": []string{
			"kickoff_manifest",
			"seminal_dr",
			"scaffold_plan",
			"gate_tests",
		},
	}

	if trimmed := strings.TrimSpace(councilRow.ProjectBrief); trimmed != "" {
		briefPayload["summary"] = trimmed
	}

	if councilRow.ExternalURL.Valid {
		briefPayload["brief_url"] = councilRow.ExternalURL.String
	}

	if councilRow.IssueID.Valid {
		briefPayload["issue_id"] = councilRow.IssueID.Int64
	}

	payloadBytes, err := json.Marshal(briefPayload)
	if err != nil {
		return fmt.Errorf("failed to marshal brief payload: %w", err)
	}

	requestURL := fmt.Sprintf("%s/api/v1/councils/%s/roles/%s/brief", strings.TrimRight(agentRow.Endpoint.String, "/"), councilID.String(), leadRole)
	log.Info().
		Str("council_id", councilID.String()).
		Str("role", leadRole).
		Str("endpoint", requestURL).
		Msg("📦 Dispatching design brief to council project lead")

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, requestURL, bytes.NewBuffer(payloadBytes))
	if err != nil {
		return fmt.Errorf("failed to create brief dispatch request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-WHOOSH-Broadcast", "council-brief")

	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send brief to project lead: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		return fmt.Errorf("project lead returned non-success status %d: %s", resp.StatusCode, string(body))
	}

	updateQuery := `
		UPDATE councils
		SET brief_dispatched_at = NOW(), brief_owner_role = $2, updated_at = NOW()
		WHERE id = $1 AND brief_dispatched_at IS NULL
	`

	if _, err := s.db.Pool.Exec(ctx, updateQuery, councilID, leadRole); err != nil {
		log.Warn().Err(err).
			Str("council_id", councilID.String()).
			Msg("Brief delivered but failed to update dispatch timestamp")
	} else {
		log.Info().
			Str("council_id", councilID.String()).
			Str("role", leadRole).
			Msg("🎯 Council brief dispatched to project lead")
	}

	statusUpdate, err := s.buildCouncilStatusUpdate(ctx, councilID, "active", "Brief dispatched to project lead")
	if err == nil && statusUpdate != nil && s.p2pBroadcaster != nil {
		statusUpdate.BriefDispatched = true
		txCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		if err := s.p2pBroadcaster.BroadcastCouncilStatusUpdate(txCtx, statusUpdate); err != nil {
			log.Warn().Err(err).Str("council_id", councilID.String()).Msg("Failed to broadcast brief dispatch update")
		}
	}

	return nil
}