@goal: WHOOSH-LABELS-004, WSH-CONSISTENCY - Ecosystem standardization Replace custom label set with standardized CHORUS ecosystem labels: - Add all 8 GitHub-standard labels (bug, duplicate, enhancement, etc.) - Fix bzzz-task color from #ff6b6b to #5319e7 for consistency - Remove custom priority labels and whoosh-monitored label - Maintain backward compatibility and error handling Changes: - Updated EnsureRequiredLabels() in internal/gitea/client.go - Added requirement traceability comments throughout - Updated integration points in internal/server/server.go - Created comprehensive decision record Benefits: - Consistent labeling across WHOOSH, CHORUS, KACHING repositories - Familiar GitHub-standard labels for better developer experience - Improved tool integration and issue management - Preserved bzzz-task automation for CHORUS workflow Fixes: WHOOSH issue #4 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
3046 lines
97 KiB
Go
3046 lines
97 KiB
Go
package server
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"net/http"
|
|
"os"
|
|
"path/filepath"
|
|
"strconv"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/chorus-services/whoosh/internal/agents"
|
|
"github.com/chorus-services/whoosh/internal/auth"
|
|
"github.com/chorus-services/whoosh/internal/backbeat"
|
|
"github.com/chorus-services/whoosh/internal/composer"
|
|
"github.com/chorus-services/whoosh/internal/config"
|
|
"github.com/chorus-services/whoosh/internal/council"
|
|
"github.com/chorus-services/whoosh/internal/database"
|
|
"github.com/chorus-services/whoosh/internal/gitea"
|
|
"github.com/chorus-services/whoosh/internal/monitor"
|
|
"github.com/chorus-services/whoosh/internal/orchestrator"
|
|
"github.com/chorus-services/whoosh/internal/p2p"
|
|
"github.com/chorus-services/whoosh/internal/tasks"
|
|
"github.com/chorus-services/whoosh/internal/tracing"
|
|
"github.com/chorus-services/whoosh/internal/validation"
|
|
"github.com/go-chi/chi/v5"
|
|
"github.com/go-chi/chi/v5/middleware"
|
|
"github.com/go-chi/cors"
|
|
"github.com/go-chi/render"
|
|
"github.com/google/uuid"
|
|
"github.com/rs/zerolog/log"
|
|
"go.opentelemetry.io/otel/attribute"
|
|
)
|
|
|
|
// Global version variable set by main package
|
|
var version = "development"
|
|
|
|
// SetVersion sets the global version variable
|
|
func SetVersion(v string) {
|
|
version = v
|
|
}
|
|
|
|
type Server struct {
|
|
config *config.Config
|
|
db *database.DB
|
|
httpServer *http.Server
|
|
router chi.Router
|
|
giteaClient *gitea.Client
|
|
webhookHandler *gitea.WebhookHandler
|
|
authMiddleware *auth.Middleware
|
|
rateLimiter *auth.RateLimiter
|
|
p2pDiscovery *p2p.Discovery
|
|
agentRegistry *agents.Registry
|
|
backbeat *backbeat.Integration
|
|
teamComposer *composer.Service
|
|
councilComposer *council.CouncilComposer
|
|
taskService *tasks.Service
|
|
giteaIntegration *tasks.GiteaIntegration
|
|
repoMonitor *monitor.Monitor
|
|
swarmManager *orchestrator.SwarmManager
|
|
agentDeployer *orchestrator.AgentDeployer
|
|
validator *validation.Validator
|
|
}
|
|
|
|
func NewServer(cfg *config.Config, db *database.DB) (*Server, error) {
|
|
// Initialize core services
|
|
taskService := tasks.NewService(db.Pool)
|
|
giteaIntegration := tasks.NewGiteaIntegration(taskService, gitea.NewClient(cfg.GITEA), nil)
|
|
|
|
// Initialize P2P discovery and agent registry
|
|
p2pDiscovery := p2p.NewDiscovery()
|
|
agentRegistry := agents.NewRegistry(db.Pool, p2pDiscovery)
|
|
|
|
// Initialize team composer
|
|
teamComposer := composer.NewService(db.Pool, nil) // Use default config
|
|
|
|
// Initialize council composer for project kickoffs
|
|
councilComposer := council.NewCouncilComposer(db.Pool)
|
|
|
|
// Initialize Docker Swarm orchestrator services conditionally
|
|
var swarmManager *orchestrator.SwarmManager
|
|
var agentDeployer *orchestrator.AgentDeployer
|
|
|
|
if cfg.Docker.Enabled {
|
|
var err error
|
|
swarmManager, err = orchestrator.NewSwarmManager("", "registry.home.deepblack.cloud")
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to create swarm manager: %w", err)
|
|
}
|
|
|
|
agentDeployer = orchestrator.NewAgentDeployer(swarmManager, db.Pool, "registry.home.deepblack.cloud")
|
|
} else {
|
|
log.Warn().Msg("🐳 Docker integration disabled - council agent deployment unavailable")
|
|
}
|
|
|
|
// Initialize repository monitor with team composer, council composer, and agent deployer
|
|
repoMonitor := monitor.NewMonitor(db.Pool, cfg.GITEA, teamComposer, councilComposer, agentDeployer)
|
|
|
|
s := &Server{
|
|
config: cfg,
|
|
db: db,
|
|
giteaClient: gitea.NewClient(cfg.GITEA),
|
|
webhookHandler: gitea.NewWebhookHandler(cfg.GITEA.WebhookToken),
|
|
authMiddleware: auth.NewMiddleware(cfg.Auth.JWTSecret, cfg.Auth.ServiceTokens),
|
|
rateLimiter: auth.NewRateLimiter(100, time.Minute), // 100 requests per minute per IP
|
|
p2pDiscovery: p2pDiscovery,
|
|
agentRegistry: agentRegistry,
|
|
teamComposer: teamComposer,
|
|
councilComposer: councilComposer,
|
|
taskService: taskService,
|
|
giteaIntegration: giteaIntegration,
|
|
repoMonitor: repoMonitor,
|
|
swarmManager: swarmManager,
|
|
agentDeployer: agentDeployer,
|
|
validator: validation.NewValidator(),
|
|
}
|
|
|
|
// Initialize BACKBEAT integration if enabled
|
|
if cfg.BACKBEAT.Enabled {
|
|
backbeatIntegration, err := backbeat.NewIntegration(&cfg.BACKBEAT)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to create BACKBEAT integration: %w", err)
|
|
}
|
|
s.backbeat = backbeatIntegration
|
|
}
|
|
|
|
s.setupRouter()
|
|
s.setupRoutes()
|
|
|
|
s.httpServer = &http.Server{
|
|
Addr: cfg.Server.ListenAddr,
|
|
Handler: s.router,
|
|
ReadTimeout: cfg.Server.ReadTimeout,
|
|
WriteTimeout: cfg.Server.WriteTimeout,
|
|
}
|
|
|
|
return s, nil
|
|
}
|
|
|
|
func (s *Server) setupRouter() {
|
|
r := chi.NewRouter()
|
|
|
|
// Middleware
|
|
r.Use(middleware.RequestID)
|
|
r.Use(middleware.RealIP)
|
|
r.Use(middleware.Logger)
|
|
r.Use(middleware.Recoverer)
|
|
r.Use(middleware.Timeout(30 * time.Second))
|
|
r.Use(validation.SecurityHeaders)
|
|
r.Use(s.rateLimiter.RateLimitMiddleware)
|
|
|
|
// CORS configuration - restrict origins to configured values
|
|
r.Use(cors.Handler(cors.Options{
|
|
AllowedOrigins: s.config.Server.AllowedOrigins,
|
|
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
|
|
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token", "X-Gitea-Signature"},
|
|
ExposedHeaders: []string{"Link"},
|
|
AllowCredentials: true,
|
|
MaxAge: 300,
|
|
}))
|
|
|
|
// Content-Type handling
|
|
r.Use(render.SetContentType(render.ContentTypeJSON))
|
|
|
|
s.router = r
|
|
}
|
|
|
|
func (s *Server) setupRoutes() {
|
|
// Static file serving for UI assets
|
|
uiDir := "./ui"
|
|
s.router.Get("/ui/*", s.staticFileHandler(uiDir))
|
|
|
|
// Root route - serve basic dashboard
|
|
s.router.Get("/", s.dashboardHandler)
|
|
|
|
// Health check endpoints
|
|
s.router.Get("/health", s.healthHandler)
|
|
s.router.Get("/health/ready", s.readinessHandler)
|
|
|
|
// Admin health endpoint with detailed information
|
|
s.router.Get("/admin/health/details", s.healthDetailsHandler)
|
|
|
|
// API v1 routes
|
|
s.router.Route("/api/v1", func(r chi.Router) {
|
|
// MVP endpoints - minimal team management
|
|
r.Route("/teams", func(r chi.Router) {
|
|
r.Get("/", s.listTeamsHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Post("/", s.createTeamHandler)
|
|
r.Get("/{teamID}", s.getTeamHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Put("/{teamID}/status", s.updateTeamStatusHandler)
|
|
r.Post("/analyze", s.analyzeTeamCompositionHandler)
|
|
})
|
|
|
|
// Task ingestion from GITEA
|
|
r.Route("/tasks", func(r chi.Router) {
|
|
r.Get("/", s.listTasksHandler)
|
|
r.With(s.authMiddleware.ServiceTokenRequired).Post("/ingest", s.ingestTaskHandler)
|
|
r.Get("/{taskID}", s.getTaskHandler)
|
|
})
|
|
|
|
// Project management endpoints
|
|
r.Route("/projects", func(r chi.Router) {
|
|
r.Get("/", s.listProjectsHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Post("/", s.createProjectHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Delete("/{projectID}", s.deleteProjectHandler)
|
|
|
|
r.Route("/{projectID}", func(r chi.Router) {
|
|
r.Get("/", s.getProjectHandler)
|
|
r.Get("/tasks", s.listProjectTasksHandler)
|
|
r.Get("/tasks/available", s.listAvailableTasksHandler)
|
|
r.Get("/repository", s.getProjectRepositoryHandler)
|
|
r.Post("/analyze", s.analyzeProjectHandler)
|
|
|
|
r.Route("/tasks/{taskNumber}", func(r chi.Router) {
|
|
r.Get("/", s.getProjectTaskHandler)
|
|
r.Post("/claim", s.claimTaskHandler)
|
|
r.Put("/status", s.updateTaskStatusHandler)
|
|
r.Post("/complete", s.completeTaskHandler)
|
|
})
|
|
})
|
|
})
|
|
|
|
// Agent registration endpoints
|
|
r.Route("/agents", func(r chi.Router) {
|
|
r.Get("/", s.listAgentsHandler)
|
|
r.Post("/register", s.registerAgentHandler)
|
|
r.Put("/{agentID}/status", s.updateAgentStatusHandler)
|
|
})
|
|
|
|
// SLURP proxy endpoints
|
|
r.Route("/slurp", func(r chi.Router) {
|
|
r.Post("/submit", s.slurpSubmitHandler)
|
|
r.Get("/artifacts/{ucxlAddr}", s.slurpRetrieveHandler)
|
|
})
|
|
|
|
// Repository monitoring endpoints
|
|
r.Route("/repositories", func(r chi.Router) {
|
|
r.Get("/", s.listRepositoriesHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Post("/", s.createRepositoryHandler)
|
|
r.Get("/{repoID}", s.getRepositoryHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Put("/{repoID}", s.updateRepositoryHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Delete("/{repoID}", s.deleteRepositoryHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Post("/{repoID}/sync", s.syncRepositoryHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Post("/{repoID}/ensure-labels", s.ensureRepositoryLabelsHandler)
|
|
r.Get("/{repoID}/logs", s.getRepositorySyncLogsHandler)
|
|
})
|
|
|
|
// Council management endpoints
|
|
r.Route("/councils", func(r chi.Router) {
|
|
r.Get("/{councilID}", s.getCouncilHandler)
|
|
|
|
r.Route("/{councilID}/artifacts", func(r chi.Router) {
|
|
r.Get("/", s.getCouncilArtifactsHandler)
|
|
r.With(s.authMiddleware.AdminRequired).Post("/", s.createCouncilArtifactHandler)
|
|
})
|
|
})
|
|
|
|
// BACKBEAT monitoring endpoints
|
|
r.Route("/backbeat", func(r chi.Router) {
|
|
r.Get("/status", s.backbeatStatusHandler)
|
|
})
|
|
})
|
|
|
|
// GITEA webhook endpoint
|
|
s.router.Post(s.config.GITEA.WebhookPath, s.giteaWebhookHandler)
|
|
}
|
|
|
|
func (s *Server) Start(ctx context.Context) error {
|
|
// Start BACKBEAT integration if enabled
|
|
if s.backbeat != nil {
|
|
if err := s.backbeat.Start(ctx); err != nil {
|
|
return fmt.Errorf("failed to start BACKBEAT integration: %w", err)
|
|
}
|
|
}
|
|
|
|
// Start P2P discovery service
|
|
if err := s.p2pDiscovery.Start(); err != nil {
|
|
return fmt.Errorf("failed to start P2P discovery: %w", err)
|
|
}
|
|
|
|
// Start agent registry service
|
|
if err := s.agentRegistry.Start(); err != nil {
|
|
return fmt.Errorf("failed to start agent registry: %w", err)
|
|
}
|
|
|
|
// Start repository monitoring service
|
|
if s.repoMonitor != nil {
|
|
go func() {
|
|
if err := s.repoMonitor.Start(ctx); err != nil && err != context.Canceled {
|
|
log.Error().Err(err).Msg("Repository monitoring service failed")
|
|
}
|
|
}()
|
|
log.Info().Msg("🔍 Repository monitoring service started")
|
|
}
|
|
|
|
log.Info().
|
|
Str("addr", s.httpServer.Addr).
|
|
Msg("HTTP server starting")
|
|
|
|
if err := s.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
|
return fmt.Errorf("server failed to start: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (s *Server) Shutdown(ctx context.Context) error {
|
|
log.Info().Msg("HTTP server shutting down")
|
|
|
|
// Stop BACKBEAT integration
|
|
if s.backbeat != nil {
|
|
if err := s.backbeat.Stop(); err != nil {
|
|
log.Error().Err(err).Msg("Failed to stop BACKBEAT integration")
|
|
}
|
|
}
|
|
|
|
// Stop agent registry service
|
|
if err := s.agentRegistry.Stop(); err != nil {
|
|
log.Error().Err(err).Msg("Failed to stop agent registry service")
|
|
}
|
|
|
|
// Stop P2P discovery service
|
|
if err := s.p2pDiscovery.Stop(); err != nil {
|
|
log.Error().Err(err).Msg("Failed to stop P2P discovery service")
|
|
}
|
|
|
|
// Stop repository monitoring service
|
|
if s.repoMonitor != nil {
|
|
s.repoMonitor.Stop()
|
|
log.Info().Msg("🛑 Repository monitoring service stopped")
|
|
}
|
|
|
|
if err := s.httpServer.Shutdown(ctx); err != nil {
|
|
return fmt.Errorf("server shutdown failed: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// Health check handlers
|
|
func (s *Server) healthHandler(w http.ResponseWriter, r *http.Request) {
|
|
response := map[string]interface{}{
|
|
"status": "ok",
|
|
"service": "whoosh",
|
|
"version": "0.1.0-mvp",
|
|
}
|
|
|
|
// Include BACKBEAT health information if available
|
|
if s.backbeat != nil {
|
|
response["backbeat"] = s.backbeat.GetHealth()
|
|
}
|
|
|
|
render.JSON(w, r, response)
|
|
}
|
|
|
|
func (s *Server) readinessHandler(w http.ResponseWriter, r *http.Request) {
|
|
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
|
|
defer cancel()
|
|
|
|
// Check database connection
|
|
if err := s.db.Health(ctx); err != nil {
|
|
log.Error().Err(err).Msg("Database health check failed")
|
|
render.Status(r, http.StatusServiceUnavailable)
|
|
render.JSON(w, r, map[string]string{
|
|
"status": "unavailable",
|
|
"error": "database connection failed",
|
|
})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, map[string]string{
|
|
"status": "ready",
|
|
"database": "connected",
|
|
})
|
|
}
|
|
|
|
// healthDetailsHandler provides comprehensive system health information
|
|
func (s *Server) healthDetailsHandler(w http.ResponseWriter, r *http.Request) {
|
|
ctx, span := tracing.StartSpan(r.Context(), "health_check_details")
|
|
defer span.End()
|
|
|
|
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
|
defer cancel()
|
|
|
|
response := map[string]interface{}{
|
|
"service": "whoosh",
|
|
"version": version,
|
|
"timestamp": time.Now().Unix(),
|
|
"uptime": time.Since(time.Now()).Seconds(), // This would need to be stored at startup
|
|
"status": "healthy",
|
|
"components": make(map[string]interface{}),
|
|
}
|
|
|
|
overallHealthy := true
|
|
components := make(map[string]interface{})
|
|
|
|
// Database Health Check
|
|
dbHealth := map[string]interface{}{
|
|
"name": "database",
|
|
"type": "postgresql",
|
|
}
|
|
|
|
if err := s.db.Health(ctx); err != nil {
|
|
dbHealth["status"] = "unhealthy"
|
|
dbHealth["error"] = err.Error()
|
|
dbHealth["last_checked"] = time.Now().Unix()
|
|
overallHealthy = false
|
|
span.SetAttributes(attribute.Bool("health.database.healthy", false))
|
|
} else {
|
|
dbHealth["status"] = "healthy"
|
|
dbHealth["last_checked"] = time.Now().Unix()
|
|
|
|
// Get database statistics
|
|
var dbStats map[string]interface{}
|
|
if stats := s.db.Pool.Stat(); stats != nil {
|
|
dbStats = map[string]interface{}{
|
|
"max_conns": stats.MaxConns(),
|
|
"acquired_conns": stats.AcquiredConns(),
|
|
"idle_conns": stats.IdleConns(),
|
|
"constructing_conns": stats.ConstructingConns(),
|
|
}
|
|
}
|
|
dbHealth["statistics"] = dbStats
|
|
span.SetAttributes(attribute.Bool("health.database.healthy", true))
|
|
}
|
|
components["database"] = dbHealth
|
|
|
|
// Gitea Health Check
|
|
giteaHealth := map[string]interface{}{
|
|
"name": "gitea",
|
|
"type": "external_service",
|
|
}
|
|
|
|
if s.giteaClient != nil {
|
|
if err := s.giteaClient.TestConnection(ctx); err != nil {
|
|
giteaHealth["status"] = "unhealthy"
|
|
giteaHealth["error"] = err.Error()
|
|
giteaHealth["endpoint"] = s.config.GITEA.BaseURL
|
|
overallHealthy = false
|
|
span.SetAttributes(attribute.Bool("health.gitea.healthy", false))
|
|
} else {
|
|
giteaHealth["status"] = "healthy"
|
|
giteaHealth["endpoint"] = s.config.GITEA.BaseURL
|
|
giteaHealth["webhook_path"] = s.config.GITEA.WebhookPath
|
|
span.SetAttributes(attribute.Bool("health.gitea.healthy", true))
|
|
}
|
|
} else {
|
|
giteaHealth["status"] = "not_configured"
|
|
span.SetAttributes(attribute.Bool("health.gitea.healthy", false))
|
|
}
|
|
giteaHealth["last_checked"] = time.Now().Unix()
|
|
components["gitea"] = giteaHealth
|
|
|
|
// BackBeat Health Check
|
|
backbeatHealth := map[string]interface{}{
|
|
"name": "backbeat",
|
|
"type": "internal_service",
|
|
}
|
|
|
|
if s.backbeat != nil {
|
|
bbHealth := s.backbeat.GetHealth()
|
|
if connected, ok := bbHealth["connected"].(bool); ok && connected {
|
|
backbeatHealth["status"] = "healthy"
|
|
backbeatHealth["details"] = bbHealth
|
|
span.SetAttributes(attribute.Bool("health.backbeat.healthy", true))
|
|
} else {
|
|
backbeatHealth["status"] = "unhealthy"
|
|
backbeatHealth["details"] = bbHealth
|
|
backbeatHealth["error"] = "not connected to NATS cluster"
|
|
overallHealthy = false
|
|
span.SetAttributes(attribute.Bool("health.backbeat.healthy", false))
|
|
}
|
|
} else {
|
|
backbeatHealth["status"] = "not_configured"
|
|
span.SetAttributes(attribute.Bool("health.backbeat.healthy", false))
|
|
}
|
|
backbeatHealth["last_checked"] = time.Now().Unix()
|
|
components["backbeat"] = backbeatHealth
|
|
|
|
// Docker Swarm Health Check (if enabled)
|
|
swarmHealth := map[string]interface{}{
|
|
"name": "docker_swarm",
|
|
"type": "orchestration",
|
|
}
|
|
|
|
if s.config.Docker.Enabled {
|
|
// Basic Docker connection check - actual swarm health would need Docker client
|
|
swarmHealth["status"] = "unknown"
|
|
swarmHealth["note"] = "Docker integration enabled but health check not implemented"
|
|
swarmHealth["socket_path"] = s.config.Docker.Host
|
|
} else {
|
|
swarmHealth["status"] = "disabled"
|
|
}
|
|
swarmHealth["last_checked"] = time.Now().Unix()
|
|
components["docker_swarm"] = swarmHealth
|
|
|
|
// Repository Monitor Health
|
|
monitorHealth := map[string]interface{}{
|
|
"name": "repository_monitor",
|
|
"type": "internal_service",
|
|
}
|
|
|
|
if s.repoMonitor != nil {
|
|
// Get repository monitoring statistics
|
|
query := `SELECT
|
|
COUNT(*) as total_repos,
|
|
COUNT(*) FILTER (WHERE sync_status = 'active') as active_repos,
|
|
COUNT(*) FILTER (WHERE sync_status = 'error') as error_repos,
|
|
COUNT(*) FILTER (WHERE monitor_issues = true) as monitored_repos
|
|
FROM repositories`
|
|
|
|
var totalRepos, activeRepos, errorRepos, monitoredRepos int
|
|
err := s.db.Pool.QueryRow(ctx, query).Scan(&totalRepos, &activeRepos, &errorRepos, &monitoredRepos)
|
|
|
|
if err != nil {
|
|
monitorHealth["status"] = "unhealthy"
|
|
monitorHealth["error"] = err.Error()
|
|
overallHealthy = false
|
|
} else {
|
|
monitorHealth["status"] = "healthy"
|
|
monitorHealth["statistics"] = map[string]interface{}{
|
|
"total_repositories": totalRepos,
|
|
"active_repositories": activeRepos,
|
|
"error_repositories": errorRepos,
|
|
"monitored_repositories": monitoredRepos,
|
|
}
|
|
}
|
|
span.SetAttributes(attribute.Bool("health.repository_monitor.healthy", err == nil))
|
|
} else {
|
|
monitorHealth["status"] = "not_configured"
|
|
span.SetAttributes(attribute.Bool("health.repository_monitor.healthy", false))
|
|
}
|
|
monitorHealth["last_checked"] = time.Now().Unix()
|
|
components["repository_monitor"] = monitorHealth
|
|
|
|
// Overall system status
|
|
if !overallHealthy {
|
|
response["status"] = "unhealthy"
|
|
span.SetAttributes(
|
|
attribute.String("health.overall_status", "unhealthy"),
|
|
attribute.Bool("health.overall_healthy", false),
|
|
)
|
|
} else {
|
|
span.SetAttributes(
|
|
attribute.String("health.overall_status", "healthy"),
|
|
attribute.Bool("health.overall_healthy", true),
|
|
)
|
|
}
|
|
|
|
response["components"] = components
|
|
response["healthy"] = overallHealthy
|
|
|
|
// Set appropriate HTTP status
|
|
if !overallHealthy {
|
|
render.Status(r, http.StatusServiceUnavailable)
|
|
}
|
|
|
|
render.JSON(w, r, response)
|
|
}
|
|
|
|
// MVP handlers for team and task management
|
|
func (s *Server) listTeamsHandler(w http.ResponseWriter, r *http.Request) {
|
|
// Parse pagination parameters
|
|
limitStr := r.URL.Query().Get("limit")
|
|
offsetStr := r.URL.Query().Get("offset")
|
|
|
|
limit := 20 // Default limit
|
|
offset := 0 // Default offset
|
|
|
|
if limitStr != "" {
|
|
if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 100 {
|
|
limit = l
|
|
}
|
|
}
|
|
|
|
if offsetStr != "" {
|
|
if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 {
|
|
offset = o
|
|
}
|
|
}
|
|
|
|
// Get teams from database
|
|
teams, total, err := s.teamComposer.ListTeams(r.Context(), limit, offset)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to list teams")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to retrieve teams"})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"teams": teams,
|
|
"total": total,
|
|
"limit": limit,
|
|
"offset": offset,
|
|
})
|
|
}
|
|
|
|
func (s *Server) createTeamHandler(w http.ResponseWriter, r *http.Request) {
|
|
var taskInput composer.TaskAnalysisInput
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&taskInput); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate required fields
|
|
if taskInput.Title == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "title is required"})
|
|
return
|
|
}
|
|
|
|
if taskInput.Description == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "description is required"})
|
|
return
|
|
}
|
|
|
|
// Set defaults if not provided
|
|
if taskInput.Priority == "" {
|
|
taskInput.Priority = composer.PriorityMedium
|
|
}
|
|
|
|
log.Info().
|
|
Str("task_title", taskInput.Title).
|
|
Str("priority", string(taskInput.Priority)).
|
|
Msg("Starting team composition for new task")
|
|
|
|
// Analyze task and compose team
|
|
result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), &taskInput)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Team composition failed")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "team composition failed"})
|
|
return
|
|
}
|
|
|
|
// Create the team in database
|
|
team, err := s.teamComposer.CreateTeam(r.Context(), result.TeamComposition, &taskInput)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to create team")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to create team"})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("team_id", team.ID.String()).
|
|
Str("team_name", team.Name).
|
|
Float64("confidence_score", result.TeamComposition.ConfidenceScore).
|
|
Msg("Team created successfully")
|
|
|
|
// Return both the team and the composition analysis
|
|
response := map[string]interface{}{
|
|
"team": team,
|
|
"composition_result": result,
|
|
"message": "Team created successfully",
|
|
}
|
|
|
|
render.Status(r, http.StatusCreated)
|
|
render.JSON(w, r, response)
|
|
}
|
|
|
|
func (s *Server) getTeamHandler(w http.ResponseWriter, r *http.Request) {
|
|
teamIDStr := chi.URLParam(r, "teamID")
|
|
teamID, err := uuid.Parse(teamIDStr)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid team ID"})
|
|
return
|
|
}
|
|
|
|
team, assignments, err := s.teamComposer.GetTeam(r.Context(), teamID)
|
|
if err != nil {
|
|
if err.Error() == "team not found" {
|
|
render.Status(r, http.StatusNotFound)
|
|
render.JSON(w, r, map[string]string{"error": "team not found"})
|
|
return
|
|
}
|
|
|
|
log.Error().Err(err).Str("team_id", teamIDStr).Msg("Failed to get team")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to retrieve team"})
|
|
return
|
|
}
|
|
|
|
response := map[string]interface{}{
|
|
"team": team,
|
|
"assignments": assignments,
|
|
}
|
|
|
|
render.JSON(w, r, response)
|
|
}
|
|
|
|
func (s *Server) updateTeamStatusHandler(w http.ResponseWriter, r *http.Request) {
|
|
teamIDStr := chi.URLParam(r, "teamID")
|
|
teamID, err := uuid.Parse(teamIDStr)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid team ID"})
|
|
return
|
|
}
|
|
|
|
var statusUpdate struct {
|
|
Status string `json:"status"`
|
|
Reason string `json:"reason,omitempty"`
|
|
}
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&statusUpdate); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate status values
|
|
validStatuses := map[string]bool{
|
|
"forming": true,
|
|
"active": true,
|
|
"completed": true,
|
|
"disbanded": true,
|
|
}
|
|
|
|
if !validStatuses[statusUpdate.Status] {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid status. Valid values: forming, active, completed, disbanded"})
|
|
return
|
|
}
|
|
|
|
// Update team status in database
|
|
updateQuery := `UPDATE teams SET status = $1, updated_at = $2 WHERE id = $3`
|
|
|
|
if statusUpdate.Status == "completed" {
|
|
updateQuery = `UPDATE teams SET status = $1, updated_at = $2, completed_at = $2 WHERE id = $3`
|
|
}
|
|
|
|
_, err = s.db.Pool.Exec(r.Context(), updateQuery, statusUpdate.Status, time.Now(), teamID)
|
|
if err != nil {
|
|
log.Error().Err(err).
|
|
Str("team_id", teamIDStr).
|
|
Str("status", statusUpdate.Status).
|
|
Msg("Failed to update team status")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to update team status"})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("team_id", teamIDStr).
|
|
Str("status", statusUpdate.Status).
|
|
Str("reason", statusUpdate.Reason).
|
|
Msg("Team status updated")
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"team_id": teamIDStr,
|
|
"status": statusUpdate.Status,
|
|
"message": "Team status updated successfully",
|
|
})
|
|
}
|
|
|
|
func (s *Server) analyzeTeamCompositionHandler(w http.ResponseWriter, r *http.Request) {
|
|
var taskInput composer.TaskAnalysisInput
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&taskInput); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate required fields
|
|
if taskInput.Title == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "title is required"})
|
|
return
|
|
}
|
|
|
|
if taskInput.Description == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "description is required"})
|
|
return
|
|
}
|
|
|
|
// Set defaults if not provided
|
|
if taskInput.Priority == "" {
|
|
taskInput.Priority = composer.PriorityMedium
|
|
}
|
|
|
|
log.Info().
|
|
Str("task_title", taskInput.Title).
|
|
Str("priority", string(taskInput.Priority)).
|
|
Msg("Analyzing team composition requirements")
|
|
|
|
// Analyze task and compose team (without creating it)
|
|
result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), &taskInput)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Team composition analysis failed")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "team composition analysis failed"})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("analysis_id", result.AnalysisID.String()).
|
|
Float64("confidence_score", result.TeamComposition.ConfidenceScore).
|
|
Int("recommended_team_size", result.TeamComposition.EstimatedSize).
|
|
Msg("Team composition analysis completed")
|
|
|
|
render.JSON(w, r, result)
|
|
}
|
|
|
|
func (s *Server) listTasksHandler(w http.ResponseWriter, r *http.Request) {
|
|
// Parse query parameters
|
|
statusParam := r.URL.Query().Get("status")
|
|
priorityParam := r.URL.Query().Get("priority")
|
|
repositoryParam := r.URL.Query().Get("repository")
|
|
limitStr := r.URL.Query().Get("limit")
|
|
offsetStr := r.URL.Query().Get("offset")
|
|
|
|
// Build filter
|
|
filter := &tasks.TaskFilter{}
|
|
|
|
if statusParam != "" && statusParam != "all" {
|
|
filter.Status = []tasks.TaskStatus{tasks.TaskStatus(statusParam)}
|
|
}
|
|
|
|
if priorityParam != "" {
|
|
filter.Priority = []tasks.TaskPriority{tasks.TaskPriority(priorityParam)}
|
|
}
|
|
|
|
if repositoryParam != "" {
|
|
filter.Repository = repositoryParam
|
|
}
|
|
|
|
if limitStr != "" {
|
|
if limit, err := strconv.Atoi(limitStr); err == nil && limit > 0 && limit <= 100 {
|
|
filter.Limit = limit
|
|
}
|
|
}
|
|
|
|
if offsetStr != "" {
|
|
if offset, err := strconv.Atoi(offsetStr); err == nil && offset >= 0 {
|
|
filter.Offset = offset
|
|
}
|
|
}
|
|
|
|
// Get tasks from database
|
|
taskList, total, err := s.taskService.ListTasks(r.Context(), filter)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to list tasks")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to retrieve tasks"})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"tasks": taskList,
|
|
"total": total,
|
|
"filter": filter,
|
|
})
|
|
}
|
|
|
|
func (s *Server) ingestTaskHandler(w http.ResponseWriter, r *http.Request) {
|
|
var taskData struct {
|
|
Title string `json:"title"`
|
|
Description string `json:"description"`
|
|
Repository string `json:"repository"`
|
|
IssueURL string `json:"issue_url,omitempty"`
|
|
Priority string `json:"priority,omitempty"`
|
|
Labels []string `json:"labels,omitempty"`
|
|
Source string `json:"source,omitempty"` // "manual", "gitea", "webhook"
|
|
}
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&taskData); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate required fields
|
|
if taskData.Title == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "title is required"})
|
|
return
|
|
}
|
|
|
|
if taskData.Description == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "description is required"})
|
|
return
|
|
}
|
|
|
|
// Set defaults
|
|
if taskData.Priority == "" {
|
|
taskData.Priority = "medium"
|
|
}
|
|
|
|
if taskData.Source == "" {
|
|
taskData.Source = "manual"
|
|
}
|
|
|
|
// Create task ID
|
|
taskID := uuid.New().String()
|
|
|
|
log.Info().
|
|
Str("task_id", taskID).
|
|
Str("title", taskData.Title).
|
|
Str("repository", taskData.Repository).
|
|
Str("source", taskData.Source).
|
|
Msg("Ingesting new task")
|
|
|
|
// For MVP, we'll create the task record and attempt team composition
|
|
// In production, this would persist to a tasks table and queue for processing
|
|
|
|
// Create task in database first
|
|
createInput := &tasks.CreateTaskInput{
|
|
ExternalID: taskID, // Use generated ID as external ID for manual tasks
|
|
ExternalURL: taskData.IssueURL,
|
|
SourceType: tasks.SourceType(taskData.Source),
|
|
Title: taskData.Title,
|
|
Description: taskData.Description,
|
|
Priority: tasks.TaskPriority(taskData.Priority),
|
|
Repository: taskData.Repository,
|
|
Labels: taskData.Labels,
|
|
}
|
|
|
|
createdTask, err := s.taskService.CreateTask(r.Context(), createInput)
|
|
if err != nil {
|
|
log.Error().Err(err).Str("task_id", taskID).Msg("Failed to create task")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to create task"})
|
|
return
|
|
}
|
|
|
|
// Convert to TaskAnalysisInput for team composition
|
|
taskInput := &composer.TaskAnalysisInput{
|
|
Title: createdTask.Title,
|
|
Description: createdTask.Description,
|
|
Repository: createdTask.Repository,
|
|
Requirements: createdTask.Requirements,
|
|
Priority: composer.TaskPriority(createdTask.Priority),
|
|
TechStack: createdTask.TechStack,
|
|
Metadata: map[string]interface{}{
|
|
"task_id": createdTask.ID.String(),
|
|
"source": taskData.Source,
|
|
"issue_url": taskData.IssueURL,
|
|
"labels": createdTask.Labels,
|
|
},
|
|
}
|
|
|
|
// Start team composition analysis in background for complex tasks
|
|
// For simple tasks, we can process synchronously
|
|
isComplex := len(taskData.Description) > 200 ||
|
|
len(taskData.Labels) > 3 ||
|
|
taskData.Priority == "high" ||
|
|
taskData.Priority == "critical"
|
|
|
|
if isComplex {
|
|
// For complex tasks, start async team composition
|
|
go s.processTaskAsync(taskID, taskInput)
|
|
|
|
// Return immediate response
|
|
render.Status(r, http.StatusAccepted)
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"task_id": taskID,
|
|
"status": "queued",
|
|
"message": "Task queued for team composition analysis",
|
|
})
|
|
} else {
|
|
// For simple tasks, process synchronously
|
|
result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), taskInput)
|
|
if err != nil {
|
|
log.Error().Err(err).Str("task_id", taskID).Msg("Task analysis failed")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "task analysis failed"})
|
|
return
|
|
}
|
|
|
|
// Create the team
|
|
team, err := s.teamComposer.CreateTeam(r.Context(), result.TeamComposition, taskInput)
|
|
if err != nil {
|
|
log.Error().Err(err).Str("task_id", taskID).Msg("Team creation failed")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "team creation failed"})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("task_id", taskID).
|
|
Str("team_id", team.ID.String()).
|
|
Msg("Task ingested and team created")
|
|
|
|
render.Status(r, http.StatusCreated)
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"task_id": taskID,
|
|
"team": team,
|
|
"composition_result": result,
|
|
"status": "completed",
|
|
"message": "Task ingested and team created successfully",
|
|
})
|
|
}
|
|
}
|
|
|
|
func (s *Server) getTaskHandler(w http.ResponseWriter, r *http.Request) {
|
|
taskIDStr := chi.URLParam(r, "taskID")
|
|
taskID, err := uuid.Parse(taskIDStr)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid task ID format"})
|
|
return
|
|
}
|
|
|
|
// Get task from database
|
|
task, err := s.taskService.GetTask(r.Context(), taskID)
|
|
if err != nil {
|
|
if strings.Contains(err.Error(), "not found") {
|
|
render.Status(r, http.StatusNotFound)
|
|
render.JSON(w, r, map[string]string{"error": "task not found"})
|
|
return
|
|
}
|
|
|
|
log.Error().Err(err).Str("task_id", taskIDStr).Msg("Failed to get task")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to retrieve task"})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"task": task,
|
|
})
|
|
}
|
|
|
|
func (s *Server) slurpSubmitHandler(w http.ResponseWriter, r *http.Request) {
|
|
// Parse the submission request
|
|
var submission struct {
|
|
TeamID string `json:"team_id"`
|
|
ArtifactType string `json:"artifact_type"`
|
|
Content map[string]interface{} `json:"content"`
|
|
Metadata map[string]interface{} `json:"metadata,omitempty"`
|
|
}
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&submission); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate required fields
|
|
if submission.TeamID == "" || submission.ArtifactType == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "team_id and artifact_type are required"})
|
|
return
|
|
}
|
|
|
|
// Generate UCXL address for the submission
|
|
ucxlAddr := fmt.Sprintf("ucxl://%s/%s/%d",
|
|
submission.TeamID,
|
|
submission.ArtifactType,
|
|
time.Now().Unix())
|
|
|
|
// For MVP, we'll store basic metadata in the database
|
|
// In production, this would proxy to actual SLURP service
|
|
teamUUID, err := uuid.Parse(submission.TeamID)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid team_id format"})
|
|
return
|
|
}
|
|
|
|
// Store submission record
|
|
submissionID := uuid.New()
|
|
metadataJSON, _ := json.Marshal(submission.Metadata)
|
|
|
|
insertQuery := `
|
|
INSERT INTO slurp_submissions (id, team_id, ucxl_address, artifact_type, metadata, submitted_at, status)
|
|
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
|
`
|
|
|
|
_, err = s.db.Pool.Exec(r.Context(), insertQuery,
|
|
submissionID, teamUUID, ucxlAddr, submission.ArtifactType,
|
|
metadataJSON, time.Now(), "submitted")
|
|
|
|
if err != nil {
|
|
log.Error().Err(err).
|
|
Str("team_id", submission.TeamID).
|
|
Str("artifact_type", submission.ArtifactType).
|
|
Msg("Failed to store SLURP submission")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to store submission"})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("team_id", submission.TeamID).
|
|
Str("artifact_type", submission.ArtifactType).
|
|
Str("ucxl_address", ucxlAddr).
|
|
Msg("SLURP submission stored")
|
|
|
|
render.Status(r, http.StatusCreated)
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"submission_id": submissionID,
|
|
"ucxl_address": ucxlAddr,
|
|
"status": "submitted",
|
|
"message": "Artifact submitted to SLURP successfully (MVP mode)",
|
|
})
|
|
}
|
|
|
|
func (s *Server) slurpRetrieveHandler(w http.ResponseWriter, r *http.Request) {
|
|
ucxlAddress := r.URL.Query().Get("ucxl_address")
|
|
if ucxlAddress == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "ucxl_address query parameter is required"})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("ucxl_address", ucxlAddress).
|
|
Msg("Retrieving SLURP submission")
|
|
|
|
// Query the submission from database
|
|
query := `
|
|
SELECT id, team_id, ucxl_address, artifact_type, metadata, submitted_at, status
|
|
FROM slurp_submissions
|
|
WHERE ucxl_address = $1
|
|
`
|
|
|
|
row := s.db.Pool.QueryRow(r.Context(), query, ucxlAddress)
|
|
|
|
var (
|
|
id uuid.UUID
|
|
teamID uuid.UUID
|
|
retrievedAddr string
|
|
artifactType string
|
|
metadataJSON []byte
|
|
submittedAt time.Time
|
|
status string
|
|
)
|
|
|
|
err := row.Scan(&id, &teamID, &retrievedAddr, &artifactType, &metadataJSON, &submittedAt, &status)
|
|
if err != nil {
|
|
if err.Error() == "no rows in result set" {
|
|
render.Status(r, http.StatusNotFound)
|
|
render.JSON(w, r, map[string]string{"error": "SLURP submission not found"})
|
|
return
|
|
}
|
|
|
|
log.Error().Err(err).
|
|
Str("ucxl_address", ucxlAddress).
|
|
Msg("Failed to retrieve SLURP submission")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to retrieve submission"})
|
|
return
|
|
}
|
|
|
|
// Parse metadata
|
|
var metadata map[string]interface{}
|
|
if len(metadataJSON) > 0 {
|
|
json.Unmarshal(metadataJSON, &metadata)
|
|
}
|
|
|
|
submission := map[string]interface{}{
|
|
"id": id,
|
|
"team_id": teamID,
|
|
"ucxl_address": retrievedAddr,
|
|
"artifact_type": artifactType,
|
|
"metadata": metadata,
|
|
"submitted_at": submittedAt.Format(time.RFC3339),
|
|
"status": status,
|
|
}
|
|
|
|
// For MVP, we return the metadata. In production, this would
|
|
// proxy to SLURP service to retrieve actual artifact content
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"submission": submission,
|
|
"message": "SLURP submission retrieved (MVP mode - metadata only)",
|
|
})
|
|
}
|
|
|
|
// CHORUS Integration Handlers
|
|
|
|
func (s *Server) listProjectTasksHandler(w http.ResponseWriter, r *http.Request) {
|
|
projectID := chi.URLParam(r, "projectID")
|
|
|
|
log.Info().
|
|
Str("project_id", projectID).
|
|
Msg("Listing tasks for project")
|
|
|
|
// For MVP, return mock tasks associated with the project
|
|
// In production, this would query actual tasks from database
|
|
tasks := []map[string]interface{}{
|
|
{
|
|
"id": "task-001",
|
|
"project_id": projectID,
|
|
"title": "Setup project infrastructure",
|
|
"description": "Initialize Docker, CI/CD, and database setup",
|
|
"status": "completed",
|
|
"priority": "high",
|
|
"assigned_team": nil,
|
|
"created_at": time.Now().Add(-48 * time.Hour).Format(time.RFC3339),
|
|
"completed_at": time.Now().Add(-12 * time.Hour).Format(time.RFC3339),
|
|
},
|
|
{
|
|
"id": "task-002",
|
|
"project_id": projectID,
|
|
"title": "Implement authentication system",
|
|
"description": "JWT-based authentication with user management",
|
|
"status": "active",
|
|
"priority": "high",
|
|
"assigned_team": "team-001",
|
|
"created_at": time.Now().Add(-24 * time.Hour).Format(time.RFC3339),
|
|
"updated_at": time.Now().Add(-2 * time.Hour).Format(time.RFC3339),
|
|
},
|
|
{
|
|
"id": "task-003",
|
|
"project_id": projectID,
|
|
"title": "Create API documentation",
|
|
"description": "OpenAPI/Swagger documentation for all endpoints",
|
|
"status": "queued",
|
|
"priority": "medium",
|
|
"assigned_team": nil,
|
|
"created_at": time.Now().Add(-6 * time.Hour).Format(time.RFC3339),
|
|
},
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"project_id": projectID,
|
|
"tasks": tasks,
|
|
"total": len(tasks),
|
|
"message": "Project tasks retrieved (MVP mock data)",
|
|
})
|
|
}
|
|
|
|
func (s *Server) listAvailableTasksHandler(w http.ResponseWriter, r *http.Request) {
|
|
// Get query parameters for filtering
|
|
skillFilter := r.URL.Query().Get("skills")
|
|
priorityFilter := r.URL.Query().Get("priority")
|
|
|
|
log.Info().
|
|
Str("skill_filter", skillFilter).
|
|
Str("priority_filter", priorityFilter).
|
|
Msg("Listing available tasks")
|
|
|
|
// For MVP, return mock available tasks that agents can claim
|
|
// In production, this would query unassigned tasks from database
|
|
availableTasks := []map[string]interface{}{
|
|
{
|
|
"id": "task-004",
|
|
"title": "Fix memory leak in user service",
|
|
"description": "Investigate and fix memory leak causing high memory usage",
|
|
"status": "available",
|
|
"priority": "high",
|
|
"skills_required": []string{"go", "debugging", "performance"},
|
|
"estimated_hours": 8,
|
|
"repository": "example/user-service",
|
|
"created_at": time.Now().Add(-3 * time.Hour).Format(time.RFC3339),
|
|
},
|
|
{
|
|
"id": "task-005",
|
|
"title": "Add rate limiting to API",
|
|
"description": "Implement rate limiting middleware for API endpoints",
|
|
"status": "available",
|
|
"priority": "medium",
|
|
"skills_required": []string{"go", "middleware", "api"},
|
|
"estimated_hours": 4,
|
|
"repository": "example/api-gateway",
|
|
"created_at": time.Now().Add(-1 * time.Hour).Format(time.RFC3339),
|
|
},
|
|
{
|
|
"id": "task-006",
|
|
"title": "Update React components",
|
|
"description": "Migrate legacy class components to functional components",
|
|
"status": "available",
|
|
"priority": "low",
|
|
"skills_required": []string{"react", "javascript", "frontend"},
|
|
"estimated_hours": 12,
|
|
"repository": "example/web-ui",
|
|
"created_at": time.Now().Add(-30 * time.Minute).Format(time.RFC3339),
|
|
},
|
|
}
|
|
|
|
// Apply filtering if specified
|
|
filteredTasks := availableTasks
|
|
|
|
if priorityFilter != "" {
|
|
filtered := []map[string]interface{}{}
|
|
for _, task := range availableTasks {
|
|
if task["priority"] == priorityFilter {
|
|
filtered = append(filtered, task)
|
|
}
|
|
}
|
|
filteredTasks = filtered
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"available_tasks": filteredTasks,
|
|
"total": len(filteredTasks),
|
|
"filters": map[string]string{
|
|
"skills": skillFilter,
|
|
"priority": priorityFilter,
|
|
},
|
|
"message": "Available tasks retrieved (MVP mock data)",
|
|
})
|
|
}
|
|
|
|
func (s *Server) getProjectRepositoryHandler(w http.ResponseWriter, r *http.Request) {
|
|
render.Status(r, http.StatusNotImplemented)
|
|
render.JSON(w, r, map[string]string{"error": "not implemented"})
|
|
}
|
|
|
|
func (s *Server) getProjectTaskHandler(w http.ResponseWriter, r *http.Request) {
|
|
render.Status(r, http.StatusNotImplemented)
|
|
render.JSON(w, r, map[string]string{"error": "not implemented"})
|
|
}
|
|
|
|
func (s *Server) claimTaskHandler(w http.ResponseWriter, r *http.Request) {
|
|
taskIDStr := chi.URLParam(r, "taskID")
|
|
taskID, err := uuid.Parse(taskIDStr)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid task ID format"})
|
|
return
|
|
}
|
|
|
|
var claimData struct {
|
|
TeamID string `json:"team_id"`
|
|
AgentID string `json:"agent_id,omitempty"`
|
|
Reason string `json:"reason,omitempty"`
|
|
}
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&claimData); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
if claimData.TeamID == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "team_id is required"})
|
|
return
|
|
}
|
|
|
|
// Validate team exists
|
|
teamUUID, err := uuid.Parse(claimData.TeamID)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid team_id format"})
|
|
return
|
|
}
|
|
|
|
// Check if team exists
|
|
_, _, err = s.teamComposer.GetTeam(r.Context(), teamUUID)
|
|
if err != nil {
|
|
if strings.Contains(err.Error(), "not found") {
|
|
render.Status(r, http.StatusNotFound)
|
|
render.JSON(w, r, map[string]string{"error": "team not found"})
|
|
return
|
|
}
|
|
|
|
log.Error().Err(err).Str("team_id", claimData.TeamID).Msg("Failed to validate team")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to validate team"})
|
|
return
|
|
}
|
|
|
|
// Parse agent ID if provided
|
|
var agentUUID *uuid.UUID
|
|
if claimData.AgentID != "" {
|
|
agentID, err := uuid.Parse(claimData.AgentID)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid agent_id format"})
|
|
return
|
|
}
|
|
agentUUID = &agentID
|
|
}
|
|
|
|
// Assign task to team/agent
|
|
assignment := &tasks.TaskAssignment{
|
|
TaskID: taskID,
|
|
TeamID: &teamUUID,
|
|
AgentID: agentUUID,
|
|
Reason: claimData.Reason,
|
|
}
|
|
|
|
err = s.taskService.AssignTask(r.Context(), assignment)
|
|
if err != nil {
|
|
log.Error().Err(err).Str("task_id", taskIDStr).Msg("Failed to assign task")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to assign task"})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("task_id", taskIDStr).
|
|
Str("team_id", claimData.TeamID).
|
|
Str("agent_id", claimData.AgentID).
|
|
Msg("Task assigned to team")
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"task_id": taskIDStr,
|
|
"team_id": claimData.TeamID,
|
|
"agent_id": claimData.AgentID,
|
|
"status": "claimed",
|
|
"claimed_at": time.Now().Format(time.RFC3339),
|
|
"message": "Task assigned successfully",
|
|
})
|
|
}
|
|
|
|
func (s *Server) updateTaskStatusHandler(w http.ResponseWriter, r *http.Request) {
|
|
render.Status(r, http.StatusNotImplemented)
|
|
render.JSON(w, r, map[string]string{"error": "not implemented"})
|
|
}
|
|
|
|
func (s *Server) completeTaskHandler(w http.ResponseWriter, r *http.Request) {
|
|
render.Status(r, http.StatusNotImplemented)
|
|
render.JSON(w, r, map[string]string{"error": "not implemented"})
|
|
}
|
|
|
|
func (s *Server) listAgentsHandler(w http.ResponseWriter, r *http.Request) {
|
|
// Get discovered CHORUS agents from P2P discovery
|
|
discoveredAgents := s.p2pDiscovery.GetAgents()
|
|
|
|
// Convert to API format
|
|
agents := make([]map[string]interface{}, 0, len(discoveredAgents))
|
|
onlineCount := 0
|
|
idleCount := 0
|
|
offlineCount := 0
|
|
|
|
for _, agent := range discoveredAgents {
|
|
agentData := map[string]interface{}{
|
|
"id": agent.ID,
|
|
"name": agent.Name,
|
|
"status": agent.Status,
|
|
"capabilities": agent.Capabilities,
|
|
"model": agent.Model,
|
|
"endpoint": agent.Endpoint,
|
|
"last_seen": agent.LastSeen.Format(time.RFC3339),
|
|
"tasks_completed": agent.TasksCompleted,
|
|
"p2p_addr": agent.P2PAddr,
|
|
"cluster_id": agent.ClusterID,
|
|
}
|
|
|
|
// Add current team if present
|
|
if agent.CurrentTeam != "" {
|
|
agentData["current_team"] = agent.CurrentTeam
|
|
} else {
|
|
agentData["current_team"] = nil
|
|
}
|
|
|
|
agents = append(agents, agentData)
|
|
|
|
// Count status
|
|
switch agent.Status {
|
|
case "online":
|
|
onlineCount++
|
|
case "idle":
|
|
idleCount++
|
|
case "working":
|
|
onlineCount++ // Working agents are considered online
|
|
default:
|
|
offlineCount++
|
|
}
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"agents": agents,
|
|
"total": len(agents),
|
|
"online": onlineCount,
|
|
"idle": idleCount,
|
|
"offline": offlineCount,
|
|
})
|
|
}
|
|
|
|
func (s *Server) registerAgentHandler(w http.ResponseWriter, r *http.Request) {
|
|
var agentData struct {
|
|
Name string `json:"name"`
|
|
EndpointURL string `json:"endpoint_url"`
|
|
Capabilities map[string]interface{} `json:"capabilities"`
|
|
}
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&agentData); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate required fields
|
|
if agentData.Name == "" || agentData.EndpointURL == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "name and endpoint_url are required"})
|
|
return
|
|
}
|
|
|
|
// Create agent record
|
|
agent := &composer.Agent{
|
|
ID: uuid.New(),
|
|
Name: agentData.Name,
|
|
EndpointURL: agentData.EndpointURL,
|
|
Capabilities: agentData.Capabilities,
|
|
Status: composer.AgentStatusAvailable,
|
|
LastSeen: time.Now(),
|
|
PerformanceMetrics: make(map[string]interface{}),
|
|
CreatedAt: time.Now(),
|
|
UpdatedAt: time.Now(),
|
|
}
|
|
|
|
// Initialize empty capabilities if none provided
|
|
if agent.Capabilities == nil {
|
|
agent.Capabilities = make(map[string]interface{})
|
|
}
|
|
|
|
// Insert into database
|
|
capabilitiesJSON, _ := json.Marshal(agent.Capabilities)
|
|
metricsJSON, _ := json.Marshal(agent.PerformanceMetrics)
|
|
|
|
query := `
|
|
INSERT INTO agents (id, name, endpoint_url, capabilities, status, last_seen, performance_metrics, created_at, updated_at)
|
|
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
`
|
|
|
|
_, err := s.db.Pool.Exec(r.Context(), query,
|
|
agent.ID, agent.Name, agent.EndpointURL, capabilitiesJSON,
|
|
agent.Status, agent.LastSeen, metricsJSON,
|
|
agent.CreatedAt, agent.UpdatedAt)
|
|
|
|
if err != nil {
|
|
log.Error().Err(err).Str("agent_name", agent.Name).Msg("Failed to register agent")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to register agent"})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("agent_id", agent.ID.String()).
|
|
Str("agent_name", agent.Name).
|
|
Str("endpoint", agent.EndpointURL).
|
|
Msg("Agent registered successfully")
|
|
|
|
render.Status(r, http.StatusCreated)
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"agent": agent,
|
|
"message": "Agent registered successfully",
|
|
})
|
|
}
|
|
|
|
func (s *Server) updateAgentStatusHandler(w http.ResponseWriter, r *http.Request) {
|
|
agentIDStr := chi.URLParam(r, "agentID")
|
|
agentID, err := uuid.Parse(agentIDStr)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid agent ID"})
|
|
return
|
|
}
|
|
|
|
var statusUpdate struct {
|
|
Status string `json:"status"`
|
|
PerformanceMetrics map[string]interface{} `json:"performance_metrics,omitempty"`
|
|
Reason string `json:"reason,omitempty"`
|
|
}
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&statusUpdate); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate status values
|
|
validStatuses := map[string]bool{
|
|
"available": true,
|
|
"busy": true,
|
|
"idle": true,
|
|
"offline": true,
|
|
}
|
|
|
|
if !validStatuses[statusUpdate.Status] {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid status. Valid values: available, busy, idle, offline"})
|
|
return
|
|
}
|
|
|
|
// Update agent status and last_seen timestamp
|
|
updateQuery := `
|
|
UPDATE agents
|
|
SET status = $1, last_seen = $2, updated_at = $2
|
|
WHERE id = $3
|
|
`
|
|
|
|
_, err = s.db.Pool.Exec(r.Context(), updateQuery, statusUpdate.Status, time.Now(), agentID)
|
|
if err != nil {
|
|
log.Error().Err(err).
|
|
Str("agent_id", agentIDStr).
|
|
Str("status", statusUpdate.Status).
|
|
Msg("Failed to update agent status")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to update agent status"})
|
|
return
|
|
}
|
|
|
|
// Update performance metrics if provided
|
|
if statusUpdate.PerformanceMetrics != nil {
|
|
metricsJSON, _ := json.Marshal(statusUpdate.PerformanceMetrics)
|
|
_, err = s.db.Pool.Exec(r.Context(),
|
|
`UPDATE agents SET performance_metrics = $1 WHERE id = $2`,
|
|
metricsJSON, agentID)
|
|
|
|
if err != nil {
|
|
log.Warn().Err(err).
|
|
Str("agent_id", agentIDStr).
|
|
Msg("Failed to update agent performance metrics")
|
|
}
|
|
}
|
|
|
|
log.Info().
|
|
Str("agent_id", agentIDStr).
|
|
Str("status", statusUpdate.Status).
|
|
Str("reason", statusUpdate.Reason).
|
|
Msg("Agent status updated")
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"agent_id": agentIDStr,
|
|
"status": statusUpdate.Status,
|
|
"message": "Agent status updated successfully",
|
|
})
|
|
}
|
|
|
|
// Project Management Handlers
|
|
|
|
func (s *Server) listProjectsHandler(w http.ResponseWriter, r *http.Request) {
|
|
// For MVP, return hardcoded projects list
|
|
// In full implementation, this would query database
|
|
projects := []map[string]interface{}{
|
|
{
|
|
"id": "whoosh-001",
|
|
"name": "WHOOSH",
|
|
"repo_url": "https://gitea.chorus.services/tony/WHOOSH",
|
|
"description": "Autonomous AI Development Teams Architecture",
|
|
"tech_stack": []string{"Go", "Docker", "PostgreSQL"},
|
|
"status": "active",
|
|
"created_at": "2025-09-04T00:00:00Z",
|
|
"team_size": 3,
|
|
},
|
|
{
|
|
"id": "chorus-001",
|
|
"name": "CHORUS",
|
|
"repo_url": "https://gitea.chorus.services/tony/CHORUS",
|
|
"description": "AI Agent P2P Coordination System",
|
|
"tech_stack": []string{"Go", "P2P", "LibP2P"},
|
|
"status": "active",
|
|
"created_at": "2025-09-03T00:00:00Z",
|
|
"team_size": 2,
|
|
},
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"projects": projects,
|
|
"total": len(projects),
|
|
})
|
|
}
|
|
|
|
// createProjectHandler handles POST /api/projects requests to add new GITEA repositories
|
|
// for team composition analysis. This is the core MVP functionality that allows users
|
|
// to register repositories that will be analyzed by the N8N workflow.
|
|
//
|
|
// Implementation decision: We use an anonymous struct for the request payload rather than
|
|
// a named struct because this is a simple, internal API that doesn't need to be shared
|
|
// across packages. This reduces complexity while maintaining type safety.
|
|
//
|
|
// TODO: In production, this would persist to PostgreSQL database rather than just
|
|
// returning in-memory data. The database integration is prepared in the docker-compose
|
|
// but not yet implemented in the handlers.
|
|
func (s *Server) createProjectHandler(w http.ResponseWriter, r *http.Request) {
|
|
// Parse and validate request using secure validation
|
|
var reqData map[string]interface{}
|
|
|
|
if err := s.validator.DecodeAndValidateJSON(r, &reqData); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid JSON payload"})
|
|
return
|
|
}
|
|
|
|
// Validate request using comprehensive validation
|
|
if errors := validation.ValidateProjectRequest(reqData); !s.validator.ValidateAndRespond(w, r, errors) {
|
|
return
|
|
}
|
|
|
|
// Extract validated fields
|
|
name := validation.SanitizeString(reqData["name"].(string))
|
|
repoURL := validation.SanitizeString(reqData["repo_url"].(string))
|
|
description := ""
|
|
if desc, exists := reqData["description"]; exists && desc != nil {
|
|
description = validation.SanitizeString(desc.(string))
|
|
}
|
|
|
|
// Generate unique project ID using Unix timestamp. In production, this would be
|
|
// a proper UUID or database auto-increment, but for MVP simplicity, timestamp-based
|
|
// IDs are sufficient and provide natural ordering.
|
|
projectID := fmt.Sprintf("proj-%d", time.Now().Unix())
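	// Example: a request handled at Unix time 1756944000 yields the ID "proj-1756944000".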
|
|
|
|
// Project data structure matches the expected format for the frontend UI.
|
|
// Status "created" indicates the project is registered but not yet analyzed.
|
|
// This will be updated to "analyzing" -> "completed" by the N8N workflow.
|
|
project := map[string]interface{}{
|
|
"id": projectID,
|
|
"name": name,
|
|
"repo_url": repoURL,
|
|
"description": description,
|
|
"status": "created",
|
|
"created_at": time.Now().Format(time.RFC3339),
|
|
"team_size": 0, // Will be populated after N8N analysis
|
|
}
|
|
|
|
// Structured logging with zerolog provides excellent performance and
|
|
// searchability in production environments. Include key identifiers
|
|
// for debugging and audit trails.
|
|
log.Info().
|
|
Str("project_id", projectID).
|
|
Str("repo_url", repoURL).
|
|
Msg("Created new project")
|
|
|
|
// Return 201 Created with the project data. The frontend will use this
|
|
// response to update the UI and potentially trigger immediate analysis.
|
|
render.Status(r, http.StatusCreated)
|
|
render.JSON(w, r, project)
|
|
}
|
|
|
|
// deleteProjectHandler handles DELETE /api/projects/{projectID} requests to remove
|
|
// repositories from management. This allows users to clean up their project list
|
|
// and stop monitoring repositories that are no longer relevant.
|
|
//
|
|
// Implementation decision: We use chi.URLParam to extract the project ID from the
|
|
// URL path rather than query parameters, following REST conventions where the
|
|
// resource identifier is part of the path structure.
|
|
func (s *Server) deleteProjectHandler(w http.ResponseWriter, r *http.Request) {
|
|
// Extract project ID from URL path parameter. Chi router handles the parsing
|
|
// and validation of the URL structure, so we can safely assume this exists
|
|
// if the route matched.
|
|
projectID := chi.URLParam(r, "projectID")
|
|
|
|
// Log the deletion for audit purposes. In a production system, you'd want
|
|
// to track who deleted what and when for compliance and debugging.
|
|
log.Info().
|
|
Str("project_id", projectID).
|
|
Msg("Deleted project")
|
|
|
|
render.JSON(w, r, map[string]string{"message": "project deleted"})
|
|
}
|
|
|
|
// getProjectHandler handles GET /api/projects/{projectID} requests to retrieve
|
|
// detailed information about a specific project, including its analysis results
|
|
// and team formation recommendations from the N8N workflow.
|
|
//
|
|
// Implementation decision: We return mock data for now since database persistence
|
|
// isn't implemented yet. In production, this would query PostgreSQL for the
|
|
// actual project record and its associated analysis results.
|
|
func (s *Server) getProjectHandler(w http.ResponseWriter, r *http.Request) {
|
|
projectID := chi.URLParam(r, "projectID")
|
|
|
|
// TODO: Replace with database query - this mock data demonstrates the expected
|
|
// response structure that the frontend UI will consume. The tech_stack and
|
|
// team_size fields would be populated by N8N workflow analysis results.
|
|
project := map[string]interface{}{
|
|
"id": projectID,
|
|
"name": "Sample Project",
|
|
"repo_url": "https://gitea.chorus.services/tony/" + projectID,
|
|
"description": "Sample project description",
|
|
"tech_stack": []string{"Go", "JavaScript"}, // From N8N analysis
|
|
"status": "active",
|
|
"created_at": "2025-09-04T00:00:00Z",
|
|
"team_size": 2, // From N8N team formation recommendations
|
|
}
|
|
|
|
render.JSON(w, r, project)
|
|
}
|
|
|
|
// analyzeProjectHandler handles POST /api/projects/{projectID}/analyze requests to
|
|
// trigger the N8N Team Formation Analysis workflow. This is the core integration point
|
|
// that connects WHOOSH to the AI-powered repository analysis system.
|
|
//
|
|
// Implementation decisions:
|
|
// 1. 60-second timeout for N8N requests because LLM analysis can be slow
|
|
// 2. Direct HTTP client rather than a service layer for simplicity in MVP
|
|
// 3. Graceful fallback to mock data when request body is empty
|
|
// 4. Comprehensive error handling with structured logging for debugging
|
|
//
|
|
// This handler represents the "missing link" that was identified as the core need:
|
|
// WHOOSH UI → N8N workflow → LLM analysis → team formation recommendations
|
|
func (s *Server) analyzeProjectHandler(w http.ResponseWriter, r *http.Request) {
|
|
projectID := chi.URLParam(r, "projectID")
|
|
|
|
// Project data structure for N8N payload. In production, this would be fetched
|
|
// from the database using the projectID, but for MVP we allow it to be provided
|
|
// in the request body or fall back to predictable mock data.
|
|
var projectData struct {
|
|
RepoURL string `json:"repo_url"`
|
|
Name string `json:"name"`
|
|
}
|
|
|
|
// Handle both scenarios: explicit project data in request body (for testing)
|
|
// and implicit data fetching (for production UI). This flexibility makes the
|
|
// API easier to test manually while supporting the intended UI workflow.
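	// Note: for server-side requests r.Body is never http.NoBody, so this branch is
	// effectively always taken; an empty body simply fails to decode and falls through
	// to the same database-lookup-then-mock fallback as the else branch below.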
|
|
if r.Body != http.NoBody {
|
|
if err := json.NewDecoder(r.Body).Decode(&projectData); err != nil {
|
|
// Try to fetch from database first, fallback to mock data if not found
|
|
if err := s.lookupProjectData(r.Context(), projectID, &projectData); err != nil {
|
|
// Fallback to predictable mock data based on projectID for testing
|
|
projectData.RepoURL = "https://gitea.chorus.services/tony/" + projectID
|
|
projectData.Name = projectID
|
|
}
|
|
}
|
|
} else {
|
|
// No body provided - try database lookup first, fallback to mock data
|
|
if err := s.lookupProjectData(r.Context(), projectID, &projectData); err != nil {
|
|
// Fallback to mock data if database lookup fails
|
|
projectData.RepoURL = "https://gitea.chorus.services/tony/" + projectID
|
|
projectData.Name = projectID
|
|
}
|
|
}
|
|
|
|
// Start BACKBEAT search tracking if available
|
|
searchID := fmt.Sprintf("analyze-%s", projectID)
|
|
if s.backbeat != nil {
|
|
if err := s.backbeat.StartSearch(searchID, fmt.Sprintf("Analyzing project %s (%s)", projectID, projectData.RepoURL), 4); err != nil {
|
|
log.Warn().Err(err).Msg("Failed to start BACKBEAT search tracking")
|
|
}
|
|
}
|
|
|
|
// Log the analysis initiation for debugging and audit trails. Repository URL
|
|
// is crucial for troubleshooting N8N workflow issues.
|
|
log.Info().
|
|
Str("project_id", projectID).
|
|
Str("repo_url", projectData.RepoURL).
|
|
Msg("🔍 Starting project analysis via N8N workflow with BACKBEAT tracking")
|
|
|
|
// Execute analysis within BACKBEAT beat budget (4 beats = 2 minutes at 2 BPM)
|
|
var analysisResult map[string]interface{}
|
|
analysisFunc := func() error {
|
|
// Update BACKBEAT phase to querying
|
|
if s.backbeat != nil {
|
|
s.backbeat.UpdateSearchPhase(searchID, backbeat.PhaseQuerying, 0)
|
|
}
|
|
|
|
// HTTP client with generous timeout because:
|
|
// 1. N8N workflow fetches multiple files from repository
|
|
// 2. LLM analysis (Ollama) can take 10-30 seconds depending on model size
|
|
// 3. Network latency between services in Docker Swarm
|
|
// 60 seconds provides buffer while still failing fast for real issues
|
|
client := &http.Client{Timeout: 60 * time.Second}
|
|
|
|
// Payload structure matches the N8N workflow webhook expectations.
|
|
// The workflow expects these exact field names to properly route data
|
|
// through the file fetching and analysis nodes.
|
|
payload := map[string]interface{}{
|
|
"repo_url": projectData.RepoURL, // Primary input for file fetching
|
|
"project_name": projectData.Name, // Used in LLM analysis context
|
|
}
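		// Example payload sent to the N8N webhook (illustrative values):
		//
		//	{
		//	  "repo_url": "https://gitea.chorus.services/tony/WHOOSH",
		//	  "project_name": "WHOOSH"
		//	}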
|
|
|
|
// JSON marshaling without error checking is acceptable here because we control
|
|
// the payload structure and know it will always be valid JSON.
|
|
payloadBytes, _ := json.Marshal(payload)
|
|
|
|
// Call to configurable N8N instance for team formation workflow
|
|
// The webhook URL is constructed from the base URL in configuration
|
|
n8nWebhookURL := s.config.N8N.BaseURL + "/webhook/team-formation"
|
|
resp, err := client.Post(
|
|
n8nWebhookURL,
|
|
"application/json",
|
|
bytes.NewBuffer(payloadBytes),
|
|
)
|
|
|
|
// Network-level error handling (connection refused, timeout, DNS issues)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to trigger N8N workflow")
|
|
return fmt.Errorf("failed to trigger N8N workflow: %w", err)
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
// HTTP-level error handling (N8N returned an error status)
|
|
if resp.StatusCode != http.StatusOK {
|
|
log.Error().
|
|
Int("status", resp.StatusCode).
|
|
Msg("N8N workflow returned error")
|
|
return fmt.Errorf("N8N workflow returned status %d", resp.StatusCode)
|
|
}
|
|
|
|
// Update BACKBEAT phase to ranking
|
|
if s.backbeat != nil {
|
|
s.backbeat.UpdateSearchPhase(searchID, backbeat.PhaseRanking, 0)
|
|
}
|
|
|
|
// Read the N8N workflow response, which contains the team formation analysis
|
|
// results including detected technologies, complexity scores, and agent assignments.
|
|
body, err := io.ReadAll(resp.Body)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to read N8N response")
|
|
return fmt.Errorf("failed to read N8N response: %w", err)
|
|
}
|
|
|
|
// Parse and return N8N response
|
|
if err := json.Unmarshal(body, &analysisResult); err != nil {
|
|
log.Error().Err(err).Msg("Failed to parse N8N response")
|
|
return fmt.Errorf("failed to parse N8N response: %w", err)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// Execute analysis with BACKBEAT beat budget or fallback to direct execution
|
|
var analysisErr error
|
|
if s.backbeat != nil {
|
|
analysisErr = s.backbeat.ExecuteWithBeatBudget(4, analysisFunc)
|
|
if analysisErr != nil {
|
|
s.backbeat.FailSearch(searchID, analysisErr.Error())
|
|
}
|
|
} else {
|
|
analysisErr = analysisFunc()
|
|
}
|
|
|
|
if analysisErr != nil {
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": analysisErr.Error()})
|
|
return
|
|
}
|
|
|
|
// Complete BACKBEAT search tracking
|
|
if s.backbeat != nil {
|
|
s.backbeat.CompleteSearch(searchID, 1)
|
|
}
|
|
|
|
log.Info().
|
|
Str("project_id", projectID).
|
|
Msg("🔍 Project analysis completed successfully with BACKBEAT tracking")
|
|
|
|
render.JSON(w, r, analysisResult)
|
|
}
|
|
|
|
func (s *Server) giteaWebhookHandler(w http.ResponseWriter, r *http.Request) {
|
|
ctx, span := tracing.StartWebhookSpan(r.Context(), "gitea_webhook", "gitea")
|
|
defer span.End()
|
|
|
|
// Parse webhook payload
|
|
payload, err := s.webhookHandler.ParsePayload(r)
|
|
if err != nil {
|
|
tracing.SetSpanError(span, err)
|
|
log.Error().Err(err).Msg("Failed to parse webhook payload")
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid payload"})
|
|
return
|
|
}
|
|
|
|
span.SetAttributes(
|
|
attribute.String("webhook.action", payload.Action),
|
|
attribute.String("webhook.repository", payload.Repository.FullName),
|
|
attribute.String("webhook.sender", payload.Sender.Login),
|
|
)
|
|
|
|
log.Info().
|
|
Str("action", payload.Action).
|
|
Str("repository", payload.Repository.FullName).
|
|
Str("sender", payload.Sender.Login).
|
|
Msg("Received GITEA webhook")
|
|
|
|
// Process webhook event
|
|
event := s.webhookHandler.ProcessWebhook(payload)
|
|
|
|
// Handle task-related webhooks
|
|
if event.TaskInfo != nil {
|
|
		// Use a comma-ok assertion so a missing or non-string task_type cannot panic the handler.
		taskType, _ := event.TaskInfo["task_type"].(string)
		span.SetAttributes(
			attribute.Bool("webhook.has_task_info", true),
			attribute.String("webhook.task_type", taskType),
		)
|
|
|
|
log.Info().
|
|
Interface("task_info", event.TaskInfo).
|
|
Msg("Processing task issue")
|
|
|
|
// MVP: Store basic task info for future team assignment
|
|
// In full implementation, this would trigger Team Composer
|
|
s.handleTaskWebhook(ctx, event)
|
|
} else {
|
|
span.SetAttributes(attribute.Bool("webhook.has_task_info", false))
|
|
}
|
|
|
|
span.SetAttributes(
|
|
attribute.String("webhook.status", "processed"),
|
|
attribute.Int64("webhook.timestamp", event.Timestamp),
|
|
)
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"status": "received",
|
|
"event_id": event.Timestamp,
|
|
"processed": event.TaskInfo != nil,
|
|
})
|
|
}
|
|
|
|
func (s *Server) handleTaskWebhook(ctx context.Context, event *gitea.WebhookEvent) {
	// MVP implementation: Log task details
	// In the full version, this would:
	// 1. Analyze task complexity
	// 2. Determine required team composition
	// 3. Create team and assign agents
	// 4. Set up P2P communication channels

	log.Info().
		Str("action", event.Action).
		Str("repository", event.Repository).
		Interface("task_info", event.TaskInfo).
		Msg("Task webhook received - MVP logging")

	// For MVP, we'll just acknowledge task detection. Comma-ok assertions keep the
	// handler from panicking if the webhook parser ever omits these fields.
	if event.Action == "opened" || event.Action == "reopened" {
		taskType, _ := event.TaskInfo["task_type"].(string)
		priority, _ := event.TaskInfo["priority"].(string)

		log.Info().
			Str("task_type", taskType).
			Str("priority", priority).
			Msg("New task detected - ready for team assignment")
	}
}
|
|
|
|
// staticFileHandler serves static files from the UI directory
|
|
func (s *Server) staticFileHandler(uiDir string) http.HandlerFunc {
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
// Extract the file path from the URL
|
|
urlPath := r.URL.Path
|
|
filePath := strings.TrimPrefix(urlPath, "/ui/")
|
|
|
|
// Security check: prevent directory traversal
|
|
if strings.Contains(filePath, "..") {
|
|
http.Error(w, "Invalid file path", http.StatusBadRequest)
|
|
return
|
|
}
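		// Note: filepath.Join below also runs filepath.Clean on the joined path, so the
		// ".." rejection above plus the cleaned join keeps served files inside uiDir.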
|
|
|
|
// Construct full file path
|
|
fullPath := filepath.Join(uiDir, filePath)
|
|
|
|
// Check if file exists
|
|
if _, err := os.Stat(fullPath); os.IsNotExist(err) {
|
|
http.NotFound(w, r)
|
|
return
|
|
}
|
|
|
|
// Serve the file
|
|
http.ServeFile(w, r, fullPath)
|
|
}
|
|
}
|
|
|
|
func (s *Server) dashboardHandler(w http.ResponseWriter, r *http.Request) {
|
|
// Serve the external index.html file
|
|
uiDir := "./ui"
|
|
indexPath := filepath.Join(uiDir, "index.html")
|
|
|
|
// Check if the UI directory and index.html exist
|
|
if _, err := os.Stat(indexPath); os.IsNotExist(err) {
|
|
// Fallback to embedded HTML if external files don't exist
|
|
log.Warn().Msg("External UI files not found, using fallback message")
|
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
|
w.Write([]byte(`<!DOCTYPE html>
|
|
<html lang="en">
|
|
<head>
|
|
<meta charset="UTF-8">
|
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
<title>WHOOSH - Setup Required</title>
|
|
</head>
|
|
<body style="font-family: Arial, sans-serif; margin: 40px; background: #1a1a1a; color: #f0f0f0;">
|
|
<h1>WHOOSH Setup Required</h1>
|
|
<p>External UI files not found. Please ensure the ui/ directory is mounted and contains:</p>
|
|
<ul>
|
|
<li>index.html</li>
|
|
<li>styles.css</li>
|
|
<li>script.js</li>
|
|
</ul>
|
|
<p>Current working directory: ` + getCurrentDir() + `</p>
|
|
</body>
|
|
</html>`))
|
|
return
|
|
}
|
|
|
|
// Serve the external index.html file
|
|
http.ServeFile(w, r, indexPath)
|
|
}
|
|
|
|
// getCurrentDir returns the current working directory for debugging
|
|
func getCurrentDir() string {
|
|
dir, err := os.Getwd()
|
|
if err != nil {
|
|
return "unknown"
|
|
}
|
|
return dir
|
|
}
|
|
// backbeatStatusHandler provides real-time BACKBEAT pulse data
|
|
func (s *Server) backbeatStatusHandler(w http.ResponseWriter, r *http.Request) {
|
|
now := time.Now()
|
|
|
|
// Get real BACKBEAT data if integration is available and started
|
|
if s.backbeat != nil {
|
|
health := s.backbeat.GetHealth()
|
|
|
|
// Extract real BACKBEAT data
|
|
currentBeat := int64(0)
|
|
if beatVal, ok := health["current_beat"]; ok {
|
|
if beat, ok := beatVal.(int64); ok {
|
|
currentBeat = beat
|
|
}
|
|
}
|
|
|
|
currentTempo := 2 // Default fallback
|
|
if tempoVal, ok := health["current_tempo"]; ok {
|
|
if tempo, ok := tempoVal.(int); ok {
|
|
currentTempo = tempo
|
|
}
|
|
}
|
|
|
|
connected := false
|
|
if connVal, ok := health["connected"]; ok {
|
|
if conn, ok := connVal.(bool); ok {
|
|
connected = conn
|
|
}
|
|
}
|
|
|
|
// Determine phase based on BACKBEAT health
|
|
phase := "normal"
|
|
if degradationVal, ok := health["local_degradation"]; ok {
|
|
if degraded, ok := degradationVal.(bool); ok && degraded {
|
|
phase = "degraded"
|
|
}
|
|
}
|
|
|
|
		// Calculate average interval based on tempo (BPM to milliseconds).
		// Guard against a zero tempo so a malformed health payload cannot cause a divide-by-zero panic.
		averageInterval := 0
		if currentTempo > 0 {
			averageInterval = 60000 / currentTempo // Convert BPM to milliseconds between beats
		}
|
|
|
|
// Determine if current beat is a downbeat (every 4th beat)
|
|
isDownbeat := currentBeat%4 == 1
|
|
currentDownbeat := (currentBeat / 4) + 1
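		// Example: beat 5 gives 5%4 == 1 (a downbeat) and belongs to downbeat 2 (5/4 + 1).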
|
|
|
|
response := map[string]interface{}{
|
|
"current_beat": currentBeat,
|
|
"current_downbeat": currentDownbeat,
|
|
"average_interval": averageInterval,
|
|
"phase": phase,
|
|
"is_downbeat": isDownbeat,
|
|
"tempo": currentTempo,
|
|
"connected": connected,
|
|
"timestamp": now.Unix(),
|
|
"status": "live",
|
|
"backbeat_health": health,
|
|
}
|
|
|
|
w.Header().Set("Content-Type", "application/json")
|
|
if err := json.NewEncoder(w).Encode(response); err != nil {
|
|
http.Error(w, "Failed to encode response", http.StatusInternalServerError)
|
|
return
|
|
}
|
|
return
|
|
}
|
|
|
|
// Fallback to basic data if BACKBEAT integration is not available
|
|
response := map[string]interface{}{
|
|
"current_beat": 0,
|
|
"current_downbeat": 0,
|
|
"average_interval": 0,
|
|
"phase": "disconnected",
|
|
"is_downbeat": false,
|
|
"tempo": 0,
|
|
"connected": false,
|
|
"timestamp": now.Unix(),
|
|
"status": "no_backbeat",
|
|
"error": "BACKBEAT integration not available",
|
|
}
|
|
|
|
w.Header().Set("Content-Type", "application/json")
|
|
if err := json.NewEncoder(w).Encode(response); err != nil {
|
|
http.Error(w, "Failed to encode response", http.StatusInternalServerError)
|
|
return
|
|
}
|
|
}
|
|
|
|
// Repository Management Handlers
|
|
|
|
// listRepositoriesHandler returns all monitored repositories
|
|
func (s *Server) listRepositoriesHandler(w http.ResponseWriter, r *http.Request) {
|
|
log.Info().Msg("Listing monitored repositories")
|
|
|
|
query := `
|
|
SELECT id, name, owner, full_name, url, clone_url, ssh_url, source_type,
|
|
monitor_issues, monitor_pull_requests, enable_chorus_integration,
|
|
description, default_branch, is_private, language, topics,
|
|
last_sync_at, sync_status, sync_error, open_issues_count,
|
|
closed_issues_count, total_tasks_created, created_at, updated_at
|
|
FROM repositories
|
|
ORDER BY created_at DESC`
|
|
|
|
rows, err := s.db.Pool.Query(context.Background(), query)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to query repositories")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to query repositories"})
|
|
return
|
|
}
|
|
defer rows.Close()
|
|
|
|
repositories := []map[string]interface{}{}
|
|
for rows.Next() {
|
|
var id, name, owner, fullName, url, sourceType, defaultBranch, syncStatus string
|
|
var cloneURL, sshURL, description, syncError, language *string
|
|
var monitorIssues, monitorPRs, enableChorus, isPrivate bool
|
|
var topicsJSON []byte
|
|
var lastSyncAt *time.Time
|
|
var createdAt, updatedAt time.Time
|
|
var openIssues, closedIssues, totalTasks int
|
|
|
|
err := rows.Scan(&id, &name, &owner, &fullName, &url, &cloneURL, &sshURL, &sourceType,
|
|
&monitorIssues, &monitorPRs, &enableChorus, &description, &defaultBranch,
|
|
&isPrivate, &language, &topicsJSON, &lastSyncAt, &syncStatus, &syncError,
|
|
&openIssues, &closedIssues, &totalTasks, &createdAt, &updatedAt)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to scan repository row")
|
|
continue
|
|
}
|
|
|
|
// Parse topics from JSONB
|
|
var topics []string
|
|
if err := json.Unmarshal(topicsJSON, &topics); err != nil {
|
|
log.Error().Err(err).Msg("Failed to unmarshal topics")
|
|
topics = []string{} // Default to empty slice
|
|
}
|
|
|
|
// Handle nullable lastSyncAt
|
|
var lastSyncFormatted *string
|
|
if lastSyncAt != nil {
|
|
formatted := lastSyncAt.Format(time.RFC3339)
|
|
lastSyncFormatted = &formatted
|
|
}
|
|
|
|
repo := map[string]interface{}{
|
|
"id": id,
|
|
"name": name,
|
|
"owner": owner,
|
|
"full_name": fullName,
|
|
"url": url,
|
|
"clone_url": cloneURL,
|
|
"ssh_url": sshURL,
|
|
"source_type": sourceType,
|
|
"monitor_issues": monitorIssues,
|
|
"monitor_pull_requests": monitorPRs,
|
|
"enable_chorus_integration": enableChorus,
|
|
"description": description,
|
|
"default_branch": defaultBranch,
|
|
"is_private": isPrivate,
|
|
"language": language,
|
|
"topics": topics,
|
|
"last_sync_at": lastSyncFormatted,
|
|
"sync_status": syncStatus,
|
|
"sync_error": syncError,
|
|
"open_issues_count": openIssues,
|
|
"closed_issues_count": closedIssues,
|
|
"total_tasks_created": totalTasks,
|
|
"created_at": createdAt.Format(time.RFC3339),
|
|
"updated_at": updatedAt.Format(time.RFC3339),
|
|
}
|
|
repositories = append(repositories, repo)
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"repositories": repositories,
|
|
"count": len(repositories),
|
|
})
|
|
}
|
|
|
|
// createRepositoryHandler adds a new repository to monitor
|
|
func (s *Server) createRepositoryHandler(w http.ResponseWriter, r *http.Request) {
|
|
var req struct {
|
|
Name string `json:"name"`
|
|
Owner string `json:"owner"`
|
|
URL string `json:"url"`
|
|
SourceType string `json:"source_type"`
|
|
MonitorIssues bool `json:"monitor_issues"`
|
|
MonitorPullRequests bool `json:"monitor_pull_requests"`
|
|
EnableChorusIntegration bool `json:"enable_chorus_integration"`
|
|
Description *string `json:"description"`
|
|
DefaultBranch string `json:"default_branch"`
|
|
IsPrivate bool `json:"is_private"`
|
|
Language *string `json:"language"`
|
|
Topics []string `json:"topics"`
|
|
}
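	// Minimal example request body (field names match the struct above; values are illustrative):
	//
	//	{
	//	  "name": "WHOOSH",
	//	  "owner": "tony",
	//	  "url": "https://gitea.chorus.services/tony/WHOOSH",
	//	  "monitor_issues": true,
	//	  "enable_chorus_integration": true
	//	}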
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate required fields
|
|
if req.Name == "" || req.Owner == "" || req.URL == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "name, owner, and url are required"})
|
|
return
|
|
}
|
|
|
|
// Set defaults
|
|
if req.SourceType == "" {
|
|
req.SourceType = "gitea"
|
|
}
|
|
if req.DefaultBranch == "" {
|
|
req.DefaultBranch = "main"
|
|
}
|
|
if req.Topics == nil {
|
|
req.Topics = []string{}
|
|
}
|
|
|
|
fullName := req.Owner + "/" + req.Name
|
|
|
|
log.Info().
|
|
Str("repository", fullName).
|
|
Str("url", req.URL).
|
|
Msg("Creating new repository monitor")
|
|
|
|
query := `
|
|
INSERT INTO repositories (
|
|
name, owner, full_name, url, source_type, monitor_issues,
|
|
monitor_pull_requests, enable_chorus_integration, description,
|
|
default_branch, is_private, language, topics
|
|
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
|
|
RETURNING id, created_at`
|
|
|
|
// Convert topics slice to JSON for JSONB column
|
|
topicsJSON, err := json.Marshal(req.Topics)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to marshal topics")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to process topics"})
|
|
return
|
|
}
|
|
|
|
var id string
|
|
var createdAt time.Time
|
|
err = s.db.Pool.QueryRow(context.Background(), query,
|
|
req.Name, req.Owner, fullName, req.URL, req.SourceType,
|
|
req.MonitorIssues, req.MonitorPullRequests, req.EnableChorusIntegration,
|
|
req.Description, req.DefaultBranch, req.IsPrivate, req.Language, topicsJSON).
|
|
Scan(&id, &createdAt)
|
|
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to create repository")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to create repository"})
|
|
return
|
|
}
|
|
|
|
// Automatically create required labels in the Gitea repository
|
|
// @goal: WHOOSH-LABELS-004 - Automatic label creation on repository addition
|
|
// WHY: Ensures standardized ecosystem labels are available immediately for issue categorization
|
|
if req.SourceType == "gitea" && s.repoMonitor != nil && s.repoMonitor.GetGiteaClient() != nil {
|
|
log.Info().
|
|
Str("repository", fullName).
|
|
Msg("Creating required labels in Gitea repository")
|
|
|
|
// @goal: WHOOSH-LABELS-004 - Apply standardized label set to new repository
|
|
err := s.repoMonitor.GetGiteaClient().EnsureRequiredLabels(context.Background(), req.Owner, req.Name)
|
|
if err != nil {
|
|
log.Warn().
|
|
Err(err).
|
|
Str("repository", fullName).
|
|
Msg("Failed to create labels in Gitea repository - repository monitoring will still work")
|
|
// Don't fail the entire request if label creation fails
|
|
} else {
|
|
log.Info().
|
|
Str("repository", fullName).
|
|
Msg("Successfully created required labels in Gitea repository")
|
|
}
|
|
}
|
|
|
|
render.Status(r, http.StatusCreated)
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"id": id,
|
|
"full_name": fullName,
|
|
"created_at": createdAt.Format(time.RFC3339),
|
|
"message": "Repository monitor created successfully",
|
|
})
|
|
}
|
|
|
|
// getRepositoryHandler returns a specific repository
|
|
func (s *Server) getRepositoryHandler(w http.ResponseWriter, r *http.Request) {
|
|
repoID := chi.URLParam(r, "repoID")
|
|
|
|
log.Info().Str("repository_id", repoID).Msg("Getting repository details")
|
|
|
|
query := `
|
|
SELECT id, name, owner, full_name, url, clone_url, ssh_url, source_type,
|
|
source_config, monitor_issues, monitor_pull_requests, monitor_releases,
|
|
enable_chorus_integration, chorus_task_labels, auto_assign_teams,
|
|
description, default_branch, is_private, language, topics,
|
|
last_sync_at, last_issue_sync, sync_status, sync_error,
|
|
open_issues_count, closed_issues_count, total_tasks_created,
|
|
created_at, updated_at
|
|
FROM repositories WHERE id = $1`
|
|
|
|
var repo struct {
|
|
ID string `json:"id"`
|
|
Name string `json:"name"`
|
|
Owner string `json:"owner"`
|
|
FullName string `json:"full_name"`
|
|
URL string `json:"url"`
|
|
CloneURL *string `json:"clone_url"`
|
|
SSHURL *string `json:"ssh_url"`
|
|
SourceType string `json:"source_type"`
|
|
SourceConfig []byte `json:"source_config"`
|
|
MonitorIssues bool `json:"monitor_issues"`
|
|
MonitorPullRequests bool `json:"monitor_pull_requests"`
|
|
MonitorReleases bool `json:"monitor_releases"`
|
|
EnableChorusIntegration bool `json:"enable_chorus_integration"`
|
|
ChorusTaskLabels []string `json:"chorus_task_labels"`
|
|
AutoAssignTeams bool `json:"auto_assign_teams"`
|
|
Description *string `json:"description"`
|
|
DefaultBranch string `json:"default_branch"`
|
|
IsPrivate bool `json:"is_private"`
|
|
Language *string `json:"language"`
|
|
Topics []string `json:"topics"`
|
|
LastSyncAt *time.Time `json:"last_sync_at"`
|
|
LastIssueSyncAt *time.Time `json:"last_issue_sync"`
|
|
SyncStatus string `json:"sync_status"`
|
|
SyncError *string `json:"sync_error"`
|
|
OpenIssuesCount int `json:"open_issues_count"`
|
|
ClosedIssuesCount int `json:"closed_issues_count"`
|
|
TotalTasksCreated int `json:"total_tasks_created"`
|
|
CreatedAt time.Time `json:"created_at"`
|
|
UpdatedAt time.Time `json:"updated_at"`
|
|
}
|
|
|
|
err := s.db.Pool.QueryRow(context.Background(), query, repoID).Scan(
|
|
&repo.ID, &repo.Name, &repo.Owner, &repo.FullName, &repo.URL,
|
|
&repo.CloneURL, &repo.SSHURL, &repo.SourceType, &repo.SourceConfig,
|
|
&repo.MonitorIssues, &repo.MonitorPullRequests, &repo.MonitorReleases,
|
|
&repo.EnableChorusIntegration, &repo.ChorusTaskLabels, &repo.AutoAssignTeams,
|
|
&repo.Description, &repo.DefaultBranch, &repo.IsPrivate, &repo.Language,
|
|
&repo.Topics, &repo.LastSyncAt, &repo.LastIssueSyncAt, &repo.SyncStatus,
|
|
&repo.SyncError, &repo.OpenIssuesCount, &repo.ClosedIssuesCount,
|
|
&repo.TotalTasksCreated, &repo.CreatedAt, &repo.UpdatedAt)
|
|
|
|
if err != nil {
|
|
if err.Error() == "no rows in result set" {
|
|
render.Status(r, http.StatusNotFound)
|
|
render.JSON(w, r, map[string]string{"error": "repository not found"})
|
|
return
|
|
}
|
|
log.Error().Err(err).Msg("Failed to get repository")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to get repository"})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, repo)
|
|
}
|
|
|
|
// updateRepositoryHandler updates repository settings
|
|
func (s *Server) updateRepositoryHandler(w http.ResponseWriter, r *http.Request) {
|
|
repoID := chi.URLParam(r, "repoID")
|
|
|
|
var req struct {
|
|
MonitorIssues *bool `json:"monitor_issues"`
|
|
MonitorPullRequests *bool `json:"monitor_pull_requests"`
|
|
MonitorReleases *bool `json:"monitor_releases"`
|
|
EnableChorusIntegration *bool `json:"enable_chorus_integration"`
|
|
AutoAssignTeams *bool `json:"auto_assign_teams"`
|
|
Description *string `json:"description"`
|
|
DefaultBranch *string `json:"default_branch"`
|
|
Language *string `json:"language"`
|
|
Topics []string `json:"topics"`
|
|
}
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
log.Info().Str("repository_id", repoID).Msg("Updating repository settings")
|
|
|
|
// Build dynamic update query
|
|
updates := []string{}
|
|
args := []interface{}{repoID}
|
|
argIndex := 2
|
|
|
|
if req.MonitorIssues != nil {
|
|
updates = append(updates, fmt.Sprintf("monitor_issues = $%d", argIndex))
|
|
args = append(args, *req.MonitorIssues)
|
|
argIndex++
|
|
}
|
|
if req.MonitorPullRequests != nil {
|
|
updates = append(updates, fmt.Sprintf("monitor_pull_requests = $%d", argIndex))
|
|
args = append(args, *req.MonitorPullRequests)
|
|
argIndex++
|
|
}
|
|
if req.MonitorReleases != nil {
|
|
updates = append(updates, fmt.Sprintf("monitor_releases = $%d", argIndex))
|
|
args = append(args, *req.MonitorReleases)
|
|
argIndex++
|
|
}
|
|
if req.EnableChorusIntegration != nil {
|
|
updates = append(updates, fmt.Sprintf("enable_chorus_integration = $%d", argIndex))
|
|
args = append(args, *req.EnableChorusIntegration)
|
|
argIndex++
|
|
}
|
|
if req.AutoAssignTeams != nil {
|
|
updates = append(updates, fmt.Sprintf("auto_assign_teams = $%d", argIndex))
|
|
args = append(args, *req.AutoAssignTeams)
|
|
argIndex++
|
|
}
|
|
if req.Description != nil {
|
|
updates = append(updates, fmt.Sprintf("description = $%d", argIndex))
|
|
args = append(args, *req.Description)
|
|
argIndex++
|
|
}
|
|
if req.DefaultBranch != nil {
|
|
updates = append(updates, fmt.Sprintf("default_branch = $%d", argIndex))
|
|
args = append(args, *req.DefaultBranch)
|
|
argIndex++
|
|
}
|
|
if req.Language != nil {
|
|
updates = append(updates, fmt.Sprintf("language = $%d", argIndex))
|
|
args = append(args, *req.Language)
|
|
argIndex++
|
|
}
|
|
	if req.Topics != nil {
		// Topics is stored in a JSONB column (see createRepositoryHandler), so marshal
		// the slice rather than passing it through as a Postgres array.
		topicsJSON, err := json.Marshal(req.Topics)
		if err != nil {
			render.Status(r, http.StatusBadRequest)
			render.JSON(w, r, map[string]string{"error": "invalid topics"})
			return
		}
		updates = append(updates, fmt.Sprintf("topics = $%d", argIndex))
		args = append(args, topicsJSON)
		argIndex++
	}
|
|
|
|
if len(updates) == 0 {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "no fields to update"})
|
|
return
|
|
}
|
|
|
|
updates = append(updates, fmt.Sprintf("updated_at = $%d", argIndex))
|
|
args = append(args, time.Now())
|
|
|
|
query := fmt.Sprintf("UPDATE repositories SET %s WHERE id = $1", strings.Join(updates, ", "))
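	// Example generated statement when only monitor_issues is supplied:
	//
	//	UPDATE repositories SET monitor_issues = $2, updated_at = $3 WHERE id = $1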
|
|
|
|
_, err := s.db.Pool.Exec(context.Background(), query, args...)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to update repository")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to update repository"})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, map[string]string{"message": "Repository updated successfully"})
|
|
}
|
|
|
|
// deleteRepositoryHandler removes a repository from monitoring
|
|
func (s *Server) deleteRepositoryHandler(w http.ResponseWriter, r *http.Request) {
|
|
repoID := chi.URLParam(r, "repoID")
|
|
|
|
log.Info().Str("repository_id", repoID).Msg("Deleting repository monitor")
|
|
|
|
query := "DELETE FROM repositories WHERE id = $1"
|
|
result, err := s.db.Pool.Exec(context.Background(), query, repoID)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to delete repository")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to delete repository"})
|
|
return
|
|
}
|
|
|
|
if result.RowsAffected() == 0 {
|
|
render.Status(r, http.StatusNotFound)
|
|
render.JSON(w, r, map[string]string{"error": "repository not found"})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, map[string]string{"message": "Repository deleted successfully"})
|
|
}
|
|
|
|
// syncRepositoryHandler triggers a manual sync of repository issues
|
|
func (s *Server) syncRepositoryHandler(w http.ResponseWriter, r *http.Request) {
|
|
repoID := chi.URLParam(r, "repoID")
|
|
|
|
log.Info().Str("repository_id", repoID).Msg("Manual repository sync triggered")
|
|
|
|
if s.repoMonitor == nil {
|
|
render.Status(r, http.StatusServiceUnavailable)
|
|
render.JSON(w, r, map[string]string{"error": "repository monitoring service not available"})
|
|
return
|
|
}
|
|
|
|
// Trigger repository sync in background
|
|
go func() {
|
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
|
defer cancel()
|
|
|
|
if err := s.repoMonitor.SyncRepository(ctx, repoID); err != nil {
|
|
log.Error().
|
|
Err(err).
|
|
Str("repository_id", repoID).
|
|
Msg("Manual repository sync failed")
|
|
}
|
|
}()
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"message": "Repository sync triggered",
|
|
"repository_id": repoID,
|
|
"status": "started",
|
|
})
|
|
}
|
|
|
|
// ensureRepositoryLabelsHandler ensures required labels exist in the Gitea repository
|
|
func (s *Server) ensureRepositoryLabelsHandler(w http.ResponseWriter, r *http.Request) {
|
|
repoID := chi.URLParam(r, "repoID")
|
|
|
|
log.Info().Str("repository_id", repoID).Msg("Ensuring repository labels")
|
|
|
|
if s.repoMonitor == nil || s.repoMonitor.GetGiteaClient() == nil {
|
|
render.Status(r, http.StatusServiceUnavailable)
|
|
render.JSON(w, r, map[string]string{"error": "repository monitoring service not available"})
|
|
return
|
|
}
|
|
|
|
// Get repository details first
|
|
query := "SELECT owner, name FROM repositories WHERE id = $1"
|
|
var owner, name string
|
|
err := s.db.Pool.QueryRow(context.Background(), query, repoID).Scan(&owner, &name)
|
|
if err != nil {
|
|
if err.Error() == "no rows in result set" {
|
|
render.Status(r, http.StatusNotFound)
|
|
render.JSON(w, r, map[string]string{"error": "repository not found"})
|
|
return
|
|
}
|
|
log.Error().Err(err).Msg("Failed to get repository")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to get repository"})
|
|
return
|
|
}
|
|
|
|
// @goal: WHOOSH-LABELS-004 - Manual label synchronization endpoint
|
|
// WHY: Allows updating existing repositories to standardized label set
|
|
err = s.repoMonitor.GetGiteaClient().EnsureRequiredLabels(context.Background(), owner, name)
|
|
if err != nil {
|
|
log.Error().
|
|
Err(err).
|
|
Str("repository_id", repoID).
|
|
Str("owner", owner).
|
|
Str("name", name).
|
|
Msg("Failed to ensure repository labels")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to create labels: " + err.Error()})
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("repository_id", repoID).
|
|
Str("owner", owner).
|
|
Str("name", name).
|
|
Msg("Successfully ensured repository labels")
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"message": "Repository labels ensured successfully",
|
|
"repository_id": repoID,
|
|
"owner": owner,
|
|
"name": name,
|
|
})
|
|
}
|
|
|
|
// getRepositorySyncLogsHandler returns sync logs for a repository
|
|
func (s *Server) getRepositorySyncLogsHandler(w http.ResponseWriter, r *http.Request) {
|
|
repoID := chi.URLParam(r, "repoID")
|
|
limit := 50
|
|
|
|
if limitParam := r.URL.Query().Get("limit"); limitParam != "" {
|
|
if l, err := strconv.Atoi(limitParam); err == nil && l > 0 && l <= 1000 {
|
|
limit = l
|
|
}
|
|
}
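	// Example: ?limit=200 is honored; ?limit=5000 or a non-numeric value falls back to the default of 50.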
|
|
|
|
log.Info().Str("repository_id", repoID).Int("limit", limit).Msg("Getting repository sync logs")
|
|
|
|
query := `
|
|
SELECT id, sync_type, operation, status, message, error_details,
|
|
items_processed, items_created, items_updated, duration_ms,
|
|
external_id, external_url, created_at
|
|
FROM repository_sync_logs
|
|
WHERE repository_id = $1
|
|
ORDER BY created_at DESC
|
|
LIMIT $2`
|
|
|
|
rows, err := s.db.Pool.Query(context.Background(), query, repoID, limit)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to query sync logs")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to query sync logs"})
|
|
return
|
|
}
|
|
defer rows.Close()
|
|
|
|
logs := []map[string]interface{}{}
|
|
for rows.Next() {
|
|
var id, syncType, operation, status, message string
|
|
var errorDetails []byte
|
|
var itemsProcessed, itemsCreated, itemsUpdated, durationMs int
|
|
var externalID, externalURL *string
|
|
var createdAt time.Time
|
|
|
|
err := rows.Scan(&id, &syncType, &operation, &status, &message, &errorDetails,
|
|
&itemsProcessed, &itemsCreated, &itemsUpdated, &durationMs,
|
|
&externalID, &externalURL, &createdAt)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to scan sync log row")
|
|
continue
|
|
}
|
|
|
|
logEntry := map[string]interface{}{
|
|
"id": id,
|
|
"sync_type": syncType,
|
|
"operation": operation,
|
|
"status": status,
|
|
"message": message,
|
|
"error_details": string(errorDetails),
|
|
"items_processed": itemsProcessed,
|
|
"items_created": itemsCreated,
|
|
"items_updated": itemsUpdated,
|
|
"duration_ms": durationMs,
|
|
"external_id": externalID,
|
|
"external_url": externalURL,
|
|
"created_at": createdAt.Format(time.RFC3339),
|
|
}
|
|
logs = append(logs, logEntry)
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"logs": logs,
|
|
"count": len(logs),
|
|
})
|
|
}
|
|
|
|
// Council management handlers
|
|
|
|
func (s *Server) getCouncilHandler(w http.ResponseWriter, r *http.Request) {
|
|
councilIDStr := chi.URLParam(r, "councilID")
|
|
councilID, err := uuid.Parse(councilIDStr)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid council ID"})
|
|
return
|
|
}
|
|
|
|
composition, err := s.councilComposer.GetCouncilComposition(r.Context(), councilID)
|
|
if err != nil {
|
|
if strings.Contains(err.Error(), "no rows in result set") {
|
|
render.Status(r, http.StatusNotFound)
|
|
render.JSON(w, r, map[string]string{"error": "council not found"})
|
|
return
|
|
}
|
|
|
|
log.Error().Err(err).Str("council_id", councilIDStr).Msg("Failed to get council composition")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to retrieve council"})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, composition)
|
|
}
|
|
|
|
func (s *Server) getCouncilArtifactsHandler(w http.ResponseWriter, r *http.Request) {
|
|
councilIDStr := chi.URLParam(r, "councilID")
|
|
councilID, err := uuid.Parse(councilIDStr)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid council ID"})
|
|
return
|
|
}
|
|
|
|
// Query all artifacts for this council
|
|
query := `
|
|
SELECT id, artifact_type, artifact_name, content, content_json, produced_at, produced_by, status
|
|
FROM council_artifacts
|
|
WHERE council_id = $1
|
|
ORDER BY produced_at DESC
|
|
`
|
|
|
|
rows, err := s.db.Pool.Query(r.Context(), query, councilID)
|
|
if err != nil {
|
|
log.Error().Err(err).Str("council_id", councilIDStr).Msg("Failed to query council artifacts")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to retrieve artifacts"})
|
|
return
|
|
}
|
|
defer rows.Close()
|
|
|
|
	// Initialize to an empty slice so the JSON response contains [] rather than null when no artifacts exist.
	artifacts := []map[string]interface{}{}
|
|
for rows.Next() {
|
|
var id uuid.UUID
|
|
var artifactType, artifactName, status string
|
|
var content *string
|
|
var contentJSON []byte
|
|
var producedAt time.Time
|
|
var producedBy *string
|
|
|
|
err := rows.Scan(&id, &artifactType, &artifactName, &content, &contentJSON, &producedAt, &producedBy, &status)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to scan artifact row")
|
|
continue
|
|
}
|
|
|
|
artifact := map[string]interface{}{
|
|
"id": id,
|
|
"artifact_type": artifactType,
|
|
"artifact_name": artifactName,
|
|
"content": content,
|
|
"produced_at": producedAt.Format(time.RFC3339),
|
|
"produced_by": producedBy,
|
|
"status": status,
|
|
}
|
|
|
|
// Parse JSON content if available
|
|
if contentJSON != nil {
|
|
var jsonData interface{}
|
|
if err := json.Unmarshal(contentJSON, &jsonData); err == nil {
|
|
artifact["content_json"] = jsonData
|
|
}
|
|
}
|
|
|
|
artifacts = append(artifacts, artifact)
|
|
}
|
|
|
|
if err = rows.Err(); err != nil {
|
|
log.Error().Err(err).Msg("Error iterating artifact rows")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to process artifacts"})
|
|
return
|
|
}
|
|
|
|
render.JSON(w, r, map[string]interface{}{
|
|
"council_id": councilID,
|
|
"artifacts": artifacts,
|
|
"count": len(artifacts),
|
|
})
|
|
}
|
|
|
|
func (s *Server) createCouncilArtifactHandler(w http.ResponseWriter, r *http.Request) {
|
|
councilIDStr := chi.URLParam(r, "councilID")
|
|
councilID, err := uuid.Parse(councilIDStr)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid council ID"})
|
|
return
|
|
}
|
|
|
|
var req struct {
|
|
ArtifactType string `json:"artifact_type"`
|
|
ArtifactName string `json:"artifact_name"`
|
|
Content *string `json:"content,omitempty"`
|
|
ContentJSON interface{} `json:"content_json,omitempty"`
|
|
ProducedBy *string `json:"produced_by,omitempty"`
|
|
Status *string `json:"status,omitempty"`
|
|
}
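	// Example request body (illustrative values; artifact_type must be one of the
	// whitelisted types below, and status defaults to "draft" when omitted):
	//
	//	{
	//	  "artifact_type": "kickoff_manifest",
	//	  "artifact_name": "Project kickoff",
	//	  "content": "...",
	//	  "produced_by": "whoosh"
	//	}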
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid JSON body"})
|
|
return
|
|
}
|
|
|
|
if req.ArtifactType == "" || req.ArtifactName == "" {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "artifact_type and artifact_name are required"})
|
|
return
|
|
}
|
|
|
|
// Set default status if not provided
|
|
status := "draft"
|
|
if req.Status != nil {
|
|
status = *req.Status
|
|
}
|
|
|
|
// Validate artifact type (based on the constraint in the migration)
|
|
validTypes := map[string]bool{
|
|
"kickoff_manifest": true,
|
|
"seminal_dr": true,
|
|
"scaffold_plan": true,
|
|
"gate_tests": true,
|
|
"hmmm_thread": true,
|
|
"slurp_sources": true,
|
|
"shhh_policy": true,
|
|
"ucxl_root": true,
|
|
}
|
|
|
|
if !validTypes[req.ArtifactType] {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid artifact_type"})
|
|
return
|
|
}
|
|
|
|
// Prepare JSON content
|
|
var contentJSONBytes []byte
|
|
if req.ContentJSON != nil {
|
|
contentJSONBytes, err = json.Marshal(req.ContentJSON)
|
|
if err != nil {
|
|
render.Status(r, http.StatusBadRequest)
|
|
render.JSON(w, r, map[string]string{"error": "invalid content_json"})
|
|
return
|
|
}
|
|
}
|
|
|
|
// Insert artifact
|
|
insertQuery := `
|
|
INSERT INTO council_artifacts (council_id, artifact_type, artifact_name, content, content_json, produced_by, status)
|
|
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
|
RETURNING id, produced_at
|
|
`
|
|
|
|
var artifactID uuid.UUID
|
|
var producedAt time.Time
|
|
|
|
err = s.db.Pool.QueryRow(r.Context(), insertQuery, councilID, req.ArtifactType, req.ArtifactName,
|
|
req.Content, contentJSONBytes, req.ProducedBy, status).Scan(&artifactID, &producedAt)
|
|
|
|
if err != nil {
|
|
log.Error().Err(err).Str("council_id", councilIDStr).Msg("Failed to create council artifact")
|
|
render.Status(r, http.StatusInternalServerError)
|
|
render.JSON(w, r, map[string]string{"error": "failed to create artifact"})
|
|
return
|
|
}
|
|
|
|
response := map[string]interface{}{
|
|
"id": artifactID,
|
|
"council_id": councilID,
|
|
"artifact_type": req.ArtifactType,
|
|
"artifact_name": req.ArtifactName,
|
|
"content": req.Content,
|
|
"content_json": req.ContentJSON,
|
|
"produced_by": req.ProducedBy,
|
|
"status": status,
|
|
"produced_at": producedAt.Format(time.RFC3339),
|
|
}
|
|
|
|
render.Status(r, http.StatusCreated)
|
|
render.JSON(w, r, response)
|
|
}
|
|
|
|
// Helper methods for task processing
|
|
|
|
// lookupProjectData queries the repositories table to find project data by name
|
|
func (s *Server) lookupProjectData(ctx context.Context, projectID string, projectData *struct {
|
|
RepoURL string `json:"repo_url"`
|
|
Name string `json:"name"`
|
|
}) error {
|
|
// Query the repositories table to find the repository by name
|
|
// We assume projectID corresponds to the repository name
|
|
query := `
|
|
SELECT name, url
|
|
FROM repositories
|
|
WHERE name = $1 OR full_name LIKE '%/' || $1
|
|
LIMIT 1
|
|
`
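	// Example: projectID "WHOOSH" matches a repository whose name is "WHOOSH" or whose
	// full_name ends in "/WHOOSH" (e.g. "tony/WHOOSH") via the LIKE '%/' || $1 pattern.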
|
|
|
|
var name, url string
|
|
err := s.db.Pool.QueryRow(ctx, query, projectID).Scan(&name, &url)
|
|
if err != nil {
|
|
if strings.Contains(err.Error(), "no rows in result set") {
|
|
return fmt.Errorf("project %s not found in repositories", projectID)
|
|
}
|
|
log.Error().Err(err).Str("project_id", projectID).Msg("Failed to query repository")
|
|
return fmt.Errorf("database error: %w", err)
|
|
}
|
|
|
|
// Populate the project data
|
|
projectData.Name = name
|
|
projectData.RepoURL = url
|
|
|
|
log.Info().
|
|
Str("project_id", projectID).
|
|
Str("name", name).
|
|
Str("repo_url", url).
|
|
Msg("Found project data in repositories table")
|
|
|
|
return nil
|
|
}
|
|
|
|
// inferTechStackFromLabels extracts technology information from labels
|
|
func (s *Server) inferTechStackFromLabels(labels []string) []string {
|
|
techMap := map[string]bool{
|
|
"go": true,
|
|
"golang": true,
|
|
"javascript": true,
|
|
"react": true,
|
|
"node": true,
|
|
"python": true,
|
|
"java": true,
|
|
"rust": true,
|
|
"docker": true,
|
|
"postgres": true,
|
|
"mysql": true,
|
|
"api": true,
|
|
"backend": true,
|
|
"frontend": true,
|
|
"database": true,
|
|
}
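	// Example: labels ["Go", "bug", "Docker"] yield ["go", "docker"]; labels not in the map are ignored.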
|
|
|
|
var techStack []string
|
|
for _, label := range labels {
|
|
if techMap[strings.ToLower(label)] {
|
|
techStack = append(techStack, strings.ToLower(label))
|
|
}
|
|
}
|
|
|
|
return techStack
|
|
}
|
|
|
|
// processTaskAsync handles complex task processing in background
|
|
func (s *Server) processTaskAsync(taskID string, taskInput *composer.TaskAnalysisInput) {
|
|
ctx := context.Background()
|
|
|
|
log.Info().
|
|
Str("task_id", taskID).
|
|
Msg("Starting async task processing")
|
|
|
|
result, err := s.teamComposer.AnalyzeAndComposeTeam(ctx, taskInput)
|
|
if err != nil {
|
|
log.Error().Err(err).
|
|
Str("task_id", taskID).
|
|
Msg("Async task analysis failed")
|
|
return
|
|
}
|
|
|
|
team, err := s.teamComposer.CreateTeam(ctx, result.TeamComposition, taskInput)
|
|
if err != nil {
|
|
log.Error().Err(err).
|
|
Str("task_id", taskID).
|
|
Msg("Async team creation failed")
|
|
return
|
|
}
|
|
|
|
log.Info().
|
|
Str("task_id", taskID).
|
|
Str("team_id", team.ID.String()).
|
|
Float64("confidence", result.TeamComposition.ConfidenceScore).
|
|
Msg("Async task processing completed")
|
|
|
|
// In production, this would update task status in database
|
|
// and potentially notify clients via websockets or webhooks
|
|
} |