package server
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/chorus-services/whoosh/internal/backbeat"
"github.com/chorus-services/whoosh/internal/composer"
"github.com/chorus-services/whoosh/internal/config"
"github.com/chorus-services/whoosh/internal/database"
"github.com/chorus-services/whoosh/internal/gitea"
"github.com/chorus-services/whoosh/internal/p2p"
"github.com/chorus-services/whoosh/internal/tasks"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/go-chi/cors"
"github.com/go-chi/render"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
// Server wires the HTTP API to WHOOSH's core services: task ingestion, team
// composition, Gitea integration, P2P agent discovery, and the optional
// BACKBEAT integration.
type Server struct {
config *config.Config
db *database.DB
httpServer *http.Server
router chi.Router
giteaClient *gitea.Client
webhookHandler *gitea.WebhookHandler
p2pDiscovery *p2p.Discovery
backbeat *backbeat.Integration
teamComposer *composer.Service
taskService *tasks.Service
giteaIntegration *tasks.GiteaIntegration
}
// NewServer constructs the HTTP server, wiring core services, routes, and
// optional integrations from the supplied configuration.
func NewServer(cfg *config.Config, db *database.DB) (*Server, error) {
	// Initialize core services, sharing a single Gitea client instance.
	giteaClient := gitea.NewClient(cfg.GITEA)
	taskService := tasks.NewService(db.Pool)
	giteaIntegration := tasks.NewGiteaIntegration(taskService, giteaClient, nil)
	s := &Server{
		config:           cfg,
		db:               db,
		giteaClient:      giteaClient,
		webhookHandler:   gitea.NewWebhookHandler(cfg.GITEA.WebhookToken),
		p2pDiscovery:     p2p.NewDiscovery(),
		teamComposer:     composer.NewService(db.Pool, nil), // Use default composer config
		taskService:      taskService,
		giteaIntegration: giteaIntegration,
	}
// Initialize BACKBEAT integration if enabled
if cfg.BACKBEAT.Enabled {
backbeatIntegration, err := backbeat.NewIntegration(&cfg.BACKBEAT)
if err != nil {
return nil, fmt.Errorf("failed to create BACKBEAT integration: %w", err)
}
s.backbeat = backbeatIntegration
}
s.setupRouter()
s.setupRoutes()
s.httpServer = &http.Server{
Addr: cfg.Server.ListenAddr,
Handler: s.router,
ReadTimeout: cfg.Server.ReadTimeout,
WriteTimeout: cfg.Server.WriteTimeout,
}
return s, nil
}
// setupRouter builds the chi router with standard middleware, CORS, and JSON
// content-type handling.
func (s *Server) setupRouter() {
r := chi.NewRouter()
// Middleware
r.Use(middleware.RequestID)
r.Use(middleware.RealIP)
r.Use(middleware.Logger)
r.Use(middleware.Recoverer)
r.Use(middleware.Timeout(30 * time.Second))
// CORS configuration
r.Use(cors.Handler(cors.Options{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
AllowedHeaders: []string{"*"},
ExposedHeaders: []string{"Link"},
AllowCredentials: true,
MaxAge: 300,
}))
// Content-Type handling
r.Use(render.SetContentType(render.ContentTypeJSON))
s.router = r
}
// setupRoutes registers the dashboard, health checks, API v1 endpoints, and the
// GITEA webhook endpoint on the router.
func (s *Server) setupRoutes() {
// Root route - serve basic dashboard
s.router.Get("/", s.dashboardHandler)
// Health check endpoints
s.router.Get("/health", s.healthHandler)
s.router.Get("/health/ready", s.readinessHandler)
// API v1 routes
s.router.Route("/api/v1", func(r chi.Router) {
// MVP endpoints - minimal team management
r.Route("/teams", func(r chi.Router) {
r.Get("/", s.listTeamsHandler)
r.Post("/", s.createTeamHandler)
r.Get("/{teamID}", s.getTeamHandler)
r.Put("/{teamID}/status", s.updateTeamStatusHandler)
r.Post("/analyze", s.analyzeTeamCompositionHandler)
})
// Task ingestion from GITEA
r.Route("/tasks", func(r chi.Router) {
r.Get("/", s.listTasksHandler)
r.Post("/ingest", s.ingestTaskHandler)
r.Get("/{taskID}", s.getTaskHandler)
})
// Project management endpoints
r.Route("/projects", func(r chi.Router) {
r.Get("/", s.listProjectsHandler)
r.Post("/", s.createProjectHandler)
r.Delete("/{projectID}", s.deleteProjectHandler)
r.Route("/{projectID}", func(r chi.Router) {
r.Get("/", s.getProjectHandler)
r.Get("/tasks", s.listProjectTasksHandler)
r.Get("/tasks/available", s.listAvailableTasksHandler)
r.Get("/repository", s.getProjectRepositoryHandler)
r.Post("/analyze", s.analyzeProjectHandler)
r.Route("/tasks/{taskNumber}", func(r chi.Router) {
r.Get("/", s.getProjectTaskHandler)
r.Post("/claim", s.claimTaskHandler)
r.Put("/status", s.updateTaskStatusHandler)
r.Post("/complete", s.completeTaskHandler)
})
})
})
// Agent registration endpoints
r.Route("/agents", func(r chi.Router) {
r.Get("/", s.listAgentsHandler)
r.Post("/register", s.registerAgentHandler)
r.Put("/{agentID}/status", s.updateAgentStatusHandler)
})
// SLURP proxy endpoints
r.Route("/slurp", func(r chi.Router) {
r.Post("/submit", s.slurpSubmitHandler)
r.Get("/artifacts/{ucxlAddr}", s.slurpRetrieveHandler)
})
// Repository monitoring endpoints
r.Route("/repositories", func(r chi.Router) {
r.Get("/", s.listRepositoriesHandler)
r.Post("/", s.createRepositoryHandler)
r.Get("/{repoID}", s.getRepositoryHandler)
r.Put("/{repoID}", s.updateRepositoryHandler)
r.Delete("/{repoID}", s.deleteRepositoryHandler)
r.Post("/{repoID}/sync", s.syncRepositoryHandler)
r.Get("/{repoID}/logs", s.getRepositorySyncLogsHandler)
})
// BACKBEAT monitoring endpoints
r.Route("/backbeat", func(r chi.Router) {
r.Get("/status", s.backbeatStatusHandler)
})
})
// GITEA webhook endpoint
s.router.Post(s.config.GITEA.WebhookPath, s.giteaWebhookHandler)
}
// Start launches the optional BACKBEAT integration and P2P discovery, then
// blocks serving HTTP until the server is shut down or fails.
func (s *Server) Start(ctx context.Context) error {
// Start BACKBEAT integration if enabled
if s.backbeat != nil {
if err := s.backbeat.Start(ctx); err != nil {
return fmt.Errorf("failed to start BACKBEAT integration: %w", err)
}
}
// Start P2P discovery service
if err := s.p2pDiscovery.Start(); err != nil {
return fmt.Errorf("failed to start P2P discovery: %w", err)
}
log.Info().
Str("addr", s.httpServer.Addr).
Msg("HTTP server starting")
if err := s.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
return fmt.Errorf("server failed to start: %w", err)
}
return nil
}
// Shutdown stops background services and gracefully drains the HTTP server.
func (s *Server) Shutdown(ctx context.Context) error {
log.Info().Msg("HTTP server shutting down")
// Stop BACKBEAT integration
if s.backbeat != nil {
if err := s.backbeat.Stop(); err != nil {
log.Error().Err(err).Msg("Failed to stop BACKBEAT integration")
}
}
// Stop P2P discovery service
if err := s.p2pDiscovery.Stop(); err != nil {
log.Error().Err(err).Msg("Failed to stop P2P discovery service")
}
if err := s.httpServer.Shutdown(ctx); err != nil {
return fmt.Errorf("server shutdown failed: %w", err)
}
return nil
}
// Health check handlers
func (s *Server) healthHandler(w http.ResponseWriter, r *http.Request) {
response := map[string]interface{}{
"status": "ok",
"service": "whoosh",
"version": "0.1.0-mvp",
}
// Include BACKBEAT health information if available
if s.backbeat != nil {
response["backbeat"] = s.backbeat.GetHealth()
}
render.JSON(w, r, response)
}
func (s *Server) readinessHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
defer cancel()
// Check database connection
if err := s.db.Health(ctx); err != nil {
log.Error().Err(err).Msg("Database health check failed")
render.Status(r, http.StatusServiceUnavailable)
render.JSON(w, r, map[string]string{
"status": "unavailable",
"error": "database connection failed",
})
return
}
render.JSON(w, r, map[string]string{
"status": "ready",
"database": "connected",
})
}
// MVP handlers for team and task management
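//
// listTeamsHandler below supports limit/offset pagination. An illustrative
// request and the response shape it builds (values are examples only):
//
//	GET /api/v1/teams?limit=20&offset=0
//	=> {"teams": [], "total": 0, "limit": 20, "offset": 0}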
func (s *Server) listTeamsHandler(w http.ResponseWriter, r *http.Request) {
// Parse pagination parameters
limitStr := r.URL.Query().Get("limit")
offsetStr := r.URL.Query().Get("offset")
limit := 20 // Default limit
offset := 0 // Default offset
if limitStr != "" {
if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 100 {
limit = l
}
}
if offsetStr != "" {
if o, err := strconv.Atoi(offsetStr); err == nil && o >= 0 {
offset = o
}
}
// Get teams from database
teams, total, err := s.teamComposer.ListTeams(r.Context(), limit, offset)
if err != nil {
log.Error().Err(err).Msg("Failed to list teams")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to retrieve teams"})
return
}
render.JSON(w, r, map[string]interface{}{
"teams": teams,
"total": total,
"limit": limit,
"offset": offset,
})
}
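// createTeamHandler analyzes a task description and creates a team for it.
// A minimal illustrative request body, using only the TaskAnalysisInput fields
// referenced in this file (the composer package may accept more):
//
//	POST /api/v1/teams
//	{"title": "Fix SSO login", "description": "Users cannot log in via SSO", "priority": "high"}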
func (s *Server) createTeamHandler(w http.ResponseWriter, r *http.Request) {
var taskInput composer.TaskAnalysisInput
if err := json.NewDecoder(r.Body).Decode(&taskInput); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
// Validate required fields
if taskInput.Title == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "title is required"})
return
}
if taskInput.Description == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "description is required"})
return
}
// Set defaults if not provided
if taskInput.Priority == "" {
taskInput.Priority = composer.PriorityMedium
}
log.Info().
Str("task_title", taskInput.Title).
Str("priority", string(taskInput.Priority)).
Msg("Starting team composition for new task")
// Analyze task and compose team
result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), &taskInput)
if err != nil {
log.Error().Err(err).Msg("Team composition failed")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "team composition failed"})
return
}
// Create the team in database
team, err := s.teamComposer.CreateTeam(r.Context(), result.TeamComposition, &taskInput)
if err != nil {
log.Error().Err(err).Msg("Failed to create team")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to create team"})
return
}
log.Info().
Str("team_id", team.ID.String()).
Str("team_name", team.Name).
Float64("confidence_score", result.TeamComposition.ConfidenceScore).
Msg("Team created successfully")
// Return both the team and the composition analysis
response := map[string]interface{}{
"team": team,
"composition_result": result,
"message": "Team created successfully",
}
render.Status(r, http.StatusCreated)
render.JSON(w, r, response)
}
func (s *Server) getTeamHandler(w http.ResponseWriter, r *http.Request) {
teamIDStr := chi.URLParam(r, "teamID")
teamID, err := uuid.Parse(teamIDStr)
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid team ID"})
return
}
team, assignments, err := s.teamComposer.GetTeam(r.Context(), teamID)
if err != nil {
if err.Error() == "team not found" {
render.Status(r, http.StatusNotFound)
render.JSON(w, r, map[string]string{"error": "team not found"})
return
}
log.Error().Err(err).Str("team_id", teamIDStr).Msg("Failed to get team")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to retrieve team"})
return
}
response := map[string]interface{}{
"team": team,
"assignments": assignments,
}
render.JSON(w, r, response)
}
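// updateTeamStatusHandler moves a team through its lifecycle. Illustrative
// request (valid statuses: forming, active, completed, disbanded):
//
//	PUT /api/v1/teams/{teamID}/status
//	{"status": "active", "reason": "kickoff complete"}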
func (s *Server) updateTeamStatusHandler(w http.ResponseWriter, r *http.Request) {
teamIDStr := chi.URLParam(r, "teamID")
teamID, err := uuid.Parse(teamIDStr)
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid team ID"})
return
}
var statusUpdate struct {
Status string `json:"status"`
Reason string `json:"reason,omitempty"`
}
if err := json.NewDecoder(r.Body).Decode(&statusUpdate); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
// Validate status values
validStatuses := map[string]bool{
"forming": true,
"active": true,
"completed": true,
"disbanded": true,
}
if !validStatuses[statusUpdate.Status] {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid status. Valid values: forming, active, completed, disbanded"})
return
}
// Update team status in database
updateQuery := `UPDATE teams SET status = $1, updated_at = $2 WHERE id = $3`
if statusUpdate.Status == "completed" {
updateQuery = `UPDATE teams SET status = $1, updated_at = $2, completed_at = $2 WHERE id = $3`
}
_, err = s.db.Pool.Exec(r.Context(), updateQuery, statusUpdate.Status, time.Now(), teamID)
if err != nil {
log.Error().Err(err).
Str("team_id", teamIDStr).
Str("status", statusUpdate.Status).
Msg("Failed to update team status")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to update team status"})
return
}
log.Info().
Str("team_id", teamIDStr).
Str("status", statusUpdate.Status).
Str("reason", statusUpdate.Reason).
Msg("Team status updated")
render.JSON(w, r, map[string]interface{}{
"team_id": teamIDStr,
"status": statusUpdate.Status,
"message": "Team status updated successfully",
})
}
func (s *Server) analyzeTeamCompositionHandler(w http.ResponseWriter, r *http.Request) {
var taskInput composer.TaskAnalysisInput
if err := json.NewDecoder(r.Body).Decode(&taskInput); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
// Validate required fields
if taskInput.Title == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "title is required"})
return
}
if taskInput.Description == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "description is required"})
return
}
// Set defaults if not provided
if taskInput.Priority == "" {
taskInput.Priority = composer.PriorityMedium
}
log.Info().
Str("task_title", taskInput.Title).
Str("priority", string(taskInput.Priority)).
Msg("Analyzing team composition requirements")
// Analyze task and compose team (without creating it)
result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), &taskInput)
if err != nil {
log.Error().Err(err).Msg("Team composition analysis failed")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "team composition analysis failed"})
return
}
log.Info().
Str("analysis_id", result.AnalysisID.String()).
Float64("confidence_score", result.TeamComposition.ConfidenceScore).
Int("recommended_team_size", result.TeamComposition.EstimatedSize).
Msg("Team composition analysis completed")
render.JSON(w, r, result)
}
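// listTasksHandler supports optional status, priority, repository, and
// pagination filters. Illustrative request (the status and priority values are
// examples; valid values are defined by the tasks package):
//
//	GET /api/v1/tasks?status=open&priority=high&repository=example/repo&limit=50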
func (s *Server) listTasksHandler(w http.ResponseWriter, r *http.Request) {
// Parse query parameters
statusParam := r.URL.Query().Get("status")
priorityParam := r.URL.Query().Get("priority")
repositoryParam := r.URL.Query().Get("repository")
limitStr := r.URL.Query().Get("limit")
offsetStr := r.URL.Query().Get("offset")
// Build filter
filter := &tasks.TaskFilter{}
if statusParam != "" && statusParam != "all" {
filter.Status = []tasks.TaskStatus{tasks.TaskStatus(statusParam)}
}
if priorityParam != "" {
filter.Priority = []tasks.TaskPriority{tasks.TaskPriority(priorityParam)}
}
if repositoryParam != "" {
filter.Repository = repositoryParam
}
if limitStr != "" {
if limit, err := strconv.Atoi(limitStr); err == nil && limit > 0 && limit <= 100 {
filter.Limit = limit
}
}
if offsetStr != "" {
if offset, err := strconv.Atoi(offsetStr); err == nil && offset >= 0 {
filter.Offset = offset
}
}
// Get tasks from database
taskList, total, err := s.taskService.ListTasks(r.Context(), filter)
if err != nil {
log.Error().Err(err).Msg("Failed to list tasks")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to retrieve tasks"})
return
}
render.JSON(w, r, map[string]interface{}{
"tasks": taskList,
"total": total,
"filter": filter,
})
}
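// ingestTaskHandler persists a task and kicks off team composition.
// Illustrative request body (fields mirror the anonymous struct below):
//
//	POST /api/v1/tasks/ingest
//	{
//	  "title": "Add rate limiting",
//	  "description": "Implement rate limiting middleware for API endpoints",
//	  "repository": "example/api-gateway",
//	  "priority": "medium",
//	  "labels": ["backend"],
//	  "source": "manual"
//	}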
func (s *Server) ingestTaskHandler(w http.ResponseWriter, r *http.Request) {
var taskData struct {
Title string `json:"title"`
Description string `json:"description"`
Repository string `json:"repository"`
IssueURL string `json:"issue_url,omitempty"`
Priority string `json:"priority,omitempty"`
Labels []string `json:"labels,omitempty"`
Source string `json:"source,omitempty"` // "manual", "gitea", "webhook"
}
if err := json.NewDecoder(r.Body).Decode(&taskData); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
// Validate required fields
if taskData.Title == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "title is required"})
return
}
if taskData.Description == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "description is required"})
return
}
// Set defaults
if taskData.Priority == "" {
taskData.Priority = "medium"
}
if taskData.Source == "" {
taskData.Source = "manual"
}
// Create task ID
taskID := uuid.New().String()
log.Info().
Str("task_id", taskID).
Str("title", taskData.Title).
Str("repository", taskData.Repository).
Str("source", taskData.Source).
Msg("Ingesting new task")
	// Persist the task record first, then run team composition: complex tasks are
	// analyzed asynchronously, simple tasks are handled inline below.
createInput := &tasks.CreateTaskInput{
ExternalID: taskID, // Use generated ID as external ID for manual tasks
ExternalURL: taskData.IssueURL,
SourceType: tasks.SourceType(taskData.Source),
Title: taskData.Title,
Description: taskData.Description,
Priority: tasks.TaskPriority(taskData.Priority),
Repository: taskData.Repository,
Labels: taskData.Labels,
}
createdTask, err := s.taskService.CreateTask(r.Context(), createInput)
if err != nil {
log.Error().Err(err).Str("task_id", taskID).Msg("Failed to create task")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to create task"})
return
}
// Convert to TaskAnalysisInput for team composition
taskInput := &composer.TaskAnalysisInput{
Title: createdTask.Title,
Description: createdTask.Description,
Repository: createdTask.Repository,
Requirements: createdTask.Requirements,
Priority: composer.TaskPriority(createdTask.Priority),
TechStack: createdTask.TechStack,
Metadata: map[string]interface{}{
"task_id": createdTask.ID.String(),
"source": taskData.Source,
"issue_url": taskData.IssueURL,
"labels": createdTask.Labels,
},
}
// Start team composition analysis in background for complex tasks
// For simple tasks, we can process synchronously
isComplex := len(taskData.Description) > 200 ||
len(taskData.Labels) > 3 ||
taskData.Priority == "high" ||
taskData.Priority == "critical"
if isComplex {
// For complex tasks, start async team composition
go s.processTaskAsync(taskID, taskInput)
// Return immediate response
render.Status(r, http.StatusAccepted)
render.JSON(w, r, map[string]interface{}{
"task_id": taskID,
"status": "queued",
"message": "Task queued for team composition analysis",
})
} else {
// For simple tasks, process synchronously
result, err := s.teamComposer.AnalyzeAndComposeTeam(r.Context(), taskInput)
if err != nil {
log.Error().Err(err).Str("task_id", taskID).Msg("Task analysis failed")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "task analysis failed"})
return
}
// Create the team
team, err := s.teamComposer.CreateTeam(r.Context(), result.TeamComposition, taskInput)
if err != nil {
log.Error().Err(err).Str("task_id", taskID).Msg("Team creation failed")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "team creation failed"})
return
}
log.Info().
Str("task_id", taskID).
Str("team_id", team.ID.String()).
Msg("Task ingested and team created")
render.Status(r, http.StatusCreated)
render.JSON(w, r, map[string]interface{}{
"task_id": taskID,
"team": team,
"composition_result": result,
"status": "completed",
"message": "Task ingested and team created successfully",
})
}
}
func (s *Server) getTaskHandler(w http.ResponseWriter, r *http.Request) {
taskIDStr := chi.URLParam(r, "taskID")
taskID, err := uuid.Parse(taskIDStr)
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid task ID format"})
return
}
// Get task from database
task, err := s.taskService.GetTask(r.Context(), taskID)
if err != nil {
if strings.Contains(err.Error(), "not found") {
render.Status(r, http.StatusNotFound)
render.JSON(w, r, map[string]string{"error": "task not found"})
return
}
log.Error().Err(err).Str("task_id", taskIDStr).Msg("Failed to get task")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to retrieve task"})
return
}
render.JSON(w, r, map[string]interface{}{
"task": task,
})
}
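// slurpSubmitHandler records SLURP artifact metadata and mints a UCXL address.
// Illustrative request (team_id must be an existing team UUID):
//
//	POST /api/v1/slurp/submit
//	{"team_id": "<team UUID>", "artifact_type": "design-doc", "content": {}, "metadata": {"author": "agent-7"}}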
func (s *Server) slurpSubmitHandler(w http.ResponseWriter, r *http.Request) {
// Parse the submission request
var submission struct {
TeamID string `json:"team_id"`
ArtifactType string `json:"artifact_type"`
Content map[string]interface{} `json:"content"`
Metadata map[string]interface{} `json:"metadata,omitempty"`
}
if err := json.NewDecoder(r.Body).Decode(&submission); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
// Validate required fields
if submission.TeamID == "" || submission.ArtifactType == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "team_id and artifact_type are required"})
return
}
// Generate UCXL address for the submission
ucxlAddr := fmt.Sprintf("ucxl://%s/%s/%d",
submission.TeamID,
submission.ArtifactType,
time.Now().Unix())
// For MVP, we'll store basic metadata in the database
// In production, this would proxy to actual SLURP service
teamUUID, err := uuid.Parse(submission.TeamID)
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid team_id format"})
return
}
// Store submission record
submissionID := uuid.New()
metadataJSON, _ := json.Marshal(submission.Metadata)
insertQuery := `
INSERT INTO slurp_submissions (id, team_id, ucxl_address, artifact_type, metadata, submitted_at, status)
VALUES ($1, $2, $3, $4, $5, $6, $7)
`
_, err = s.db.Pool.Exec(r.Context(), insertQuery,
submissionID, teamUUID, ucxlAddr, submission.ArtifactType,
metadataJSON, time.Now(), "submitted")
if err != nil {
log.Error().Err(err).
Str("team_id", submission.TeamID).
Str("artifact_type", submission.ArtifactType).
Msg("Failed to store SLURP submission")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to store submission"})
return
}
log.Info().
Str("team_id", submission.TeamID).
Str("artifact_type", submission.ArtifactType).
Str("ucxl_address", ucxlAddr).
Msg("SLURP submission stored")
render.Status(r, http.StatusCreated)
render.JSON(w, r, map[string]interface{}{
"submission_id": submissionID,
"ucxl_address": ucxlAddr,
"status": "submitted",
"message": "Artifact submitted to SLURP successfully (MVP mode)",
})
}
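// slurpRetrieveHandler looks up a stored submission by its UCXL address.
// Illustrative request:
//
//	GET /api/v1/slurp/artifacts/{ucxlAddr}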
func (s *Server) slurpRetrieveHandler(w http.ResponseWriter, r *http.Request) {
	// The route registers this handler at /slurp/artifacts/{ucxlAddr}; prefer the
	// path parameter and fall back to the ucxl_address query parameter.
	ucxlAddress := chi.URLParam(r, "ucxlAddr")
	if ucxlAddress == "" {
		ucxlAddress = r.URL.Query().Get("ucxl_address")
	}
	if ucxlAddress == "" {
		render.Status(r, http.StatusBadRequest)
		render.JSON(w, r, map[string]string{"error": "ucxl address is required (path segment or ucxl_address query parameter)"})
		return
	}
log.Info().
Str("ucxl_address", ucxlAddress).
Msg("Retrieving SLURP submission")
// Query the submission from database
query := `
SELECT id, team_id, ucxl_address, artifact_type, metadata, submitted_at, status
FROM slurp_submissions
WHERE ucxl_address = $1
`
row := s.db.Pool.QueryRow(r.Context(), query, ucxlAddress)
var (
id uuid.UUID
teamID uuid.UUID
retrievedAddr string
artifactType string
metadataJSON []byte
submittedAt time.Time
status string
)
err := row.Scan(&id, &teamID, &retrievedAddr, &artifactType, &metadataJSON, &submittedAt, &status)
if err != nil {
if err.Error() == "no rows in result set" {
render.Status(r, http.StatusNotFound)
render.JSON(w, r, map[string]string{"error": "SLURP submission not found"})
return
}
log.Error().Err(err).
Str("ucxl_address", ucxlAddress).
Msg("Failed to retrieve SLURP submission")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to retrieve submission"})
return
}
// Parse metadata
	var metadata map[string]interface{}
	if len(metadataJSON) > 0 {
		if err := json.Unmarshal(metadataJSON, &metadata); err != nil {
			log.Warn().Err(err).Msg("Failed to unmarshal SLURP submission metadata")
		}
	}
submission := map[string]interface{}{
"id": id,
"team_id": teamID,
"ucxl_address": retrievedAddr,
"artifact_type": artifactType,
"metadata": metadata,
"submitted_at": submittedAt.Format(time.RFC3339),
"status": status,
}
// For MVP, we return the metadata. In production, this would
// proxy to SLURP service to retrieve actual artifact content
render.JSON(w, r, map[string]interface{}{
"submission": submission,
"message": "SLURP submission retrieved (MVP mode - metadata only)",
})
}
// CHORUS Integration Handlers
func (s *Server) listProjectTasksHandler(w http.ResponseWriter, r *http.Request) {
projectID := chi.URLParam(r, "projectID")
log.Info().
Str("project_id", projectID).
Msg("Listing tasks for project")
// For MVP, return mock tasks associated with the project
// In production, this would query actual tasks from database
tasks := []map[string]interface{}{
{
"id": "task-001",
"project_id": projectID,
"title": "Setup project infrastructure",
"description": "Initialize Docker, CI/CD, and database setup",
"status": "completed",
"priority": "high",
"assigned_team": nil,
"created_at": time.Now().Add(-48 * time.Hour).Format(time.RFC3339),
"completed_at": time.Now().Add(-12 * time.Hour).Format(time.RFC3339),
},
{
"id": "task-002",
"project_id": projectID,
"title": "Implement authentication system",
"description": "JWT-based authentication with user management",
"status": "active",
"priority": "high",
"assigned_team": "team-001",
"created_at": time.Now().Add(-24 * time.Hour).Format(time.RFC3339),
"updated_at": time.Now().Add(-2 * time.Hour).Format(time.RFC3339),
},
{
"id": "task-003",
"project_id": projectID,
"title": "Create API documentation",
"description": "OpenAPI/Swagger documentation for all endpoints",
"status": "queued",
"priority": "medium",
"assigned_team": nil,
"created_at": time.Now().Add(-6 * time.Hour).Format(time.RFC3339),
},
}
render.JSON(w, r, map[string]interface{}{
"project_id": projectID,
"tasks": tasks,
"total": len(tasks),
"message": "Project tasks retrieved (MVP mock data)",
})
}
func (s *Server) listAvailableTasksHandler(w http.ResponseWriter, r *http.Request) {
// Get query parameters for filtering
skillFilter := r.URL.Query().Get("skills")
priorityFilter := r.URL.Query().Get("priority")
log.Info().
Str("skill_filter", skillFilter).
Str("priority_filter", priorityFilter).
Msg("Listing available tasks")
// For MVP, return mock available tasks that agents can claim
// In production, this would query unassigned tasks from database
availableTasks := []map[string]interface{}{
{
"id": "task-004",
"title": "Fix memory leak in user service",
"description": "Investigate and fix memory leak causing high memory usage",
"status": "available",
"priority": "high",
"skills_required": []string{"go", "debugging", "performance"},
"estimated_hours": 8,
"repository": "example/user-service",
"created_at": time.Now().Add(-3 * time.Hour).Format(time.RFC3339),
},
{
"id": "task-005",
"title": "Add rate limiting to API",
"description": "Implement rate limiting middleware for API endpoints",
"status": "available",
"priority": "medium",
"skills_required": []string{"go", "middleware", "api"},
"estimated_hours": 4,
"repository": "example/api-gateway",
"created_at": time.Now().Add(-1 * time.Hour).Format(time.RFC3339),
},
{
"id": "task-006",
"title": "Update React components",
"description": "Migrate legacy class components to functional components",
"status": "available",
"priority": "low",
"skills_required": []string{"react", "javascript", "frontend"},
"estimated_hours": 12,
"repository": "example/web-ui",
"created_at": time.Now().Add(-30 * time.Minute).Format(time.RFC3339),
},
}
// Apply filtering if specified
filteredTasks := availableTasks
if priorityFilter != "" {
filtered := []map[string]interface{}{}
for _, task := range availableTasks {
if task["priority"] == priorityFilter {
filtered = append(filtered, task)
}
}
filteredTasks = filtered
}
render.JSON(w, r, map[string]interface{}{
"available_tasks": filteredTasks,
"total": len(filteredTasks),
"filters": map[string]string{
"skills": skillFilter,
"priority": priorityFilter,
},
"message": "Available tasks retrieved (MVP mock data)",
})
}
func (s *Server) getProjectRepositoryHandler(w http.ResponseWriter, r *http.Request) {
render.Status(r, http.StatusNotImplemented)
render.JSON(w, r, map[string]string{"error": "not implemented"})
}
func (s *Server) getProjectTaskHandler(w http.ResponseWriter, r *http.Request) {
render.Status(r, http.StatusNotImplemented)
render.JSON(w, r, map[string]string{"error": "not implemented"})
}
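// claimTaskHandler assigns a task to a team and, optionally, a specific agent.
// Illustrative request (IDs must be valid UUIDs):
//
//	POST /api/v1/projects/{projectID}/tasks/{taskNumber}/claim
//	{"team_id": "<team UUID>", "agent_id": "<agent UUID>", "reason": "skills match"}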
func (s *Server) claimTaskHandler(w http.ResponseWriter, r *http.Request) {
	// The enclosing route is /projects/{projectID}/tasks/{taskNumber}/claim, so the
	// task identifier arrives as the {taskNumber} URL parameter.
	taskIDStr := chi.URLParam(r, "taskNumber")
taskID, err := uuid.Parse(taskIDStr)
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid task ID format"})
return
}
var claimData struct {
TeamID string `json:"team_id"`
AgentID string `json:"agent_id,omitempty"`
Reason string `json:"reason,omitempty"`
}
if err := json.NewDecoder(r.Body).Decode(&claimData); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
if claimData.TeamID == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "team_id is required"})
return
}
// Validate team exists
teamUUID, err := uuid.Parse(claimData.TeamID)
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid team_id format"})
return
}
// Check if team exists
_, _, err = s.teamComposer.GetTeam(r.Context(), teamUUID)
if err != nil {
if strings.Contains(err.Error(), "not found") {
render.Status(r, http.StatusNotFound)
render.JSON(w, r, map[string]string{"error": "team not found"})
return
}
log.Error().Err(err).Str("team_id", claimData.TeamID).Msg("Failed to validate team")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to validate team"})
return
}
// Parse agent ID if provided
var agentUUID *uuid.UUID
if claimData.AgentID != "" {
agentID, err := uuid.Parse(claimData.AgentID)
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid agent_id format"})
return
}
agentUUID = &agentID
}
// Assign task to team/agent
assignment := &tasks.TaskAssignment{
TaskID: taskID,
TeamID: &teamUUID,
AgentID: agentUUID,
Reason: claimData.Reason,
}
err = s.taskService.AssignTask(r.Context(), assignment)
if err != nil {
log.Error().Err(err).Str("task_id", taskIDStr).Msg("Failed to assign task")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to assign task"})
return
}
log.Info().
Str("task_id", taskIDStr).
Str("team_id", claimData.TeamID).
Str("agent_id", claimData.AgentID).
Msg("Task assigned to team")
render.JSON(w, r, map[string]interface{}{
"task_id": taskIDStr,
"team_id": claimData.TeamID,
"agent_id": claimData.AgentID,
"status": "claimed",
"claimed_at": time.Now().Format(time.RFC3339),
"message": "Task assigned successfully",
})
}
func (s *Server) updateTaskStatusHandler(w http.ResponseWriter, r *http.Request) {
render.Status(r, http.StatusNotImplemented)
render.JSON(w, r, map[string]string{"error": "not implemented"})
}
func (s *Server) completeTaskHandler(w http.ResponseWriter, r *http.Request) {
render.Status(r, http.StatusNotImplemented)
render.JSON(w, r, map[string]string{"error": "not implemented"})
}
func (s *Server) listAgentsHandler(w http.ResponseWriter, r *http.Request) {
// Get discovered CHORUS agents from P2P discovery
discoveredAgents := s.p2pDiscovery.GetAgents()
// Convert to API format
agents := make([]map[string]interface{}, 0, len(discoveredAgents))
onlineCount := 0
idleCount := 0
offlineCount := 0
for _, agent := range discoveredAgents {
agentData := map[string]interface{}{
"id": agent.ID,
"name": agent.Name,
"status": agent.Status,
"capabilities": agent.Capabilities,
"model": agent.Model,
"endpoint": agent.Endpoint,
"last_seen": agent.LastSeen.Format(time.RFC3339),
"tasks_completed": agent.TasksCompleted,
"p2p_addr": agent.P2PAddr,
"cluster_id": agent.ClusterID,
}
// Add current team if present
if agent.CurrentTeam != "" {
agentData["current_team"] = agent.CurrentTeam
} else {
agentData["current_team"] = nil
}
agents = append(agents, agentData)
// Count status
switch agent.Status {
case "online":
onlineCount++
case "idle":
idleCount++
case "working":
onlineCount++ // Working agents are considered online
default:
offlineCount++
}
}
render.JSON(w, r, map[string]interface{}{
"agents": agents,
"total": len(agents),
"online": onlineCount,
"idle": idleCount,
"offline": offlineCount,
})
}
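// registerAgentHandler inserts a new agent record. Illustrative request (the
// name, endpoint, and capability keys are examples only):
//
//	POST /api/v1/agents/register
//	{"name": "builder-01", "endpoint_url": "http://agent-01:9000", "capabilities": {"languages": ["go"]}}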
func (s *Server) registerAgentHandler(w http.ResponseWriter, r *http.Request) {
var agentData struct {
Name string `json:"name"`
EndpointURL string `json:"endpoint_url"`
Capabilities map[string]interface{} `json:"capabilities"`
}
if err := json.NewDecoder(r.Body).Decode(&agentData); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
// Validate required fields
if agentData.Name == "" || agentData.EndpointURL == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "name and endpoint_url are required"})
return
}
// Create agent record
agent := &composer.Agent{
ID: uuid.New(),
Name: agentData.Name,
EndpointURL: agentData.EndpointURL,
Capabilities: agentData.Capabilities,
Status: composer.AgentStatusAvailable,
LastSeen: time.Now(),
PerformanceMetrics: make(map[string]interface{}),
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
// Initialize empty capabilities if none provided
if agent.Capabilities == nil {
agent.Capabilities = make(map[string]interface{})
}
// Insert into database
capabilitiesJSON, _ := json.Marshal(agent.Capabilities)
metricsJSON, _ := json.Marshal(agent.PerformanceMetrics)
query := `
INSERT INTO agents (id, name, endpoint_url, capabilities, status, last_seen, performance_metrics, created_at, updated_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
`
_, err := s.db.Pool.Exec(r.Context(), query,
agent.ID, agent.Name, agent.EndpointURL, capabilitiesJSON,
agent.Status, agent.LastSeen, metricsJSON,
agent.CreatedAt, agent.UpdatedAt)
if err != nil {
log.Error().Err(err).Str("agent_name", agent.Name).Msg("Failed to register agent")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to register agent"})
return
}
log.Info().
Str("agent_id", agent.ID.String()).
Str("agent_name", agent.Name).
Str("endpoint", agent.EndpointURL).
Msg("Agent registered successfully")
render.Status(r, http.StatusCreated)
render.JSON(w, r, map[string]interface{}{
"agent": agent,
"message": "Agent registered successfully",
})
}
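// updateAgentStatusHandler updates an agent's status, last_seen timestamp, and
// optional performance metrics. Illustrative request (valid statuses:
// available, busy, idle, offline):
//
//	PUT /api/v1/agents/{agentID}/status
//	{"status": "busy", "performance_metrics": {"tasks_completed": 12}, "reason": "claimed task"}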
func (s *Server) updateAgentStatusHandler(w http.ResponseWriter, r *http.Request) {
agentIDStr := chi.URLParam(r, "agentID")
agentID, err := uuid.Parse(agentIDStr)
if err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid agent ID"})
return
}
var statusUpdate struct {
Status string `json:"status"`
PerformanceMetrics map[string]interface{} `json:"performance_metrics,omitempty"`
Reason string `json:"reason,omitempty"`
}
if err := json.NewDecoder(r.Body).Decode(&statusUpdate); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
// Validate status values
validStatuses := map[string]bool{
"available": true,
"busy": true,
"idle": true,
"offline": true,
}
if !validStatuses[statusUpdate.Status] {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid status. Valid values: available, busy, idle, offline"})
return
}
// Update agent status and last_seen timestamp
updateQuery := `
UPDATE agents
SET status = $1, last_seen = $2, updated_at = $2
WHERE id = $3
`
_, err = s.db.Pool.Exec(r.Context(), updateQuery, statusUpdate.Status, time.Now(), agentID)
if err != nil {
log.Error().Err(err).
Str("agent_id", agentIDStr).
Str("status", statusUpdate.Status).
Msg("Failed to update agent status")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to update agent status"})
return
}
// Update performance metrics if provided
if statusUpdate.PerformanceMetrics != nil {
metricsJSON, _ := json.Marshal(statusUpdate.PerformanceMetrics)
_, err = s.db.Pool.Exec(r.Context(),
`UPDATE agents SET performance_metrics = $1 WHERE id = $2`,
metricsJSON, agentID)
if err != nil {
log.Warn().Err(err).
Str("agent_id", agentIDStr).
Msg("Failed to update agent performance metrics")
}
}
log.Info().
Str("agent_id", agentIDStr).
Str("status", statusUpdate.Status).
Str("reason", statusUpdate.Reason).
Msg("Agent status updated")
render.JSON(w, r, map[string]interface{}{
"agent_id": agentIDStr,
"status": statusUpdate.Status,
"message": "Agent status updated successfully",
})
}
// Project Management Handlers
func (s *Server) listProjectsHandler(w http.ResponseWriter, r *http.Request) {
// For MVP, return hardcoded projects list
// In full implementation, this would query database
projects := []map[string]interface{}{
{
"id": "whoosh-001",
"name": "WHOOSH",
"repo_url": "https://gitea.chorus.services/tony/WHOOSH",
"description": "Autonomous AI Development Teams Architecture",
"tech_stack": []string{"Go", "Docker", "PostgreSQL"},
"status": "active",
"created_at": "2025-09-04T00:00:00Z",
"team_size": 3,
},
{
"id": "chorus-001",
"name": "CHORUS",
"repo_url": "https://gitea.chorus.services/tony/CHORUS",
"description": "AI Agent P2P Coordination System",
"tech_stack": []string{"Go", "P2P", "LibP2P"},
"status": "active",
"created_at": "2025-09-03T00:00:00Z",
"team_size": 2,
},
}
render.JSON(w, r, map[string]interface{}{
"projects": projects,
"total": len(projects),
})
}
// createProjectHandler handles POST /api/v1/projects requests to add new GITEA repositories
// for team composition analysis. This is the core MVP functionality that allows users
// to register repositories that will be analyzed by the N8N workflow.
//
// Implementation decision: We use an anonymous struct for the request payload rather than
// a named struct because this is a simple, internal API that doesn't need to be shared
// across packages. This reduces complexity while maintaining type safety.
//
// TODO: In production, this would persist to PostgreSQL database rather than just
// returning in-memory data. The database integration is prepared in the docker-compose
// but not yet implemented in the handlers.
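//
// Illustrative request:
//
//	POST /api/v1/projects
//	{"name": "WHOOSH", "repo_url": "https://gitea.chorus.services/tony/WHOOSH", "description": "Orchestration service"}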
func (s *Server) createProjectHandler(w http.ResponseWriter, r *http.Request) {
// Anonymous struct for request payload - simpler than defining a separate type
// for this single-use case. Contains the minimal required fields for MVP.
var req struct {
Name string `json:"name"` // User-friendly project name
RepoURL string `json:"repo_url"` // GITEA repository URL for analysis
Description string `json:"description"` // Optional project description
}
// Use json.NewDecoder instead of render.Bind because render.Bind requires
// implementing the render.Binder interface, which adds unnecessary complexity
// for simple JSON parsing. Direct JSON decoding is more straightforward.
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request"})
return
}
// Basic validation - both name and repo_url are required for meaningful analysis.
// The N8N workflow needs the repo URL to fetch files, and we need a name for UI display.
if req.RepoURL == "" || req.Name == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "name and repo_url are required"})
return
}
// Generate unique project ID using Unix timestamp. In production, this would be
// a proper UUID or database auto-increment, but for MVP simplicity, timestamp-based
// IDs are sufficient and provide natural ordering.
projectID := fmt.Sprintf("proj-%d", time.Now().Unix())
// Project data structure matches the expected format for the frontend UI.
// Status "created" indicates the project is registered but not yet analyzed.
// This will be updated to "analyzing" -> "completed" by the N8N workflow.
project := map[string]interface{}{
"id": projectID,
"name": req.Name,
"repo_url": req.RepoURL,
"description": req.Description,
"status": "created",
"created_at": time.Now().Format(time.RFC3339),
"team_size": 0, // Will be populated after N8N analysis
}
// Structured logging with zerolog provides excellent performance and
// searchability in production environments. Include key identifiers
// for debugging and audit trails.
log.Info().
Str("project_id", projectID).
Str("repo_url", req.RepoURL).
Msg("Created new project")
// Return 201 Created with the project data. The frontend will use this
// response to update the UI and potentially trigger immediate analysis.
render.Status(r, http.StatusCreated)
render.JSON(w, r, project)
}
// deleteProjectHandler handles DELETE /api/v1/projects/{projectID} requests to remove
// repositories from management. This allows users to clean up their project list
// and stop monitoring repositories that are no longer relevant.
//
// Implementation decision: We use chi.URLParam to extract the project ID from the
// URL path rather than query parameters, following REST conventions where the
// resource identifier is part of the path structure.
func (s *Server) deleteProjectHandler(w http.ResponseWriter, r *http.Request) {
// Extract project ID from URL path parameter. Chi router handles the parsing
// and validation of the URL structure, so we can safely assume this exists
// if the route matched.
projectID := chi.URLParam(r, "projectID")
// Log the deletion for audit purposes. In a production system, you'd want
// to track who deleted what and when for compliance and debugging.
log.Info().
Str("project_id", projectID).
Msg("Deleted project")
render.JSON(w, r, map[string]string{"message": "project deleted"})
}
// getProjectHandler handles GET /api/v1/projects/{projectID} requests to retrieve
// detailed information about a specific project, including its analysis results
// and team formation recommendations from the N8N workflow.
//
// Implementation decision: We return mock data for now since database persistence
// isn't implemented yet. In production, this would query PostgreSQL for the
// actual project record and its associated analysis results.
func (s *Server) getProjectHandler(w http.ResponseWriter, r *http.Request) {
projectID := chi.URLParam(r, "projectID")
// TODO: Replace with database query - this mock data demonstrates the expected
// response structure that the frontend UI will consume. The tech_stack and
// team_size fields would be populated by N8N workflow analysis results.
project := map[string]interface{}{
"id": projectID,
"name": "Sample Project",
"repo_url": "https://gitea.chorus.services/tony/" + projectID,
"description": "Sample project description",
"tech_stack": []string{"Go", "JavaScript"}, // From N8N analysis
"status": "active",
"created_at": "2025-09-04T00:00:00Z",
"team_size": 2, // From N8N team formation recommendations
}
render.JSON(w, r, project)
}
// analyzeProjectHandler handles POST /api/v1/projects/{projectID}/analyze requests to
// trigger the N8N Team Formation Analysis workflow. This is the core integration point
// that connects WHOOSH to the AI-powered repository analysis system.
//
// Implementation decisions:
// 1. 60-second timeout for N8N requests because LLM analysis can be slow
// 2. Direct HTTP client rather than a service layer for simplicity in MVP
// 3. Graceful fallback to mock data when request body is empty
// 4. Comprehensive error handling with structured logging for debugging
//
// This handler represents the "missing link" that was identified as the core need:
// WHOOSH UI → N8N workflow → LLM analysis → team formation recommendations
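//
// Illustrative request (the body is optional; when omitted, repository details
// are derived from the project ID):
//
//	POST /api/v1/projects/{projectID}/analyze
//	{"repo_url": "https://gitea.chorus.services/tony/WHOOSH", "name": "WHOOSH"}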
func (s *Server) analyzeProjectHandler(w http.ResponseWriter, r *http.Request) {
projectID := chi.URLParam(r, "projectID")
// Project data structure for N8N payload. In production, this would be fetched
// from the database using the projectID, but for MVP we allow it to be provided
// in the request body or fall back to predictable mock data.
var projectData struct {
RepoURL string `json:"repo_url"`
Name string `json:"name"`
}
// Handle both scenarios: explicit project data in request body (for testing)
// and implicit data fetching (for production UI). This flexibility makes the
// API easier to test manually while supporting the intended UI workflow.
if r.Body != http.NoBody {
if err := json.NewDecoder(r.Body).Decode(&projectData); err != nil {
// Fallback to predictable mock data based on projectID for testing
projectData.RepoURL = "https://gitea.chorus.services/tony/" + projectID
projectData.Name = projectID
}
} else {
// No body provided - use mock data (in production, would query database)
projectData.RepoURL = "https://gitea.chorus.services/tony/" + projectID
projectData.Name = projectID
}
// Start BACKBEAT search tracking if available
searchID := fmt.Sprintf("analyze-%s", projectID)
if s.backbeat != nil {
if err := s.backbeat.StartSearch(searchID, fmt.Sprintf("Analyzing project %s (%s)", projectID, projectData.RepoURL), 4); err != nil {
log.Warn().Err(err).Msg("Failed to start BACKBEAT search tracking")
}
}
// Log the analysis initiation for debugging and audit trails. Repository URL
// is crucial for troubleshooting N8N workflow issues.
log.Info().
Str("project_id", projectID).
Str("repo_url", projectData.RepoURL).
Msg("🔍 Starting project analysis via N8N workflow with BACKBEAT tracking")
// Execute analysis within BACKBEAT beat budget (4 beats = 2 minutes at 2 BPM)
var analysisResult map[string]interface{}
analysisFunc := func() error {
// Update BACKBEAT phase to querying
if s.backbeat != nil {
s.backbeat.UpdateSearchPhase(searchID, backbeat.PhaseQuerying, 0)
}
// HTTP client with generous timeout because:
// 1. N8N workflow fetches multiple files from repository
// 2. LLM analysis (Ollama) can take 10-30 seconds depending on model size
// 3. Network latency between services in Docker Swarm
// 60 seconds provides buffer while still failing fast for real issues
client := &http.Client{Timeout: 60 * time.Second}
// Payload structure matches the N8N workflow webhook expectations.
// The workflow expects these exact field names to properly route data
// through the file fetching and analysis nodes.
payload := map[string]interface{}{
"repo_url": projectData.RepoURL, // Primary input for file fetching
"project_name": projectData.Name, // Used in LLM analysis context
}
// JSON marshaling without error checking is acceptable here because we control
// the payload structure and know it will always be valid JSON.
payloadBytes, _ := json.Marshal(payload)
// Direct call to production N8N instance. In a more complex system, this URL
// would be configurable, but for MVP we can hardcode the known endpoint.
// The webhook URL was configured when we created the N8N workflow.
resp, err := client.Post(
"https://n8n.home.deepblack.cloud/webhook/team-formation",
"application/json",
bytes.NewBuffer(payloadBytes),
)
// Network-level error handling (connection refused, timeout, DNS issues)
if err != nil {
log.Error().Err(err).Msg("Failed to trigger N8N workflow")
return fmt.Errorf("failed to trigger N8N workflow: %w", err)
}
defer resp.Body.Close()
// HTTP-level error handling (N8N returned an error status)
if resp.StatusCode != http.StatusOK {
log.Error().
Int("status", resp.StatusCode).
Msg("N8N workflow returned error")
return fmt.Errorf("N8N workflow returned status %d", resp.StatusCode)
}
// Update BACKBEAT phase to ranking
if s.backbeat != nil {
s.backbeat.UpdateSearchPhase(searchID, backbeat.PhaseRanking, 0)
}
// Read the N8N workflow response, which contains the team formation analysis
// results including detected technologies, complexity scores, and agent assignments.
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Error().Err(err).Msg("Failed to read N8N response")
return fmt.Errorf("failed to read N8N response: %w", err)
}
// Parse and return N8N response
if err := json.Unmarshal(body, &analysisResult); err != nil {
log.Error().Err(err).Msg("Failed to parse N8N response")
return fmt.Errorf("failed to parse N8N response: %w", err)
}
return nil
}
// Execute analysis with BACKBEAT beat budget or fallback to direct execution
var analysisErr error
if s.backbeat != nil {
analysisErr = s.backbeat.ExecuteWithBeatBudget(4, analysisFunc)
if analysisErr != nil {
s.backbeat.FailSearch(searchID, analysisErr.Error())
}
} else {
analysisErr = analysisFunc()
}
if analysisErr != nil {
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": analysisErr.Error()})
return
}
// Complete BACKBEAT search tracking
if s.backbeat != nil {
s.backbeat.CompleteSearch(searchID, 1)
}
log.Info().
Str("project_id", projectID).
Msg("🔍 Project analysis completed successfully with BACKBEAT tracking")
render.JSON(w, r, analysisResult)
}
func (s *Server) giteaWebhookHandler(w http.ResponseWriter, r *http.Request) {
// Parse webhook payload
payload, err := s.webhookHandler.ParsePayload(r)
if err != nil {
log.Error().Err(err).Msg("Failed to parse webhook payload")
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid payload"})
return
}
log.Info().
Str("action", payload.Action).
Str("repository", payload.Repository.FullName).
Str("sender", payload.Sender.Login).
Msg("Received GITEA webhook")
// Process webhook event
event := s.webhookHandler.ProcessWebhook(payload)
// Handle task-related webhooks
if event.TaskInfo != nil {
log.Info().
Interface("task_info", event.TaskInfo).
Msg("Processing task issue")
// MVP: Store basic task info for future team assignment
// In full implementation, this would trigger Team Composer
s.handleTaskWebhook(r.Context(), event)
}
render.JSON(w, r, map[string]interface{}{
"status": "received",
"event_id": event.Timestamp,
"processed": event.TaskInfo != nil,
})
}
func (s *Server) handleTaskWebhook(ctx context.Context, event *gitea.WebhookEvent) {
// MVP implementation: Log task details
// In full version, this would:
// 1. Analyze task complexity
// 2. Determine required team composition
// 3. Create team and assign agents
// 4. Set up P2P communication channels
log.Info().
Str("action", event.Action).
Str("repository", event.Repository).
Interface("task_info", event.TaskInfo).
Msg("Task webhook received - MVP logging")
// For MVP, we'll just acknowledge task detection
if event.Action == "opened" || event.Action == "reopened" {
taskType := event.TaskInfo["task_type"].(string)
priority := event.TaskInfo["priority"].(string)
log.Info().
Str("task_type", taskType).
Str("priority", priority).
Msg("New task detected - ready for team assignment")
}
}
func (s *Server) dashboardHandler(w http.ResponseWriter, r *http.Request) {
	// Simple static dashboard markup served at the root route.
	html := `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>WHOOSH - AI Team Orchestration</title>
</head>
<body>
<h1>WHOOSH - AI Team Orchestration</h1>
<nav>Overview | Tasks | Teams | Agents | Repositories | Settings</nav>
<section>
<h2>📊 System Metrics</h2>
<ul>
<li>Active Teams: 0</li>
<li>Pending Tasks: 0</li>
<li>Registered Agents: 0</li>
<li>Tasks Completed Today: 0</li>
</ul>
</section>
<section>
<h2>🔄 Recent Activity</h2>
<p>📭 No recent activity. Task activity will appear here once agents start working.</p>
</section>
<section>
<h2>🎯 System Status</h2>
<ul>
<li>Service Health: ✅ Healthy</li>
<li>Database: ✅ Connected</li>
<li>GITEA Integration: ✅ Active</li>
<li>Redis Cache: ✅ Running</li>
</ul>
</section>
<section>
<h2>🥁 BACKBEAT Clock</h2>
<ul>
<li>Current Beat: --</li>
<li>Downbeat: --</li>
<li>Avg Interval: --ms</li>
<li>Phase: --</li>
</ul>
<p>Live BACKBEAT Pulse</p>
</section>
<section>
<h2>📋 Task Management</h2>
<h3>🎯 Active Tasks</h3>
<p>📝 No active tasks. bzzz-task issues will appear here from GITEA.</p>
<h3>⏳ Task Queue</h3>
<p>⏱️ No queued tasks. Tasks awaiting team assignment.</p>
</section>
<section>
<h2>👥 Team Management</h2>
<p>👥 No assembled teams. Teams are automatically assembled when tasks are assigned to agents.</p>
</section>
<section>
<h2>🤖 Agent Management</h2>
<p>🤖 No agents discovered. CHORUS agents are discovered organically and their personas tracked here.</p>
</section>
<section>
<h2>⚙️ System Configuration</h2>
<h3>🔗 GITEA Integration</h3>
<ul>
<li>Base URL: gitea.chorus.services</li>
<li>Webhook Path: /webhooks/gitea</li>
<li>Token Status: ✅ Valid</li>
</ul>
<h3>🗄️ Database Configuration</h3>
<ul>
<li>Host: postgres:5432</li>
<li>SSL Mode: disabled</li>
<li>Auto-Migrate: enabled</li>
</ul>
</section>
<section>
<h2>📚 Repository Management</h2>
<h3>📊 Repository Stats</h3>
<ul>
<li>Total Repositories: --</li>
<li>Active Monitoring: --</li>
<li>Tasks Created: --</li>
</ul>
</section>
</body>
</html>`
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Write([]byte(html))
}
// backbeatStatusHandler provides real-time BACKBEAT pulse data
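//
// Illustrative response when the integration is connected (values are examples;
// backbeat_health is the raw health map from the integration):
//
//	{"current_beat": 120, "current_downbeat": 31, "average_interval": 30000,
//	 "phase": "normal", "is_downbeat": false, "tempo": 2, "connected": true,
//	 "timestamp": 1725400000, "status": "live", "backbeat_health": {}}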
func (s *Server) backbeatStatusHandler(w http.ResponseWriter, r *http.Request) {
now := time.Now()
// Get real BACKBEAT data if integration is available and started
if s.backbeat != nil {
health := s.backbeat.GetHealth()
// Extract real BACKBEAT data
currentBeat := int64(0)
if beatVal, ok := health["current_beat"]; ok {
if beat, ok := beatVal.(int64); ok {
currentBeat = beat
}
}
currentTempo := 2 // Default fallback
if tempoVal, ok := health["current_tempo"]; ok {
if tempo, ok := tempoVal.(int); ok {
currentTempo = tempo
}
}
connected := false
if connVal, ok := health["connected"]; ok {
if conn, ok := connVal.(bool); ok {
connected = conn
}
}
// Determine phase based on BACKBEAT health
phase := "normal"
if degradationVal, ok := health["local_degradation"]; ok {
if degraded, ok := degradationVal.(bool); ok && degraded {
phase = "degraded"
}
}
		// Calculate the average interval between beats (BPM to milliseconds),
		// guarding against a zero tempo to avoid a divide-by-zero.
		averageInterval := 0
		if currentTempo > 0 {
			averageInterval = 60000 / currentTempo
		}
// Determine if current beat is a downbeat (every 4th beat)
isDownbeat := currentBeat%4 == 1
currentDownbeat := (currentBeat / 4) + 1
response := map[string]interface{}{
"current_beat": currentBeat,
"current_downbeat": currentDownbeat,
"average_interval": averageInterval,
"phase": phase,
"is_downbeat": isDownbeat,
"tempo": currentTempo,
"connected": connected,
"timestamp": now.Unix(),
"status": "live",
"backbeat_health": health,
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(response); err != nil {
http.Error(w, "Failed to encode response", http.StatusInternalServerError)
return
}
return
}
// Fallback to basic data if BACKBEAT integration is not available
response := map[string]interface{}{
"current_beat": 0,
"current_downbeat": 0,
"average_interval": 0,
"phase": "disconnected",
"is_downbeat": false,
"tempo": 0,
"connected": false,
"timestamp": now.Unix(),
"status": "no_backbeat",
"error": "BACKBEAT integration not available",
}
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(response); err != nil {
http.Error(w, "Failed to encode response", http.StatusInternalServerError)
return
}
}
// Repository Management Handlers
// listRepositoriesHandler returns all monitored repositories
func (s *Server) listRepositoriesHandler(w http.ResponseWriter, r *http.Request) {
log.Info().Msg("Listing monitored repositories")
query := `
SELECT id, name, owner, full_name, url, clone_url, ssh_url, source_type,
monitor_issues, monitor_pull_requests, enable_chorus_integration,
description, default_branch, is_private, language, topics,
last_sync_at, sync_status, sync_error, open_issues_count,
closed_issues_count, total_tasks_created, created_at, updated_at
FROM repositories
ORDER BY created_at DESC`
	rows, err := s.db.Pool.Query(r.Context(), query)
if err != nil {
log.Error().Err(err).Msg("Failed to query repositories")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to query repositories"})
return
}
defer rows.Close()
repositories := []map[string]interface{}{}
for rows.Next() {
var id, name, owner, fullName, url, sourceType, defaultBranch, syncStatus string
var cloneURL, sshURL, description, syncError, language *string
var monitorIssues, monitorPRs, enableChorus, isPrivate bool
var topicsJSON []byte
var lastSyncAt *time.Time
var createdAt, updatedAt time.Time
var openIssues, closedIssues, totalTasks int
err := rows.Scan(&id, &name, &owner, &fullName, &url, &cloneURL, &sshURL, &sourceType,
&monitorIssues, &monitorPRs, &enableChorus, &description, &defaultBranch,
&isPrivate, &language, &topicsJSON, &lastSyncAt, &syncStatus, &syncError,
&openIssues, &closedIssues, &totalTasks, &createdAt, &updatedAt)
if err != nil {
log.Error().Err(err).Msg("Failed to scan repository row")
continue
}
		// Parse topics from JSONB (the column may be null or empty)
		topics := []string{}
		if len(topicsJSON) > 0 {
			if err := json.Unmarshal(topicsJSON, &topics); err != nil {
				log.Warn().Err(err).Msg("Failed to unmarshal repository topics")
				topics = []string{}
			}
		}
// Handle nullable lastSyncAt
var lastSyncFormatted *string
if lastSyncAt != nil {
formatted := lastSyncAt.Format(time.RFC3339)
lastSyncFormatted = &formatted
}
repo := map[string]interface{}{
"id": id,
"name": name,
"owner": owner,
"full_name": fullName,
"url": url,
"clone_url": cloneURL,
"ssh_url": sshURL,
"source_type": sourceType,
"monitor_issues": monitorIssues,
"monitor_pull_requests": monitorPRs,
"enable_chorus_integration": enableChorus,
"description": description,
"default_branch": defaultBranch,
"is_private": isPrivate,
"language": language,
"topics": topics,
"last_sync_at": lastSyncFormatted,
"sync_status": syncStatus,
"sync_error": syncError,
"open_issues_count": openIssues,
"closed_issues_count": closedIssues,
"total_tasks_created": totalTasks,
"created_at": createdAt.Format(time.RFC3339),
"updated_at": updatedAt.Format(time.RFC3339),
}
repositories = append(repositories, repo)
}
if err := rows.Err(); err != nil {
log.Error().Err(err).Msg("Error iterating repository rows")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to read repositories"})
return
}
render.JSON(w, r, map[string]interface{}{
"repositories": repositories,
"count": len(repositories),
})
}
// createRepositoryHandler adds a new repository to monitor
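//
// Example request body (illustrative values; name, owner, and url are required,
// the remaining fields are optional and fall back to the defaults set below):
//
//	{
//	  "name": "whoosh",
//	  "owner": "chorus-services",
//	  "url": "https://gitea.example.com/chorus-services/whoosh",
//	  "monitor_issues": true,
//	  "enable_chorus_integration": true,
//	  "topics": ["go", "api"]
//	}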
func (s *Server) createRepositoryHandler(w http.ResponseWriter, r *http.Request) {
var req struct {
Name string `json:"name"`
Owner string `json:"owner"`
URL string `json:"url"`
SourceType string `json:"source_type"`
MonitorIssues bool `json:"monitor_issues"`
MonitorPullRequests bool `json:"monitor_pull_requests"`
EnableChorusIntegration bool `json:"enable_chorus_integration"`
Description *string `json:"description"`
DefaultBranch string `json:"default_branch"`
IsPrivate bool `json:"is_private"`
Language *string `json:"language"`
Topics []string `json:"topics"`
}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
// Validate required fields
if req.Name == "" || req.Owner == "" || req.URL == "" {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "name, owner, and url are required"})
return
}
// Set defaults
if req.SourceType == "" {
req.SourceType = "gitea"
}
if req.DefaultBranch == "" {
req.DefaultBranch = "main"
}
if req.Topics == nil {
req.Topics = []string{}
}
fullName := req.Owner + "/" + req.Name
log.Info().
Str("repository", fullName).
Str("url", req.URL).
Msg("Creating new repository monitor")
query := `
INSERT INTO repositories (
name, owner, full_name, url, source_type, monitor_issues,
monitor_pull_requests, enable_chorus_integration, description,
default_branch, is_private, language, topics
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
RETURNING id, created_at`
// Convert topics slice to JSON for JSONB column
topicsJSON, err := json.Marshal(req.Topics)
if err != nil {
log.Error().Err(err).Msg("Failed to marshal topics")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to process topics"})
return
}
var id string
var createdAt time.Time
err = s.db.Pool.QueryRow(context.Background(), query,
req.Name, req.Owner, fullName, req.URL, req.SourceType,
req.MonitorIssues, req.MonitorPullRequests, req.EnableChorusIntegration,
req.Description, req.DefaultBranch, req.IsPrivate, req.Language, topicsJSON).
Scan(&id, &createdAt)
if err != nil {
log.Error().Err(err).Msg("Failed to create repository")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to create repository"})
return
}
render.Status(r, http.StatusCreated)
render.JSON(w, r, map[string]interface{}{
"id": id,
"full_name": fullName,
"created_at": createdAt.Format(time.RFC3339),
"message": "Repository monitor created successfully",
})
}
// getRepositoryHandler returns a specific repository
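// Responds with 404 when the id does not match any monitored repository.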
func (s *Server) getRepositoryHandler(w http.ResponseWriter, r *http.Request) {
repoID := chi.URLParam(r, "repoID")
log.Info().Str("repository_id", repoID).Msg("Getting repository details")
query := `
SELECT id, name, owner, full_name, url, clone_url, ssh_url, source_type,
source_config, monitor_issues, monitor_pull_requests, monitor_releases,
enable_chorus_integration, chorus_task_labels, auto_assign_teams,
description, default_branch, is_private, language, topics,
last_sync_at, last_issue_sync, sync_status, sync_error,
open_issues_count, closed_issues_count, total_tasks_created,
created_at, updated_at
FROM repositories WHERE id = $1`
var repo struct {
ID string `json:"id"`
Name string `json:"name"`
Owner string `json:"owner"`
FullName string `json:"full_name"`
URL string `json:"url"`
CloneURL *string `json:"clone_url"`
SSHURL *string `json:"ssh_url"`
SourceType string `json:"source_type"`
SourceConfig []byte `json:"source_config"`
MonitorIssues bool `json:"monitor_issues"`
MonitorPullRequests bool `json:"monitor_pull_requests"`
MonitorReleases bool `json:"monitor_releases"`
EnableChorusIntegration bool `json:"enable_chorus_integration"`
ChorusTaskLabels []string `json:"chorus_task_labels"`
AutoAssignTeams bool `json:"auto_assign_teams"`
Description *string `json:"description"`
DefaultBranch string `json:"default_branch"`
IsPrivate bool `json:"is_private"`
Language *string `json:"language"`
Topics []string `json:"topics"`
LastSyncAt *time.Time `json:"last_sync_at"`
LastIssueSyncAt *time.Time `json:"last_issue_sync"`
SyncStatus string `json:"sync_status"`
SyncError *string `json:"sync_error"`
OpenIssuesCount int `json:"open_issues_count"`
ClosedIssuesCount int `json:"closed_issues_count"`
TotalTasksCreated int `json:"total_tasks_created"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
}
err := s.db.Pool.QueryRow(context.Background(), query, repoID).Scan(
&repo.ID, &repo.Name, &repo.Owner, &repo.FullName, &repo.URL,
&repo.CloneURL, &repo.SSHURL, &repo.SourceType, &repo.SourceConfig,
&repo.MonitorIssues, &repo.MonitorPullRequests, &repo.MonitorReleases,
&repo.EnableChorusIntegration, &repo.ChorusTaskLabels, &repo.AutoAssignTeams,
&repo.Description, &repo.DefaultBranch, &repo.IsPrivate, &repo.Language,
&repo.Topics, &repo.LastSyncAt, &repo.LastIssueSyncAt, &repo.SyncStatus,
&repo.SyncError, &repo.OpenIssuesCount, &repo.ClosedIssuesCount,
&repo.TotalTasksCreated, &repo.CreatedAt, &repo.UpdatedAt)
if err != nil {
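// pgx reports a missing row as pgx.ErrNoRows ("no rows in result set"); the message is
// compared directly here because pgx is not imported in this file.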
if err.Error() == "no rows in result set" {
render.Status(r, http.StatusNotFound)
render.JSON(w, r, map[string]string{"error": "repository not found"})
return
}
log.Error().Err(err).Msg("Failed to get repository")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to get repository"})
return
}
render.JSON(w, r, repo)
}
// updateRepositoryHandler updates repository settings
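//
// Only the fields present in the request body are changed, e.g. (illustrative values):
//
//	{"monitor_issues": false, "topics": ["go", "backend"]}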
func (s *Server) updateRepositoryHandler(w http.ResponseWriter, r *http.Request) {
repoID := chi.URLParam(r, "repoID")
var req struct {
MonitorIssues *bool `json:"monitor_issues"`
MonitorPullRequests *bool `json:"monitor_pull_requests"`
MonitorReleases *bool `json:"monitor_releases"`
EnableChorusIntegration *bool `json:"enable_chorus_integration"`
AutoAssignTeams *bool `json:"auto_assign_teams"`
Description *string `json:"description"`
DefaultBranch *string `json:"default_branch"`
Language *string `json:"language"`
Topics []string `json:"topics"`
}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "invalid request body"})
return
}
log.Info().Str("repository_id", repoID).Msg("Updating repository settings")
// Build dynamic update query
updates := []string{}
args := []interface{}{repoID}
argIndex := 2
if req.MonitorIssues != nil {
updates = append(updates, fmt.Sprintf("monitor_issues = $%d", argIndex))
args = append(args, *req.MonitorIssues)
argIndex++
}
if req.MonitorPullRequests != nil {
updates = append(updates, fmt.Sprintf("monitor_pull_requests = $%d", argIndex))
args = append(args, *req.MonitorPullRequests)
argIndex++
}
if req.MonitorReleases != nil {
updates = append(updates, fmt.Sprintf("monitor_releases = $%d", argIndex))
args = append(args, *req.MonitorReleases)
argIndex++
}
if req.EnableChorusIntegration != nil {
updates = append(updates, fmt.Sprintf("enable_chorus_integration = $%d", argIndex))
args = append(args, *req.EnableChorusIntegration)
argIndex++
}
if req.AutoAssignTeams != nil {
updates = append(updates, fmt.Sprintf("auto_assign_teams = $%d", argIndex))
args = append(args, *req.AutoAssignTeams)
argIndex++
}
if req.Description != nil {
updates = append(updates, fmt.Sprintf("description = $%d", argIndex))
args = append(args, *req.Description)
argIndex++
}
if req.DefaultBranch != nil {
updates = append(updates, fmt.Sprintf("default_branch = $%d", argIndex))
args = append(args, *req.DefaultBranch)
argIndex++
}
if req.Language != nil {
updates = append(updates, fmt.Sprintf("language = $%d", argIndex))
args = append(args, *req.Language)
argIndex++
}
if req.Topics != nil {
// Marshal to JSON so the JSONB topics column is written the same way as in createRepositoryHandler.
topicsJSON, err := json.Marshal(req.Topics)
if err != nil {
log.Error().Err(err).Msg("Failed to marshal topics")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to process topics"})
return
}
updates = append(updates, fmt.Sprintf("topics = $%d", argIndex))
args = append(args, topicsJSON)
argIndex++
}
if len(updates) == 0 {
render.Status(r, http.StatusBadRequest)
render.JSON(w, r, map[string]string{"error": "no fields to update"})
return
}
updates = append(updates, fmt.Sprintf("updated_at = $%d", argIndex))
args = append(args, time.Now())
query := fmt.Sprintf("UPDATE repositories SET %s WHERE id = $1", strings.Join(updates, ", "))
result, err := s.db.Pool.Exec(context.Background(), query, args...)
if err != nil {
log.Error().Err(err).Msg("Failed to update repository")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to update repository"})
return
}
if result.RowsAffected() == 0 {
render.Status(r, http.StatusNotFound)
render.JSON(w, r, map[string]string{"error": "repository not found"})
return
}
render.JSON(w, r, map[string]string{"message": "Repository updated successfully"})
}
// deleteRepositoryHandler removes a repository from monitoring
func (s *Server) deleteRepositoryHandler(w http.ResponseWriter, r *http.Request) {
repoID := chi.URLParam(r, "repoID")
log.Info().Str("repository_id", repoID).Msg("Deleting repository monitor")
query := "DELETE FROM repositories WHERE id = $1"
result, err := s.db.Pool.Exec(context.Background(), query, repoID)
if err != nil {
log.Error().Err(err).Msg("Failed to delete repository")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to delete repository"})
return
}
if result.RowsAffected() == 0 {
render.Status(r, http.StatusNotFound)
render.JSON(w, r, map[string]string{"error": "repository not found"})
return
}
render.JSON(w, r, map[string]string{"message": "Repository deleted successfully"})
}
// syncRepositoryHandler triggers a manual sync of repository issues
func (s *Server) syncRepositoryHandler(w http.ResponseWriter, r *http.Request) {
repoID := chi.URLParam(r, "repoID")
log.Info().Str("repository_id", repoID).Msg("Manual repository sync triggered")
// TODO: Implement repository sync logic
// This would trigger the Gitea issue monitoring service
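// A minimal sketch of what that trigger might look like (SyncRepositoryIssues is a
// hypothetical method name, not part of the current Gitea integration API):
//
//	go func() {
//		if err := s.giteaIntegration.SyncRepositoryIssues(context.Background(), repoID); err != nil {
//			log.Error().Err(err).Str("repository_id", repoID).Msg("Repository sync failed")
//		}
//	}()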
render.JSON(w, r, map[string]interface{}{
"message": "Repository sync triggered",
"repository_id": repoID,
"status": "pending",
})
}
// getRepositorySyncLogsHandler returns sync logs for a repository
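// An optional ?limit query parameter (1-1000) caps the number of entries; the default is 50.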
func (s *Server) getRepositorySyncLogsHandler(w http.ResponseWriter, r *http.Request) {
repoID := chi.URLParam(r, "repoID")
limit := 50
if limitParam := r.URL.Query().Get("limit"); limitParam != "" {
if l, err := strconv.Atoi(limitParam); err == nil && l > 0 && l <= 1000 {
limit = l
}
}
log.Info().Str("repository_id", repoID).Int("limit", limit).Msg("Getting repository sync logs")
query := `
SELECT id, sync_type, operation, status, message, error_details,
items_processed, items_created, items_updated, duration_ms,
external_id, external_url, created_at
FROM repository_sync_logs
WHERE repository_id = $1
ORDER BY created_at DESC
LIMIT $2`
rows, err := s.db.Pool.Query(context.Background(), query, repoID, limit)
if err != nil {
log.Error().Err(err).Msg("Failed to query sync logs")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to query sync logs"})
return
}
defer rows.Close()
logs := []map[string]interface{}{}
for rows.Next() {
var id, syncType, operation, status, message string
var errorDetails []byte
var itemsProcessed, itemsCreated, itemsUpdated, durationMs int
var externalID, externalURL *string
var createdAt time.Time
err := rows.Scan(&id, &syncType, &operation, &status, &message, &errorDetails,
&itemsProcessed, &itemsCreated, &itemsUpdated, &durationMs,
&externalID, &externalURL, &createdAt)
if err != nil {
log.Error().Err(err).Msg("Failed to scan sync log row")
continue
}
logEntry := map[string]interface{}{
"id": id,
"sync_type": syncType,
"operation": operation,
"status": status,
"message": message,
"error_details": string(errorDetails),
"items_processed": itemsProcessed,
"items_created": itemsCreated,
"items_updated": itemsUpdated,
"duration_ms": durationMs,
"external_id": externalID,
"external_url": externalURL,
"created_at": createdAt.Format(time.RFC3339),
}
logs = append(logs, logEntry)
}
if err := rows.Err(); err != nil {
log.Error().Err(err).Msg("Error iterating sync log rows")
render.Status(r, http.StatusInternalServerError)
render.JSON(w, r, map[string]string{"error": "failed to read sync logs"})
return
}
render.JSON(w, r, map[string]interface{}{
"logs": logs,
"count": len(logs),
})
}
// Helper methods for task processing
// inferTechStackFromLabels extracts technology information from labels
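// For example, labels ["Bug", "Go", "API"] yield ["go", "api"].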
func (s *Server) inferTechStackFromLabels(labels []string) []string {
techMap := map[string]bool{
"go": true,
"golang": true,
"javascript": true,
"react": true,
"node": true,
"python": true,
"java": true,
"rust": true,
"docker": true,
"postgres": true,
"mysql": true,
"redis": true,
"api": true,
"backend": true,
"frontend": true,
"database": true,
}
var techStack []string
for _, label := range labels {
if techMap[strings.ToLower(label)] {
techStack = append(techStack, strings.ToLower(label))
}
}
return techStack
}
// processTaskAsync handles complex task processing in the background
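// It is expected to be launched in its own goroutine (e.g. go s.processTaskAsync(taskID, input))
// so the HTTP handler that accepted the task can respond immediately.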
func (s *Server) processTaskAsync(taskID string, taskInput *composer.TaskAnalysisInput) {
ctx := context.Background()
log.Info().
Str("task_id", taskID).
Msg("Starting async task processing")
result, err := s.teamComposer.AnalyzeAndComposeTeam(ctx, taskInput)
if err != nil {
log.Error().Err(err).
Str("task_id", taskID).
Msg("Async task analysis failed")
return
}
team, err := s.teamComposer.CreateTeam(ctx, result.TeamComposition, taskInput)
if err != nil {
log.Error().Err(err).
Str("task_id", taskID).
Msg("Async team creation failed")
return
}
log.Info().
Str("task_id", taskID).
Str("team_id", team.ID.String()).
Float64("confidence", result.TeamComposition.ConfidenceScore).
Msg("Async task processing completed")
// In production, this would update the task status in the database
// and potentially notify clients via websockets or webhooks.
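// A sketch of that follow-up step (UpdateTaskStatus is a hypothetical method name,
// not part of the current task service API):
//
//	if err := s.taskService.UpdateTaskStatus(ctx, taskID, "completed"); err != nil {
//		log.Error().Err(err).Str("task_id", taskID).Msg("Failed to update task status")
//	}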
}