Implement Beat 1: Sequential Thinking Age-Encrypted Wrapper (Skeleton)

This commit completes Beat 1 of the SequentialThinkingForCHORUS implementation,
providing a functional plaintext skeleton for the age-encrypted wrapper.

## Deliverables

### 1. Main Wrapper Entry Point
- `cmd/seqthink-wrapper/main.go`: HTTP server on :8443
- Configuration loading from environment variables
- Graceful shutdown handling
- MCP server readiness checking with timeout

### 2. MCP Client Package
- `pkg/seqthink/mcpclient/client.go`: HTTP client for MCP server
- Communicates with MCP server on localhost:8000
- Health check endpoint
- Tool call endpoint with 120s timeout

### 3. Proxy Server Package
- `pkg/seqthink/proxy/server.go`: HTTP handlers for wrapper
- Health and readiness endpoints
- Tool call proxy (plaintext for Beat 1)
- SSE endpoint placeholder
- Metrics endpoint integration

### 4. Observability Package
- `pkg/seqthink/observability/logger.go`: Structured logging with zerolog
- `pkg/seqthink/observability/metrics.go`: Prometheus metrics
- Counters for requests, errors, decrypt/encrypt failures, policy denials
- Request duration histogram

### 5. Docker Infrastructure
- `deploy/seqthink/Dockerfile`: Multi-stage build
- `deploy/seqthink/entrypoint.sh`: Startup orchestration
- `deploy/seqthink/mcp_stub.py`: Minimal MCP server for testing

### 6. Build System Integration
- Updated `Makefile` with `build-seqthink` target
- Uses GOWORK=off and -mod=mod for clean builds
- `docker-seqthink` target for container builds

## Testing

Successfully builds with:
```
make build-seqthink
```

Binary successfully starts and waits for MCP server connection.
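
A minimal local smoke test, assuming the MCP stub's Python dependencies are installed and the binary lands in the Makefile's build directory (the path shown is an assumption), might look like:

```bash
# Start the MCP stub on loopback (needs fastapi, uvicorn, pydantic)
python3 deploy/seqthink/mcp_stub.py &

# Start the wrapper against it (adjust the binary path to your BUILD_DIR)
PORT=8443 MCP_LOCAL=http://127.0.0.1:8000 LOG_LEVEL=debug ./build/seqthink-wrapper &

# Probe the wrapper endpoints
curl -s http://localhost:8443/health          # expect: OK
curl -s http://localhost:8443/ready           # expect: READY
curl -s http://localhost:8443/metrics | head  # Prometheus counters and histogram

# Beat 1 proxies plaintext JSON straight through to the stub
curl -s -X POST http://localhost:8443/mcp/tool \
  -H 'Content-Type: application/json' \
  -d '{"tool":"mcp__sequential-thinking__sequentialthinking","payload":{"thought":"hello","thoughtNumber":1,"totalThoughts":1,"nextThoughtNeeded":false}}'
```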

## Next Steps

Beat 2 will add:
- Age encryption/decryption (pkg/seqthink/ageio)
- Content-Type: application/age enforcement
- SSE streaming with encrypted frames
- Golden tests for crypto round-trips
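
As a rough preview of those golden tests, a round-trip check could look like the sketch below. It exercises the `filippo.io/age` library directly; the `pkg/seqthink/ageio` package does not exist yet, so treat this purely as an illustration.

```go
package ageio_test // hypothetical package; nothing under pkg/seqthink/ageio exists in Beat 1

import (
	"bytes"
	"io"
	"testing"

	"filippo.io/age"
)

func TestAgeRoundTrip(t *testing.T) {
	id, err := age.GenerateX25519Identity()
	if err != nil {
		t.Fatal(err)
	}

	plaintext := []byte(`{"tool":"mcp__sequential-thinking__sequentialthinking"}`)

	// Encrypt to the identity's recipient.
	var sealed bytes.Buffer
	w, err := age.Encrypt(&sealed, id.Recipient())
	if err != nil {
		t.Fatal(err)
	}
	if _, err := w.Write(plaintext); err != nil {
		t.Fatal(err)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// Decrypt with the matching identity and compare.
	r, err := age.Decrypt(&sealed, id)
	if err != nil {
		t.Fatal(err)
	}
	got, err := io.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, plaintext) {
		t.Fatalf("round trip mismatch: got %q", got)
	}
}
```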

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
**Author**: anthonyrawlins
**Date**: 2025-10-13 08:35:43 +11:00
**Commit**: 3ce9811826 (parent: dd8be05e9c)
**Changes**: 11 changed files with 2424 additions and 9 deletions

### Makefile

```diff
@@ -1,11 +1,12 @@
 # CHORUS Multi-Binary Makefile
-# Builds both chorus-agent and chorus-hap binaries
+# Builds chorus-agent, chorus-hap, and seqthink-wrapper binaries

 # Build configuration
 BINARY_NAME_AGENT = chorus-agent
 BINARY_NAME_HAP = chorus-hap
 BINARY_NAME_COMPAT = chorus
-VERSION ?= 0.5.5
+BINARY_NAME_SEQTHINK = seqthink-wrapper
+VERSION ?= 0.5.28
 COMMIT_HASH ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown")
 BUILD_DATE ?= $(shell date -u '+%Y-%m-%d_%H:%M:%S')
@@ -49,6 +50,14 @@ build-compat:
 	go build $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_COMPAT) ./$(CMD_DIR)/chorus
 	@echo "✅ Compatibility wrapper built: $(BUILD_DIR)/$(BINARY_NAME_COMPAT)"

+# Build Sequential Thinking age-encrypted wrapper
+.PHONY: build-seqthink
+build-seqthink:
+	@echo "🔐 Building Sequential Thinking wrapper..."
+	@mkdir -p $(BUILD_DIR)
+	GOWORK=off go build -mod=mod $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_SEQTHINK) ./$(CMD_DIR)/seqthink-wrapper
+	@echo "✅ SeqThink wrapper built: $(BUILD_DIR)/$(BINARY_NAME_SEQTHINK)"
+
 # Test compilation without building
 .PHONY: test-compile
 test-compile:
@@ -103,8 +112,13 @@ docker-hap:
 	@echo "🐳 Building Docker image for CHORUS HAP..."
 	docker build -f docker/Dockerfile.hap -t chorus-hap:$(VERSION) .

+.PHONY: docker-seqthink
+docker-seqthink:
+	@echo "🔐 Building Docker image for Sequential Thinking wrapper..."
+	docker build -f deploy/seqthink/Dockerfile -t seqthink-wrapper:$(VERSION) .
+
 .PHONY: docker
-docker: docker-agent docker-hap
+docker: docker-agent docker-hap docker-seqthink

 # Help
 .PHONY: help
@@ -112,22 +126,24 @@ help:
 	@echo "CHORUS Multi-Binary Build System"
 	@echo ""
 	@echo "Targets:"
-	@echo " all - Clean and build both binaries (default)"
-	@echo " build - Build both binaries"
+	@echo " all - Clean and build all binaries (default)"
+	@echo " build - Build all binaries"
 	@echo " build-agent - Build autonomous agent binary only"
 	@echo " build-hap - Build human agent portal binary only"
-	@echo " test-compile - Test that both binaries compile"
+	@echo " build-seqthink - Build Sequential Thinking wrapper only"
+	@echo " test-compile - Test that binaries compile"
 	@echo " test - Run tests"
 	@echo " clean - Remove build artifacts"
 	@echo " install - Install binaries to GOPATH/bin"
 	@echo " run-agent - Build and run agent"
 	@echo " run-hap - Build and run HAP"
-	@echo " docker - Build Docker images for both binaries"
+	@echo " docker - Build Docker images for all binaries"
 	@echo " docker-agent - Build Docker image for agent only"
 	@echo " docker-hap - Build Docker image for HAP only"
+	@echo " docker-seqthink - Build Docker image for SeqThink wrapper only"
 	@echo " help - Show this help"
 	@echo ""
 	@echo "Environment Variables:"
-	@echo " VERSION - Version string (default: 0.1.0-dev)"
+	@echo " VERSION - Version string (default: 0.5.28)"
 	@echo " COMMIT_HASH - Git commit hash (auto-detected)"
 	@echo " BUILD_DATE - Build timestamp (auto-generated)"
```

### cmd/seqthink-wrapper/main.go (new file)

@@ -0,0 +1,173 @@
package main
import (
"context"
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"chorus/pkg/seqthink/mcpclient"
"chorus/pkg/seqthink/observability"
"chorus/pkg/seqthink/proxy"
"github.com/rs/zerolog/log"
)
// Config holds the wrapper configuration
type Config struct {
Port string
MCPLocalURL string
LogLevel string
MaxBodyMB int
HealthTimeout time.Duration
ShutdownTimeout time.Duration
AgeIdentPath string
AgeRecipsPath string
KachingJWKSURL string
RequiredScope string
}
func loadConfig() *Config {
return &Config{
Port: getEnv("PORT", "8443"),
MCPLocalURL: getEnv("MCP_LOCAL", "http://127.0.0.1:8000"),
LogLevel: getEnv("LOG_LEVEL", "info"),
MaxBodyMB: getEnvInt("MAX_BODY_MB", 4),
HealthTimeout: 5 * time.Second,
ShutdownTimeout: 30 * time.Second,
AgeIdentPath: getEnv("AGE_IDENT_PATH", ""),
AgeRecipsPath: getEnv("AGE_RECIPS_PATH", ""),
KachingJWKSURL: getEnv("KACHING_JWKS_URL", ""),
RequiredScope: getEnv("REQUIRED_SCOPE", "sequentialthinking.run"),
}
}
func main() {
cfg := loadConfig()
// Initialize observability
observability.InitLogger(cfg.LogLevel)
metrics := observability.InitMetrics()
log.Info().
Str("port", cfg.Port).
Str("mcp_url", cfg.MCPLocalURL).
Str("version", "0.1.0-beta1").
Msg("🚀 Starting Sequential Thinking Age Wrapper")
// Create MCP client
mcpClient := mcpclient.New(cfg.MCPLocalURL)
// Wait for MCP server to be ready
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
log.Info().Msg("⏳ Waiting for MCP server...")
if err := waitForMCP(ctx, mcpClient); err != nil {
log.Fatal().Err(err).Msg("❌ MCP server not ready")
}
log.Info().Msg("✅ MCP server ready")
// Create proxy server
proxyServer, err := proxy.NewServer(proxy.ServerConfig{
MCPClient: mcpClient,
Metrics: metrics,
MaxBodyMB: cfg.MaxBodyMB,
AgeIdentPath: cfg.AgeIdentPath,
AgeRecipsPath: cfg.AgeRecipsPath,
KachingJWKSURL: cfg.KachingJWKSURL,
RequiredScope: cfg.RequiredScope,
})
if err != nil {
log.Fatal().Err(err).Msg("❌ Failed to create proxy server")
}
// Setup HTTP server
srv := &http.Server{
Addr: ":" + cfg.Port,
Handler: proxyServer.Handler(),
ReadTimeout: 30 * time.Second,
WriteTimeout: 90 * time.Second,
IdleTimeout: 120 * time.Second,
}
// Start server in goroutine
go func() {
log.Info().
Str("addr", srv.Addr).
Bool("encryption_enabled", cfg.AgeIdentPath != "").
Bool("policy_enabled", cfg.KachingJWKSURL != "").
Msg("🔐 Wrapper listening")
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
log.Fatal().Err(err).Msg("❌ HTTP server failed")
}
}()
// Wait for shutdown signal
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
<-sigChan
log.Info().Msg("🛑 Shutting down gracefully...")
// Graceful shutdown
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
defer shutdownCancel()
if err := srv.Shutdown(shutdownCtx); err != nil {
log.Error().Err(err).Msg("⚠️ Shutdown error")
}
log.Info().Msg("✅ Shutdown complete")
}
// waitForMCP waits for MCP server to be ready
func waitForMCP(ctx context.Context, client *mcpclient.Client) error {
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return fmt.Errorf("timeout waiting for MCP server")
case <-ticker.C:
if err := client.Health(ctx); err == nil {
return nil
}
log.Debug().Msg("Waiting for MCP server...")
}
}
}
// getEnv gets environment variable with default
func getEnv(key, defaultVal string) string {
if val := os.Getenv(key); val != "" {
return val
}
return defaultVal
}
// getEnvInt gets environment variable as int with default
func getEnvInt(key string, defaultVal int) int {
val := os.Getenv(key)
if val == "" {
return defaultVal
}
var result int
if _, err := fmt.Sscanf(val, "%d", &result); err != nil {
log.Warn().
Str("key", key).
Str("value", val).
Int("default", defaultVal).
Msg("Invalid integer env var, using default")
return defaultVal
}
return result
}

### deploy/seqthink/Dockerfile (new file)

@@ -0,0 +1,86 @@
# Sequential Thinking Age-Encrypted Wrapper
# Beat 1: Plaintext skeleton - encryption added in Beat 2
# Stage 1: Build Go wrapper
FROM golang:1.23-alpine AS go-builder
WORKDIR /build
# Install build dependencies
RUN apk add --no-cache git make
# Copy go mod files
COPY go.mod go.sum ./
RUN go mod download
# Copy source code
COPY . .
# Build the wrapper binary
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo \
-ldflags '-w -s -extldflags "-static"' \
-o seqthink-wrapper \
./cmd/seqthink-wrapper
# Stage 2: Build Python MCP server
FROM python:3.11-slim AS python-builder
WORKDIR /mcp
# Install Sequential Thinking MCP server dependencies
# Note: For Beat 1, we'll use a minimal Python HTTP server
# Full MCP server integration happens in later beats
RUN pip install --no-cache-dir \
fastapi==0.109.0 \
uvicorn[standard]==0.27.0 \
pydantic==2.5.3
# Copy MCP server stub (to be replaced with real implementation)
COPY deploy/seqthink/mcp_stub.py /mcp/server.py
# Stage 3: Runtime
FROM debian:bookworm-slim
# Install runtime dependencies (curl is required by the HEALTHCHECK and the entrypoint readiness probe)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates \
curl \
python3 \
python3-pip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install Python packages in runtime
RUN pip3 install --no-cache-dir --break-system-packages \
fastapi==0.109.0 \
uvicorn[standard]==0.27.0 \
pydantic==2.5.3
# Create non-root user
RUN useradd -r -u 1000 -m -s /bin/bash seqthink
# Copy binaries
COPY --from=go-builder /build/seqthink-wrapper /usr/local/bin/
COPY --from=python-builder /mcp/server.py /opt/mcp/server.py
# Copy entrypoint
COPY deploy/seqthink/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Setup directories
RUN mkdir -p /etc/seqthink /var/log/seqthink && \
chown -R seqthink:seqthink /etc/seqthink /var/log/seqthink
# Switch to non-root user
USER seqthink
WORKDIR /home/seqthink
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD curl -f http://localhost:8443/health || exit 1
# Expose wrapper port (MCP server on 127.0.0.1:8000 is internal only)
EXPOSE 8443
# Run entrypoint
ENTRYPOINT ["/entrypoint.sh"]
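
A quick, hypothetical way to exercise the image locally (the tag follows the Makefile's default VERSION; adjust as needed):

```bash
make docker-seqthink
docker run --rm -p 8443:8443 seqthink-wrapper:0.5.28

# From another shell
curl -s http://localhost:8443/ready
```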

### deploy/seqthink/entrypoint.sh (new file)

@@ -0,0 +1,27 @@
#!/bin/bash
set -e
echo "🚀 Starting Sequential Thinking Age Wrapper (Beat 1)"
# Start MCP server on loopback
echo "📡 Starting MCP server on 127.0.0.1:8000..."
python3 /opt/mcp/server.py &
MCP_PID=$!
# Wait for MCP server to be ready
echo "⏳ Waiting for MCP server to be ready..."
for i in {1..30}; do
if curl -sf http://127.0.0.1:8000/health > /dev/null 2>&1; then
echo "✅ MCP server ready"
break
fi
if [ $i -eq 30 ]; then
echo "❌ MCP server failed to start"
exit 1
fi
sleep 1
done
# Start wrapper
echo "🔐 Starting wrapper on :8443..."
exec seqthink-wrapper

### deploy/seqthink/mcp_stub.py (new file)

@@ -0,0 +1,70 @@
#!/usr/bin/env python3
"""
Sequential Thinking MCP Server Stub (Beat 1)
This is a minimal implementation for testing the wrapper infrastructure.
In later beats, this will be replaced with the full Sequential Thinking MCP server.
"""
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Dict, Any, Optional
import uvicorn
app = FastAPI(title="Sequential Thinking MCP Server Stub")
class ToolRequest(BaseModel):
tool: str
payload: Dict[str, Any]
class ToolResponse(BaseModel):
result: Optional[Any] = None
error: Optional[str] = None
@app.get("/health")
async def health():
"""Health check endpoint"""
return {"status": "ok"}
@app.post("/mcp/tool")
async def call_tool(request: ToolRequest) -> ToolResponse:
"""
Tool call endpoint - stub implementation
In Beat 1, this just echoes back the request to verify the wrapper works.
Later beats will implement the actual Sequential Thinking logic.
"""
if request.tool != "mcp__sequential-thinking__sequentialthinking":
return ToolResponse(
error=f"Unknown tool: {request.tool}"
)
# Stub response for Sequential Thinking tool
payload = request.payload
thought_number = payload.get("thoughtNumber", 1)
total_thoughts = payload.get("totalThoughts", 5)
thought = payload.get("thought", "")
next_thought_needed = payload.get("nextThoughtNeeded", True)
return ToolResponse(
result={
"thoughtNumber": thought_number,
"totalThoughts": total_thoughts,
"thought": thought,
"nextThoughtNeeded": next_thought_needed,
"message": "Beat 1 stub - Sequential Thinking not yet implemented"
}
)
if __name__ == "__main__":
uvicorn.run(
app,
host="127.0.0.1",
port=8000,
log_level="info"
)

(One file's diff is suppressed because it is too large to display.)

### Sequential Thinking integration plan (new document)

@@ -0,0 +1,579 @@
# Sequential Thinking Integration Plan for CHORUS Agents
**Date**: 2025-10-13
**Status**: Design Phase
**Priority**: High - Blocking further intelligence improvements
---
## Executive Summary
This document outlines the integration of the Sequential Thinking MCP server into CHORUS agents to enable **structured, multi-step reasoning** before task execution. This addresses the limitation in the SequentialThinkingForCHORUS repository issue and unlocks advanced agent decision-making capabilities.
**Problem Statement**: CHORUS agents currently use simple prompt-response cycles without structured reasoning, limiting their ability to handle complex tasks requiring multi-step analysis, hypothesis generation, and iterative refinement.
**Solution**: Integrate the `mcp__sequential-thinking__sequentialthinking` MCP tool into the AI provider layer to enable chain-of-thought reasoning for complex tasks.
---
## Current Architecture Analysis
### 1. Existing AI Provider Flow
```
TaskRequest → ModelProvider.ExecuteTask() → TaskResponse
                        ↓
                 [Single LLM Call]
                        ↓
                  Response String
```
**Current Providers**:
- **OllamaProvider**: Local model execution
- **ResetDataProvider**: ResetData LaaS API
- **OpenAIProvider**: OpenAI API
**Current Limitations**:
- ✗ No structured reasoning process
- ✗ No ability to revise initial thoughts
- ✗ No hypothesis generation and verification
- ✗ No branching for alternative approaches
- ✗ Simple string reasoning field (not structured)
### 2. TaskResponse Structure
**Location**: `/home/tony/chorus/project-queues/active/CHORUS/pkg/ai/provider.go:53-78`
```go
type TaskResponse struct {
Success bool `json:"success"`
TaskID string `json:"task_id"`
Response string `json:"response"`
Reasoning string `json:"reasoning,omitempty"` // ← Simple string
Actions []TaskAction `json:"actions,omitempty"`
Artifacts []Artifact `json:"artifacts,omitempty"`
TokensUsed TokenUsage `json:"tokens_used,omitempty"`
// ... other fields
}
```
**Opportunity**: The `Reasoning` field is perfect for storing structured thinking output!
---
## Sequential Thinking MCP Tool
### Tool Signature
```go
mcp__sequential-thinking__sequentialthinking(
thought: string,
nextThoughtNeeded: bool,
thoughtNumber: int,
totalThoughts: int,
isRevision: bool = false,
revisesThought: int = null,
branchFromThought: int = null,
branchId: string = null,
needsMoreThoughts: bool = false
)
```
### Capabilities
1. **Adaptive Thinking**: Adjust `totalThoughts` up or down as understanding deepens
2. **Revision Support**: Question and revise previous thoughts (`isRevision`, `revisesThought`)
3. **Branching**: Explore alternative approaches (`branchFromThought`, `branchId`)
4. **Hypothesis Testing**: Generate and verify hypotheses in chain-of-thought
5. **Uncertainty Expression**: Express and work through unclear aspects
6. **Context Maintenance**: Keep track of all previous thoughts
### When to Use
- **Complex problem decomposition**
- **Multi-step solution planning**
- **Problems requiring course correction**
- **Unclear scope requiring exploration**
- **Tasks needing context over multiple steps**
- **Filtering irrelevant information**
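
As a concrete illustration, a single revision step could be sent through the Beat 1 wrapper client added in this commit, which forwards the same JSON shape to the MCP server. The reasoning text and the call site are invented for the example:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"chorus/pkg/seqthink/mcpclient"
)

func main() {
	// Point at the wrapper (plaintext in Beat 1); :8000 would hit the MCP server directly.
	client := mcpclient.New("http://127.0.0.1:8443")

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Thought 4 revises thought 2, mirroring the isRevision/revisesThought parameters above.
	resp, err := client.CallTool(ctx, &mcpclient.ToolRequest{
		Tool: "mcp__sequential-thinking__sequentialthinking",
		Payload: map[string]interface{}{
			"thought":           "Revisiting thought 2: the migration also needs a rollback plan.",
			"thoughtNumber":     4,
			"totalThoughts":     6,
			"nextThoughtNeeded": true,
			"isRevision":        true,
			"revisesThought":    2,
		},
	})
	if err != nil {
		fmt.Println("tool call failed:", err)
		return
	}
	fmt.Printf("result: %+v\n", resp.Result)
}
```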
---
## Proposed Integration Architecture
### Phase 1: Enhanced TaskResponse Structure
**File**: `pkg/ai/provider.go`
```go
// StructuredReasoning represents chain-of-thought reasoning process
type StructuredReasoning struct {
Thoughts []ThoughtStep `json:"thoughts"`
FinalHypothesis string `json:"final_hypothesis,omitempty"`
VerificationSteps []string `json:"verification_steps,omitempty"`
Confidence float32 `json:"confidence"` // 0.0-1.0
TotalRevisions int `json:"total_revisions"`
BranchesExplored int `json:"branches_explored"`
}
// ThoughtStep represents a single step in the reasoning process
type ThoughtStep struct {
Number int `json:"number"`
Content string `json:"content"`
IsRevision bool `json:"is_revision"`
RevisesThought int `json:"revises_thought,omitempty"`
BranchID string `json:"branch_id,omitempty"`
BranchFrom int `json:"branch_from,omitempty"`
Timestamp time.Time `json:"timestamp"`
}
// TaskResponse update
type TaskResponse struct {
// ... existing fields ...
Reasoning string `json:"reasoning,omitempty"` // Legacy simple string
StructuredReasoning *StructuredReasoning `json:"structured_reasoning,omitempty"` // NEW
// ... rest of fields ...
}
```
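
To make the new field concrete, a hypothetical populated value (all content invented, assuming the types above) might look like:

```go
// Illustrative only: a three-step trace with one revision.
var exampleReasoning = StructuredReasoning{
	Thoughts: []ThoughtStep{
		{Number: 1, Content: "Split the task into schema, migration, and API changes."},
		{Number: 2, Content: "Apply the schema change in a single release."},
		{Number: 3, Content: "Revise step 2: a two-phase migration avoids downtime.", IsRevision: true, RevisesThought: 2},
	},
	FinalHypothesis:  "Ship a two-phase migration first, then the API change.",
	Confidence:       0.8,
	TotalRevisions:   1,
	BranchesExplored: 0,
}
```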
### Phase 2: Sequential Thinking Wrapper
**New File**: `pkg/ai/sequential_thinking.go`
```go
package ai
import (
"context"
"encoding/json"
"fmt"
)
// SequentialThinkingEngine wraps MCP sequential thinking tool
type SequentialThinkingEngine struct {
mcpClient MCPClient // Interface to MCP tool
}
// ThinkingRequest represents input for sequential thinking
type ThinkingRequest struct {
Problem string
Context map[string]interface{}
MaxThoughts int
AllowRevisions bool
AllowBranching bool
}
// ThinkingResult represents output from sequential thinking
type ThinkingResult struct {
Thoughts []ThoughtStep
FinalConclusion string
Confidence float32
ReasoningPath string // Markdown summary of thinking process
}
// Think executes sequential thinking process
func (e *SequentialThinkingEngine) Think(ctx context.Context, req *ThinkingRequest) (*ThinkingResult, error) {
// Implementation:
// 1. Initialize thinking with problem statement
// 2. Iteratively call MCP tool until nextThoughtNeeded = false
// 3. Track all thoughts, revisions, branches
// 4. Generate final conclusion and reasoning summary
// 5. Return structured result
}
```
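
For orientation, one possible shape of that loop is sketched below. It assumes `MCPClient` exposes `CallTool(ctx, tool string, payload map[string]interface{}) (json.RawMessage, error)`; that signature, and the placeholder thought text, are assumptions for the sketch rather than the final design.

```go
// Sketch only: iterate the MCP tool until it reports nextThoughtNeeded=false.
func (e *SequentialThinkingEngine) Think(ctx context.Context, req *ThinkingRequest) (*ThinkingResult, error) {
	result := &ThinkingResult{}
	total := req.MaxThoughts

	for n := 1; n <= req.MaxThoughts; n++ {
		// In a real implementation the thought text comes from an LLM call;
		// a placeholder string stands in for it here.
		payload := map[string]interface{}{
			"thought":           fmt.Sprintf("Step %d: analyse %q", n, req.Problem),
			"thoughtNumber":     n,
			"totalThoughts":     total,
			"nextThoughtNeeded": n < total,
		}

		raw, err := e.mcpClient.CallTool(ctx, "mcp__sequential-thinking__sequentialthinking", payload)
		if err != nil {
			return nil, fmt.Errorf("thought %d: %w", n, err)
		}

		var step struct {
			Thought           string `json:"thought"`
			TotalThoughts     int    `json:"totalThoughts"`
			NextThoughtNeeded bool   `json:"nextThoughtNeeded"`
		}
		if err := json.Unmarshal(raw, &step); err != nil {
			return nil, fmt.Errorf("decode thought %d: %w", n, err)
		}

		result.Thoughts = append(result.Thoughts, ThoughtStep{Number: n, Content: step.Thought})

		// The tool can raise or lower the expected total as understanding deepens.
		if step.TotalThoughts > 0 {
			total = step.TotalThoughts
		}
		if !step.NextThoughtNeeded {
			result.FinalConclusion = step.Thought
			break
		}
	}

	result.ReasoningPath = fmt.Sprintf("%d thoughts recorded", len(result.Thoughts))
	return result, nil
}
```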
### Phase 3: Provider Integration
**Modified File**: `pkg/ai/resetdata.go`
```go
// ExecuteTask with sequential thinking
func (p *ResetDataProvider) ExecuteTask(ctx context.Context, request *TaskRequest) (*TaskResponse, error) {
startTime := time.Now()
// Determine if task requires sequential thinking
useSequentialThinking := p.shouldUseSequentialThinking(request)
var structuredReasoning *StructuredReasoning
var enhancedPrompt string
if useSequentialThinking {
// Use sequential thinking engine to analyze task first
thinkingEngine := NewSequentialThinkingEngine(p.mcpClient)
thinkingResult, err := thinkingEngine.Think(ctx, &ThinkingRequest{
Problem: p.formatTaskAsProblem(request),
Context: request.Context,
MaxThoughts: 10,
AllowRevisions: true,
AllowBranching: true,
})
if err != nil {
// Fall back to direct execution if thinking fails
log.Warn().Err(err).Msg("Sequential thinking failed, falling back to direct execution")
} else {
// Use thinking result to enhance prompt
enhancedPrompt = p.buildPromptWithThinking(request, thinkingResult)
structuredReasoning = convertToStructuredReasoning(thinkingResult)
}
}
// Execute with enhanced prompt (if available) or standard prompt
messages, _ := p.buildChatMessages(request, enhancedPrompt)
// ... rest of execution ...
return &TaskResponse{
Success: true,
Response: responseText,
Reasoning: legacyReasoningString,
StructuredReasoning: structuredReasoning, // NEW
// ... rest of response ...
}
}
// shouldUseSequentialThinking determines if task warrants sequential thinking
func (p *ResetDataProvider) shouldUseSequentialThinking(request *TaskRequest) bool {
// Use sequential thinking for:
// - High complexity tasks (complexity >= 7)
// - Architect role (requires system design)
// - Tasks with "design" or "architecture" in title/labels
// - Tasks requiring multi-step planning
if request.Complexity >= 7 {
return true
}
role := strings.ToLower(request.AgentRole)
if role == "architect" || role == "senior-developer" {
return true
}
keywords := []string{"design", "architecture", "refactor", "plan", "strategy"}
taskText := strings.ToLower(request.TaskTitle + " " + request.TaskDescription)
for _, keyword := range keywords {
if strings.Contains(taskText, keyword) {
return true
}
}
return false
}
```
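
The snippet above calls a `convertToStructuredReasoning` helper that is not defined anywhere yet; one possible sketch, assuming the Phase 1 and Phase 2 types, is:

```go
// Sketch only: map the engine's output onto the StructuredReasoning field from Phase 1.
func convertToStructuredReasoning(r *ThinkingResult) *StructuredReasoning {
	if r == nil {
		return nil
	}
	sr := &StructuredReasoning{
		Thoughts:        r.Thoughts,
		FinalHypothesis: r.FinalConclusion,
		Confidence:      r.Confidence,
	}
	branches := map[string]bool{}
	for _, t := range r.Thoughts {
		if t.IsRevision {
			sr.TotalRevisions++
		}
		if t.BranchID != "" {
			branches[t.BranchID] = true
		}
	}
	sr.BranchesExplored = len(branches)
	return sr
}
```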
---
## Implementation Phases
### Phase 1: Foundation (Days 1-2)
**Tasks**:
1. ✅ Define `StructuredReasoning` and `ThoughtStep` types
2. ✅ Add `StructuredReasoning` field to `TaskResponse`
3. ✅ Create `SequentialThinkingEngine` skeleton
4. ✅ Add MCP client interface for sequential-thinking tool
**Files to Create/Modify**:
- `pkg/ai/provider.go` - Add new types
- `pkg/ai/sequential_thinking.go` - New file
- `pkg/ai/mcp_client.go` - New file for MCP integration
**Success Criteria**:
- Code compiles without errors
- Types are properly defined
- MCP client interface is clear
### Phase 2: Sequential Thinking Engine (Days 3-5)
**Tasks**:
1. Implement `SequentialThinkingEngine.Think()` method
2. Implement MCP tool call wrapper
3. Add thought tracking and revision detection
4. Implement branch management
5. Generate reasoning summaries
6. Write unit tests
**Files**:
- `pkg/ai/sequential_thinking.go` - Full implementation
- `pkg/ai/sequential_thinking_test.go` - Unit tests
**Success Criteria**:
- Can execute complete thinking cycles
- Properly tracks revisions and branches
- Generates clear reasoning summaries
- All unit tests pass
### Phase 3: Provider Integration (Days 6-8)
**Tasks**:
1. Modify `ResetDataProvider.ExecuteTask()` for sequential thinking
2. Implement `shouldUseSequentialThinking()` heuristics
3. Add prompt enhancement with thinking results
4. Implement fallback for thinking failures
5. Add configuration options
6. Write integration tests
**Files**:
- `pkg/ai/resetdata.go` - Modify ExecuteTask
- `pkg/ai/ollama.go` - Same modifications
- `config/agent.yaml` - Add sequential thinking config
**Success Criteria**:
- Complex tasks trigger sequential thinking
- Thinking results enhance task execution
- Graceful fallback on failures
- Integration tests pass
### Phase 4: Testing & Validation (Days 9-10)
**Tasks**:
1. End-to-end testing with real councils
2. Test with various complexity levels
3. Validate reasoning quality improvements
4. Performance benchmarking
5. Documentation updates
**Test Cases**:
- Simple task (complexity=3) → No sequential thinking
- Complex task (complexity=8) → Sequential thinking enabled
- Architect role → Always uses sequential thinking
- Design task → Sequential thinking with branching
- Fallback scenario → Graceful degradation
**Success Criteria**:
- Demonstrable improvement in task quality
- Acceptable performance overhead (<30% increase in latency)
- Clear reasoning traces in artifacts
- Documentation complete
---
## Configuration
### Agent Configuration
**File**: `config/agent.yaml`
```yaml
ai_providers:
resetdata:
type: "resetdata"
endpoint: "${RESETDATA_API_ENDPOINT}"
api_key: "${RESETDATA_API_KEY}"
default_model: "llama3.1:70b"
# Sequential thinking configuration
enable_sequential_thinking: true
sequential_thinking:
min_complexity: 7 # Minimum complexity to trigger
force_for_roles: # Always use for these roles
- architect
- senior-developer
max_thoughts: 15 # Maximum thinking iterations
enable_revisions: true # Allow thought revisions
enable_branching: true # Allow exploring alternatives
confidence_threshold: 0.7 # Minimum confidence for final answer
```
### Runtime Toggle
Allow runtime control via council brief:
```json
{
"task_id": "task-123",
"complexity": 8,
"use_sequential_thinking": true, // Explicit override
"thinking_config": {
"max_thoughts": 20,
"allow_branching": true
}
}
```
---
## Benefits & Expected Improvements
### 1. Better Problem Decomposition
**Before**:
```
Agent: Here's my solution [immediately provides implementation]
```
**After**:
```
Thought 1: Breaking down the task into 3 main components...
Thought 2: Component A requires database schema changes...
Thought 3: Wait, revising thought 2 - migration strategy needs consideration...
Thought 4: Exploring alternative: event sourcing vs direct updates...
Thought 5: Event sourcing better for audit trail requirements...
Final: Implementation plan with 5 concrete steps...
```
### 2. Improved Architecture Decisions
Architect agents can:
- Explore multiple design alternatives
- Revise decisions based on discovered constraints
- Build and verify hypotheses about scalability
- Document reasoning trail for future reference
### 3. Higher Quality Code
Developer agents can:
- Think through edge cases before coding
- Consider multiple implementation approaches
- Revise initial assumptions
- Plan testing strategy upfront
### 4. Debugging Enhancement
When tasks fail:
- Reasoning traces show where agent went wrong
- Can identify flawed assumptions
- Easier to improve prompts and heuristics
---
## Performance Considerations
### 1. Latency Impact
**Estimated Overhead**:
- Sequential thinking: 5-15 LLM calls (vs 1 direct call)
- Expected latency increase: 10-30 seconds for complex tasks
- **Mitigation**: Only use for high-complexity tasks (complexity >= 7)
### 2. Token Usage
**Estimated Increase**:
- Each thought: ~200-500 tokens
- 10 thoughts: ~3000-5000 additional tokens
- **Mitigation**: Set reasonable `max_thoughts` limits
### 3. Resource Requirements
**MCP Server**:
- Sequential thinking MCP server must be available
- Requires proper error handling and fallback
---
## Risks & Mitigations
| Risk | Impact | Mitigation |
|------|--------|------------|
| MCP server unavailable | High | Graceful fallback to direct execution |
| Increased latency unacceptable | Medium | Make sequential thinking opt-in per task |
| Token cost explosion | Medium | Set hard limits on max_thoughts |
| Reasoning doesn't improve quality | High | A/B testing with metrics |
| Complex implementation | Medium | Phased rollout with testing |
---
## Success Metrics
### Quantitative
1. **Task Success Rate**: Compare before/after for complexity >= 7 tasks
- Target: +15% improvement
2. **Code Quality**: Static analysis scores for generated code
- Target: +20% improvement in complexity score
3. **PR Acceptance Rate**: How many agent PRs get merged
- Target: +25% improvement
4. **Latency**: Task execution time
- Acceptable: <30% increase for complex tasks
### Qualitative
1. **Reasoning Quality**: Human review of reasoning traces
2. **Decision Clarity**: Can humans understand agent's thought process?
3. **Developer Feedback**: Easier to debug failed tasks?
---
## Rollout Plan
### Stage 1: Internal Testing (Week 1)
- Deploy to development environment
- Test with synthetic tasks
- Gather performance metrics
- Refine heuristics
### Stage 2: Limited Production (Week 2)
- Enable for architect role only
- Enable for complexity >= 9 only
- Monitor closely
- Collect feedback
### Stage 3: Expanded Rollout (Week 3-4)
- Enable for all roles with complexity >= 7
- Add complexity-based opt-in
- Full production deployment
- Continuous monitoring
### Stage 4: Optimization (Week 5+)
- Fine-tune heuristics based on data
- Optimize thought limits
- Improve reasoning summaries
- Add advanced features (e.g., multi-agent reasoning)
---
## Future Enhancements
### 1. Multi-Agent Reasoning
Multiple agents can contribute thoughts to same reasoning chain:
- Architect proposes design
- Security agent reviews security implications
- Performance agent analyzes scalability
### 2. Reasoning Templates
Pre-defined thinking patterns for common scenarios:
- API design checklist
- Security review framework
- Performance optimization workflow
### 3. Learning from Reasoning
Store successful reasoning patterns:
- Build knowledge base of good reasoning traces
- Use as examples in future tasks
- Identify common pitfalls
### 4. Visualization
Dashboard showing reasoning graphs:
- Thought flow diagrams
- Revision history
- Branch exploration trees
- Confidence evolution
---
## References
- **SequentialThinkingForCHORUS Issue**: (Repository in GITEA)
- **MCP Sequential Thinking Tool**: Available in Claude Code MCP servers
- **CHORUS Task Execution**: `/home/tony/chorus/project-queues/active/CHORUS/pkg/execution/engine.go`
- **AI Provider Interface**: `/home/tony/chorus/project-queues/active/CHORUS/pkg/ai/provider.go`
- **ResetData Provider**: `/home/tony/chorus/project-queues/active/CHORUS/pkg/ai/resetdata.go`
---
## Document Info
- **Created**: 2025-10-13
- **Author**: Claude Code
- **Status**: Design Complete - Ready for Implementation
- **Next Steps**: Begin Phase 1 implementation

### pkg/seqthink/mcpclient/client.go (new file)

@@ -0,0 +1,100 @@
package mcpclient
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
// Client is a client for the Sequential Thinking MCP server
type Client struct {
baseURL string
httpClient *http.Client
}
// ToolRequest represents a request to call an MCP tool
type ToolRequest struct {
Tool string `json:"tool"`
Payload map[string]interface{} `json:"payload"`
}
// ToolResponse represents the response from an MCP tool call
type ToolResponse struct {
Result interface{} `json:"result,omitempty"`
Error string `json:"error,omitempty"`
}
// New creates a new MCP client
func New(baseURL string) *Client {
return &Client{
baseURL: baseURL,
httpClient: &http.Client{
Timeout: 120 * time.Second, // Longer timeout for thinking operations
},
}
}
// Health checks if the MCP server is healthy
func (c *Client) Health(ctx context.Context) error {
req, err := http.NewRequestWithContext(ctx, "GET", c.baseURL+"/health", nil)
if err != nil {
return fmt.Errorf("create request: %w", err)
}
resp, err := c.httpClient.Do(req)
if err != nil {
return fmt.Errorf("http request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("health check failed: status %d", resp.StatusCode)
}
return nil
}
// CallTool calls an MCP tool
func (c *Client) CallTool(ctx context.Context, req *ToolRequest) (*ToolResponse, error) {
jsonData, err := json.Marshal(req)
if err != nil {
return nil, fmt.Errorf("marshal request: %w", err)
}
httpReq, err := http.NewRequestWithContext(ctx, "POST", c.baseURL+"/mcp/tool", bytes.NewReader(jsonData))
if err != nil {
return nil, fmt.Errorf("create request: %w", err)
}
httpReq.Header.Set("Content-Type", "application/json")
resp, err := c.httpClient.Do(httpReq)
if err != nil {
return nil, fmt.Errorf("http request: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("read response: %w", err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("tool call failed: status %d, body: %s", resp.StatusCode, string(body))
}
var toolResp ToolResponse
if err := json.Unmarshal(body, &toolResp); err != nil {
return nil, fmt.Errorf("unmarshal response: %w", err)
}
if toolResp.Error != "" {
return nil, fmt.Errorf("tool error: %s", toolResp.Error)
}
return &toolResp, nil
}

### pkg/seqthink/observability/logger.go (new file)

@@ -0,0 +1,39 @@
package observability
import (
"os"
"strings"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// InitLogger initializes the global logger
func InitLogger(level string) {
// Set up zerolog with human-friendly console output
output := zerolog.ConsoleWriter{
Out: os.Stdout,
TimeFormat: time.RFC3339,
}
log.Logger = zerolog.New(output).
With().
Timestamp().
Caller().
Logger()
// Set log level
switch strings.ToLower(level) {
case "debug":
zerolog.SetGlobalLevel(zerolog.DebugLevel)
case "info":
zerolog.SetGlobalLevel(zerolog.InfoLevel)
case "warn":
zerolog.SetGlobalLevel(zerolog.WarnLevel)
case "error":
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
default:
zerolog.SetGlobalLevel(zerolog.InfoLevel)
}
}

### pkg/seqthink/observability/metrics.go (new file)

@@ -0,0 +1,85 @@
package observability
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Metrics holds Prometheus metrics for the wrapper
type Metrics struct {
requestsTotal prometheus.Counter
errorsTotal prometheus.Counter
decryptFails prometheus.Counter
encryptFails prometheus.Counter
policyDenials prometheus.Counter
requestDuration prometheus.Histogram
}
// InitMetrics initializes Prometheus metrics
func InitMetrics() *Metrics {
return &Metrics{
requestsTotal: promauto.NewCounter(prometheus.CounterOpts{
Name: "seqthink_requests_total",
Help: "Total number of requests received",
}),
errorsTotal: promauto.NewCounter(prometheus.CounterOpts{
Name: "seqthink_errors_total",
Help: "Total number of errors",
}),
decryptFails: promauto.NewCounter(prometheus.CounterOpts{
Name: "seqthink_decrypt_failures_total",
Help: "Total number of decryption failures",
}),
encryptFails: promauto.NewCounter(prometheus.CounterOpts{
Name: "seqthink_encrypt_failures_total",
Help: "Total number of encryption failures",
}),
policyDenials: promauto.NewCounter(prometheus.CounterOpts{
Name: "seqthink_policy_denials_total",
Help: "Total number of policy denials",
}),
requestDuration: promauto.NewHistogram(prometheus.HistogramOpts{
Name: "seqthink_request_duration_seconds",
Help: "Request duration in seconds",
Buckets: prometheus.DefBuckets,
}),
}
}
// IncrementRequests increments the request counter
func (m *Metrics) IncrementRequests() {
m.requestsTotal.Inc()
}
// IncrementErrors increments the error counter
func (m *Metrics) IncrementErrors() {
m.errorsTotal.Inc()
}
// IncrementDecryptFails increments the decrypt failure counter
func (m *Metrics) IncrementDecryptFails() {
m.decryptFails.Inc()
}
// IncrementEncryptFails increments the encrypt failure counter
func (m *Metrics) IncrementEncryptFails() {
m.encryptFails.Inc()
}
// IncrementPolicyDenials increments the policy denial counter
func (m *Metrics) IncrementPolicyDenials() {
m.policyDenials.Inc()
}
// ObserveRequestDuration records request duration
func (m *Metrics) ObserveRequestDuration(seconds float64) {
m.requestDuration.Observe(seconds)
}
// Handler returns the Prometheus metrics HTTP handler
func (m *Metrics) Handler() http.Handler {
return promhttp.Handler()
}

### pkg/seqthink/proxy/server.go (new file)

@@ -0,0 +1,150 @@
package proxy
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
"chorus/pkg/seqthink/mcpclient"
"chorus/pkg/seqthink/observability"
"github.com/gorilla/mux"
"github.com/rs/zerolog/log"
)
// ServerConfig holds the proxy server configuration
type ServerConfig struct {
MCPClient *mcpclient.Client
Metrics *observability.Metrics
MaxBodyMB int
AgeIdentPath string
AgeRecipsPath string
KachingJWKSURL string
RequiredScope string
}
// Server is the proxy server handling requests
type Server struct {
config ServerConfig
router *mux.Router
}
// NewServer creates a new proxy server
func NewServer(cfg ServerConfig) (*Server, error) {
s := &Server{
config: cfg,
router: mux.NewRouter(),
}
// Setup routes
s.setupRoutes()
return s, nil
}
// Handler returns the HTTP handler
func (s *Server) Handler() http.Handler {
return s.router
}
// setupRoutes configures the HTTP routes
func (s *Server) setupRoutes() {
// Health checks
s.router.HandleFunc("/health", s.handleHealth).Methods("GET")
s.router.HandleFunc("/ready", s.handleReady).Methods("GET")
// MCP tool endpoint (plaintext for Beat 1)
s.router.HandleFunc("/mcp/tool", s.handleToolCall).Methods("POST")
// SSE endpoint (placeholder for Beat 1)
s.router.HandleFunc("/mcp/sse", s.handleSSE).Methods("GET")
// Metrics endpoint
s.router.Handle("/metrics", s.config.Metrics.Handler())
}
// handleHealth returns 200 OK if wrapper is running
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("OK"))
}
// handleReady checks if MCP server is ready
func (s *Server) handleReady(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
defer cancel()
if err := s.config.MCPClient.Health(ctx); err != nil {
log.Error().Err(err).Msg("MCP server not ready")
http.Error(w, "MCP server not ready", http.StatusServiceUnavailable)
return
}
w.WriteHeader(http.StatusOK)
w.Write([]byte("READY"))
}
// handleToolCall proxies tool calls to MCP server (plaintext for Beat 1)
func (s *Server) handleToolCall(w http.ResponseWriter, r *http.Request) {
s.config.Metrics.IncrementRequests()
startTime := time.Now()
// Limit request body size
r.Body = http.MaxBytesReader(w, r.Body, int64(s.config.MaxBodyMB)*1024*1024)
// Read request body
body, err := io.ReadAll(r.Body)
if err != nil {
log.Error().Err(err).Msg("Failed to read request body")
s.config.Metrics.IncrementErrors()
http.Error(w, "Failed to read request", http.StatusBadRequest)
return
}
// Parse tool request
var toolReq mcpclient.ToolRequest
if err := json.Unmarshal(body, &toolReq); err != nil {
log.Error().Err(err).Msg("Failed to parse tool request")
s.config.Metrics.IncrementErrors()
http.Error(w, "Invalid request format", http.StatusBadRequest)
return
}
log.Info().
Str("tool", toolReq.Tool).
Msg("Proxying tool call to MCP server")
// Call MCP server
ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
defer cancel()
toolResp, err := s.config.MCPClient.CallTool(ctx, &toolReq)
if err != nil {
log.Error().Err(err).Msg("MCP tool call failed")
s.config.Metrics.IncrementErrors()
http.Error(w, fmt.Sprintf("Tool call failed: %v", err), http.StatusInternalServerError)
return
}
// Return response
w.Header().Set("Content-Type", "application/json")
if err := json.NewEncoder(w).Encode(toolResp); err != nil {
log.Error().Err(err).Msg("Failed to encode response")
s.config.Metrics.IncrementErrors()
return
}
duration := time.Since(startTime)
s.config.Metrics.ObserveRequestDuration(duration.Seconds())
log.Info().
Str("tool", toolReq.Tool).
Dur("duration", duration).
Msg("Tool call completed")
}
// handleSSE is a placeholder for Server-Sent Events streaming (Beat 1)
func (s *Server) handleSSE(w http.ResponseWriter, r *http.Request) {
log.Warn().Msg("SSE endpoint not yet implemented")
http.Error(w, "SSE endpoint not implemented in Beat 1", http.StatusNotImplemented)
}