# Compare commits

**15 commits** · `docs/compr…007aeb149a`

- `007aeb149a`
- `2fd9a96950`
- `c99def17d7`
- `9190c75440`
- `a658a7364d`
- `3ce9811826`
- `dd8be05e9c`
- `df5ec34b4f`
- `fe6765afea`
- `511e52a05c`
- `f7130b327c`
- `7381137db5`
- `9f480986fa`
- `4d424764e5`
- `63dab5c4d4`
```diff
@@ -1,3 +1,19 @@
+# ⚠️ DEPRECATED: DO NOT USE THIS DOCKERFILE ⚠️
+#
+# This Alpine-based Dockerfile is INCOMPATIBLE with the chorus-agent binary
+# built by 'make build-agent'. The binary is compiled with glibc dependencies
+# and will NOT run on Alpine's musl libc.
+#
+# ERROR when used: "exec /app/chorus-agent: no such file or directory"
+#
+# ✅ USE Dockerfile.ubuntu INSTEAD
+#
+# This file is kept for reference only and should not be used for builds.
+# Last failed: 2025-10-01
+# Reason: Alpine musl libc incompatibility with glibc-linked binary
+#
+# -------------------------------------------------------------------
+
 # CHORUS - Simple Docker image using pre-built binary
 FROM alpine:3.18
 
```
**Makefile** — 47 changes
```diff
@@ -1,11 +1,12 @@
 # CHORUS Multi-Binary Makefile
-# Builds both chorus-agent and chorus-hap binaries
+# Builds chorus-agent, chorus-hap, and seqthink-wrapper binaries
 
 # Build configuration
 BINARY_NAME_AGENT = chorus-agent
 BINARY_NAME_HAP = chorus-hap
 BINARY_NAME_COMPAT = chorus
-VERSION ?= 0.5.5
+BINARY_NAME_SEQTHINK = seqthink-wrapper
+VERSION ?= 0.5.40
 COMMIT_HASH ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown")
 BUILD_DATE ?= $(shell date -u '+%Y-%m-%d_%H:%M:%S')
 
@@ -30,15 +31,15 @@ build: build-agent build-hap build-compat
 build-agent:
 	@echo "🤖 Building CHORUS autonomous agent..."
 	@mkdir -p $(BUILD_DIR)
-	go build $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_AGENT) ./$(CMD_DIR)/agent
+	GOWORK=off go build -mod=mod $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_AGENT) ./$(CMD_DIR)/agent
 	@echo "✅ Agent binary built: $(BUILD_DIR)/$(BINARY_NAME_AGENT)"
 
 # Build human agent portal binary
 .PHONY: build-hap
 build-hap:
 	@echo "👤 Building CHORUS human agent portal..."
 	@mkdir -p $(BUILD_DIR)
-	go build $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_HAP) ./$(CMD_DIR)/hap
+	GOWORK=off go build -mod=mod $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_HAP) ./$(CMD_DIR)/hap
 	@echo "✅ HAP binary built: $(BUILD_DIR)/$(BINARY_NAME_HAP)"
 
 # Build compatibility wrapper (deprecated)
@@ -46,9 +47,17 @@ build-hap:
 build-compat:
 	@echo "⚠️  Building CHORUS compatibility wrapper (deprecated)..."
 	@mkdir -p $(BUILD_DIR)
-	go build $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_COMPAT) ./$(CMD_DIR)/chorus
+	GOWORK=off go build -mod=mod $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_COMPAT) ./$(CMD_DIR)/chorus
 	@echo "✅ Compatibility wrapper built: $(BUILD_DIR)/$(BINARY_NAME_COMPAT)"
 
+# Build Sequential Thinking age-encrypted wrapper
+.PHONY: build-seqthink
+build-seqthink:
+	@echo "🔐 Building Sequential Thinking wrapper..."
+	@mkdir -p $(BUILD_DIR)
+	GOWORK=off go build -mod=mod $(BUILD_FLAGS) -o $(BUILD_DIR)/$(BINARY_NAME_SEQTHINK) ./$(CMD_DIR)/seqthink-wrapper
+	@echo "✅ SeqThink wrapper built: $(BUILD_DIR)/$(BINARY_NAME_SEQTHINK)"
+
 # Test compilation without building
 .PHONY: test-compile
 test-compile:
@@ -90,18 +99,26 @@ run-hap: build-hap
 	./$(BUILD_DIR)/$(BINARY_NAME_HAP)
 
 # Docker builds
+# NOTE: Always use Dockerfile.ubuntu for production builds!
+# Dockerfile.simple.DEPRECATED uses Alpine which is incompatible with glibc-linked binaries
 .PHONY: docker-agent
 docker-agent:
 	@echo "🐳 Building Docker image for CHORUS agent..."
-	docker build -f docker/Dockerfile.agent -t chorus-agent:$(VERSION) .
+	docker build -f Dockerfile.ubuntu -t chorus-agent:$(VERSION) .
+	@echo "⚠️  IMPORTANT: Production images MUST use Dockerfile.ubuntu (glibc compatibility)"
 
 .PHONY: docker-hap
 docker-hap:
 	@echo "🐳 Building Docker image for CHORUS HAP..."
 	docker build -f docker/Dockerfile.hap -t chorus-hap:$(VERSION) .
 
+.PHONY: docker-seqthink
+docker-seqthink:
+	@echo "🔐 Building Docker image for Sequential Thinking wrapper..."
+	docker build -f deploy/seqthink/Dockerfile -t seqthink-wrapper:$(VERSION) .
+
 .PHONY: docker
-docker: docker-agent docker-hap
+docker: docker-agent docker-hap docker-seqthink
 
 # Help
 .PHONY: help
@@ -109,22 +126,24 @@ help:
 	@echo "CHORUS Multi-Binary Build System"
 	@echo ""
 	@echo "Targets:"
-	@echo "  all          - Clean and build both binaries (default)"
-	@echo "  build        - Build both binaries"
+	@echo "  all          - Clean and build all binaries (default)"
+	@echo "  build        - Build all binaries"
 	@echo "  build-agent  - Build autonomous agent binary only"
 	@echo "  build-hap    - Build human agent portal binary only"
-	@echo "  test-compile - Test that both binaries compile"
+	@echo "  build-seqthink - Build Sequential Thinking wrapper only"
+	@echo "  test-compile - Test that binaries compile"
 	@echo "  test         - Run tests"
 	@echo "  clean        - Remove build artifacts"
 	@echo "  install      - Install binaries to GOPATH/bin"
 	@echo "  run-agent    - Build and run agent"
 	@echo "  run-hap      - Build and run HAP"
-	@echo "  docker       - Build Docker images for both binaries"
+	@echo "  docker       - Build Docker images for all binaries"
 	@echo "  docker-agent - Build Docker image for agent only"
 	@echo "  docker-hap   - Build Docker image for HAP only"
+	@echo "  docker-seqthink - Build Docker image for SeqThink wrapper only"
 	@echo "  help         - Show this help"
 	@echo ""
 	@echo "Environment Variables:"
-	@echo "  VERSION      - Version string (default: 0.1.0-dev)"
+	@echo "  VERSION      - Version string (default: 0.5.28)"
 	@echo "  COMMIT_HASH  - Git commit hash (auto-detected)"
 	@echo "  BUILD_DATE   - Build timestamp (auto-generated)"
```
```diff
@@ -4,30 +4,122 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"os"
 	"strconv"
+	"strings"
 	"time"
 
+	"chorus/internal/council"
 	"chorus/internal/logging"
+	"chorus/p2p"
+	"chorus/pkg/config"
 	"chorus/pubsub"
 
 	"github.com/gorilla/mux"
+	"github.com/rs/zerolog"
 )
 
 // HTTPServer provides HTTP API endpoints for CHORUS
 type HTTPServer struct {
 	port         int
 	hypercoreLog *logging.HypercoreLog
 	pubsub       *pubsub.PubSub
-	server       *http.Server
+	node         *p2p.Node // P2P node for peer ID and network info
+	server       *http.Server
+	CouncilManager *council.Manager // Exported for brief processing
+	whooshEndpoint string
+	logger         zerolog.Logger
 }
 
 // NewHTTPServer creates a new HTTP server for CHORUS API
-func NewHTTPServer(port int, hlog *logging.HypercoreLog, ps *pubsub.PubSub) *HTTPServer {
-	return &HTTPServer{
-		port:         port,
-		hypercoreLog: hlog,
-		pubsub:       ps,
+func NewHTTPServer(cfg *config.Config, node *p2p.Node, hlog *logging.HypercoreLog, ps *pubsub.PubSub) *HTTPServer {
+	agentID := cfg.Agent.ID
+	agentName := deriveAgentName(cfg)
+	endpoint := deriveAgentEndpoint(cfg)
+	p2pAddr := deriveAgentP2PAddress(cfg, node)
+	capabilities := cfg.Agent.Capabilities
+	if len(capabilities) == 0 {
+		capabilities = []string{"general_development", "task_coordination"}
 	}
+
+	councilMgr := council.NewManager(agentID, agentName, endpoint, p2pAddr, capabilities)
+
+	whooshEndpoint := overrideWhooshEndpoint(cfg)
+
+	return &HTTPServer{
+		port:           cfg.Network.APIPort,
+		hypercoreLog:   hlog,
+		pubsub:         ps,
+		node:           node,
+		CouncilManager: councilMgr,
+		whooshEndpoint: strings.TrimRight(whooshEndpoint, "/"),
+		logger:         logging.ForComponent(logging.ComponentServer),
+	}
+}
+
+// WhooshEndpoint returns the WHOOSH base endpoint configured for this agent.
+func (h *HTTPServer) WhooshEndpoint() string {
+	return h.whooshEndpoint
+}
+
+func deriveAgentName(cfg *config.Config) string {
+	if v := strings.TrimSpace(os.Getenv("CHORUS_AGENT_NAME")); v != "" {
+		return v
+	}
+	if cfg.Agent.Specialization != "" {
+		return cfg.Agent.Specialization
+	}
+	return cfg.Agent.ID
+}
+
+func deriveAgentEndpoint(cfg *config.Config) string {
+	if v := strings.TrimSpace(os.Getenv("CHORUS_AGENT_ENDPOINT")); v != "" {
+		return strings.TrimRight(v, "/")
+	}
+	host := strings.TrimSpace(os.Getenv("CHORUS_AGENT_SERVICE_HOST"))
+	if host == "" {
+		host = "chorus"
+	}
+	scheme := strings.TrimSpace(os.Getenv("CHORUS_AGENT_ENDPOINT_SCHEME"))
+	if scheme == "" {
+		scheme = "http"
+	}
+	return fmt.Sprintf("%s://%s:%d", scheme, host, cfg.Network.APIPort)
+}
+
+func deriveAgentP2PAddress(cfg *config.Config, node *p2p.Node) string {
+	if v := strings.TrimSpace(os.Getenv("CHORUS_AGENT_P2P_ENDPOINT")); v != "" {
+		return v
+	}
+	if node != nil {
+		addrs := node.Addresses()
+		if len(addrs) > 0 {
+			return fmt.Sprintf("%s/p2p/%s", addrs[0], node.ID())
+		}
+	}
+	host := strings.TrimSpace(os.Getenv("CHORUS_AGENT_SERVICE_HOST"))
+	if host == "" {
+		host = "chorus"
+	}
+	return fmt.Sprintf("%s:%d", host, cfg.Network.P2PPort)
+}
+
+func overrideWhooshEndpoint(cfg *config.Config) string {
+	if v := strings.TrimSpace(os.Getenv("CHORUS_WHOOSH_ENDPOINT")); v != "" {
+		return strings.TrimRight(v, "/")
+	}
+	candidate := cfg.WHOOSHAPI.BaseURL
+	if candidate == "" {
+		candidate = cfg.WHOOSHAPI.URL
+	}
+	if candidate == "" {
+		return "http://whoosh:8080"
+	}
+	trimmed := strings.TrimRight(candidate, "/")
+	if strings.Contains(trimmed, "localhost") || strings.Contains(trimmed, "127.0.0.1") {
+		return "http://whoosh:8080"
+	}
+	return trimmed
 }
 
 // Start starts the HTTP server
@@ -65,6 +157,12 @@ func (h *HTTPServer) Start() error {
 	// Status endpoint
 	api.HandleFunc("/status", h.handleStatus).Methods("GET")
 
+	// Council opportunity endpoints (v1)
+	v1 := api.PathPrefix("/v1").Subrouter()
+	v1.HandleFunc("/opportunities/council", h.handleCouncilOpportunity).Methods("POST")
+	v1.HandleFunc("/councils/status", h.handleCouncilStatusUpdate).Methods("POST")
+	v1.HandleFunc("/councils/{councilID}/roles/{roleName}/brief", h.handleCouncilBrief).Methods("POST")
+
 	h.server = &http.Server{
 		Addr:    fmt.Sprintf(":%d", h.port),
 		Handler: router,
@@ -73,7 +171,7 @@ func (h *HTTPServer) Start() error {
 		IdleTimeout:  60 * time.Second,
 	}
 
-	fmt.Printf("🌐 Starting HTTP API server on port %d\n", h.port)
+	h.logger.Info().Int("port", h.port).Msg("Starting HTTP API server")
 	return h.server.ListenAndServe()
 }
 
@@ -216,7 +314,7 @@ func (h *HTTPServer) handleGetLogStats(w http.ResponseWriter, r *http.Request) {
 	json.NewEncoder(w).Encode(stats)
 }
 
-// handleHealth returns health status
+// handleHealth returns health status with P2P network information
 func (h *HTTPServer) handleHealth(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Type", "application/json")
 
@@ -226,6 +324,89 @@ func (h *HTTPServer) handleHealth(w http.ResponseWriter, r *http.Request) {
 		"log_entries": h.hypercoreLog.Length(),
 	}
 
+	// Add P2P network information if node is available
+	if h.node != nil {
+		// Get peer ID
+		health["peer_id"] = h.node.ID().String()
+
+		// Build complete multiaddrs with peer ID using actual container IPs
+		// This is required for Docker Swarm because the service VIP load-balances
+		// and would cause peer ID mismatches when connecting to different replicas
+		var multiaddrs []string
+		rawAddrs := h.node.Addresses()
+
+		// Log what addresses we're getting from the node
+		h.logger.Debug().Int("address_count", len(rawAddrs)).Msg("Processing node addresses")
+		for i, addr := range rawAddrs {
+			h.logger.Debug().Int("index", i).Str("address", addr.String()).Msg("Raw address")
+		}
+
+		for _, addr := range rawAddrs {
+			addrStr := addr.String()
+
+			// Extract IP and port from multiaddr
+			var ip, port string
+			if strings.Contains(addrStr, "/ip4/") && strings.Contains(addrStr, "/tcp/") {
+				parts := strings.Split(addrStr, "/")
+				for i := 0; i < len(parts)-1; i++ {
+					if parts[i] == "ip4" {
+						ip = parts[i+1]
+					}
+					if parts[i] == "tcp" {
+						port = parts[i+1]
+					}
+				}
+			}
+
+			// Skip localhost addresses
+			if ip == "127.0.0.1" || ip == "::1" {
+				continue
+			}
+
+			// Build IP-based multiaddr for direct P2P connections
+			// This bypasses the Docker Swarm VIP and allows direct connection to this specific replica
+			if ip != "" && port != "" {
+				multiaddr := fmt.Sprintf("/ip4/%s/tcp/%s/p2p/%s", ip, port, h.node.ID().String())
+				h.logger.Debug().Str("multiaddr", multiaddr).Msg("Built multiaddr")
+				multiaddrs = append(multiaddrs, multiaddr)
+			}
+		}
+		health["multiaddrs"] = multiaddrs
+
+		// Add connected peer count
+		connectedPeers := h.node.ConnectedPeers()
+		health["connected_peers"] = connectedPeers
+
+		// P2P Connectivity Status - critical for detecting mesh issues
+		p2pStatus := "healthy"
+		if connectedPeers == 0 {
+			p2pStatus = "isolated" // No peers - serious issue
+			health["status"] = "degraded"
+		} else if connectedPeers < 3 {
+			p2pStatus = "limited" // Few peers - potential discovery issue
+		}
+		health["p2p_status"] = p2pStatus
+
+		// Add DHT status if available
+		if h.node.DHT() != nil {
+			health["dht_enabled"] = true
+			// DHT routing table size indicates how many nodes we know about
+			health["dht_routing_table_size"] = h.node.DHT().GetDHTSize()
+		} else {
+			health["dht_enabled"] = false
+		}
+
+		// Add GossipSub topics (static topics that agents join)
+		health["gossipsub_topics"] = []string{
+			"CHORUS/coordination/v1",
+			"hmmm/meta-discussion/v1",
+			"CHORUS/context-feedback/v1",
+		}
+
+		// Add bootstrap status
+		health["bootstrap_peers_configured"] = len(h.node.BootstrapPeers())
+	}
+
 	json.NewEncoder(w).Encode(health)
 }
```
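Assembled end to end, the health handler above returns JSON of roughly the following shape (values are illustrative; the P2P fields appear only when a node is attached, and `status` drops to `degraded` when no peers are connected):

```json
{
  "status": "healthy",
  "log_entries": 1024,
  "peer_id": "12D3KooW...",
  "multiaddrs": ["/ip4/10.0.1.12/tcp/9000/p2p/12D3KooW..."],
  "connected_peers": 5,
  "p2p_status": "healthy",
  "dht_enabled": true,
  "dht_routing_table_size": 42,
  "gossipsub_topics": [
    "CHORUS/coordination/v1",
    "hmmm/meta-discussion/v1",
    "CHORUS/context-feedback/v1"
  ],
  "bootstrap_peers_configured": 3
}
```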
```diff
@@ -242,3 +423,218 @@ func (h *HTTPServer) handleStatus(w http.ResponseWriter, r *http.Request) {
 
 	json.NewEncoder(w).Encode(status)
 }
+
+// handleCouncilOpportunity receives council formation opportunities from WHOOSH
+func (h *HTTPServer) handleCouncilOpportunity(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+
+	var opportunity council.CouncilOpportunity
+	if err := json.NewDecoder(r.Body).Decode(&opportunity); err != nil {
+		http.Error(w, fmt.Sprintf("Invalid JSON payload: %v", err), http.StatusBadRequest)
+		return
+	}
+
+	// Log the received opportunity to hypercore
+	logData := map[string]interface{}{
+		"event":          "council_opportunity_received",
+		"council_id":     opportunity.CouncilID,
+		"project_name":   opportunity.ProjectName,
+		"repository":     opportunity.Repository,
+		"core_roles":     len(opportunity.CoreRoles),
+		"optional_roles": len(opportunity.OptionalRoles),
+		"ucxl_address":   opportunity.UCXLAddress,
+		"message":        fmt.Sprintf("Received council opportunity for project: %s", opportunity.ProjectName),
+	}
+
+	if _, err := h.hypercoreLog.Append(logging.NetworkEvent, logData); err != nil {
+		h.logger.Warn().Err(err).Msg("Failed to log council opportunity")
+	}
+
+	// Log council opportunity with structured logging
+	h.logger.Info().
+		Str("council_id", opportunity.CouncilID).
+		Str("project_name", opportunity.ProjectName).
+		Str("repository", opportunity.Repository).
+		Int("core_roles", len(opportunity.CoreRoles)).
+		Int("optional_roles", len(opportunity.OptionalRoles)).
+		Str("ucxl_address", opportunity.UCXLAddress).
+		Msg("Council opportunity received")
+
+	// Log available roles
+	for _, role := range opportunity.CoreRoles {
+		h.logger.Info().
+			Str("agent_name", role.AgentName).
+			Str("role_name", role.RoleName).
+			Str("role_type", "CORE").
+			Msg("Available role")
+	}
+	for _, role := range opportunity.OptionalRoles {
+		h.logger.Info().
+			Str("agent_name", role.AgentName).
+			Str("role_name", role.RoleName).
+			Str("role_type", "OPTIONAL").
+			Msg("Available role")
+	}
+
+	// Evaluate the opportunity and claim a role if suitable
+	go func() {
+		if err := h.CouncilManager.EvaluateOpportunity(&opportunity, h.whooshEndpoint); err != nil {
+			h.logger.Warn().Err(err).Msg("Failed to evaluate/claim council role")
+		}
+	}()
+
+	response := map[string]interface{}{
+		"status":     "received",
+		"council_id": opportunity.CouncilID,
+		"message":    "Council opportunity received and being evaluated",
+		"timestamp":  time.Now().Unix(),
+		"agent_id":   h.CouncilManager.AgentID(),
+	}
+
+	w.WriteHeader(http.StatusAccepted)
+	json.NewEncoder(w).Encode(response)
+}
+
+// handleCouncilStatusUpdate receives council staffing updates from WHOOSH
+func (h *HTTPServer) handleCouncilStatusUpdate(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+
+	type roleCountsPayload struct {
+		Total   int `json:"total"`
+		Claimed int `json:"claimed"`
+	}
+
+	type councilStatusPayload struct {
+		CouncilID   string            `json:"council_id"`
+		ProjectName string            `json:"project_name"`
+		Status      string            `json:"status"`
+		Message     string            `json:"message"`
+		Timestamp   time.Time         `json:"timestamp"`
+		CoreRoles   roleCountsPayload `json:"core_roles"`
+		Optional    roleCountsPayload `json:"optional_roles"`
+	}
+
+	var payload councilStatusPayload
+	if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
+		http.Error(w, fmt.Sprintf("Invalid JSON payload: %v", err), http.StatusBadRequest)
+		return
+	}
+
+	if payload.CouncilID == "" {
+		http.Error(w, "council_id is required", http.StatusBadRequest)
+		return
+	}
+
+	if payload.Status == "" {
+		payload.Status = "unknown"
+	}
+
+	if payload.Timestamp.IsZero() {
+		payload.Timestamp = time.Now()
+	}
+
+	if payload.Message == "" {
+		payload.Message = fmt.Sprintf("Council status update: %s (core %d/%d, optional %d/%d)",
+			payload.Status,
+			payload.CoreRoles.Claimed, payload.CoreRoles.Total,
+			payload.Optional.Claimed, payload.Optional.Total,
+		)
+	}
+
+	logData := map[string]interface{}{
+		"event":                  "council_status_update",
+		"council_id":             payload.CouncilID,
+		"project_name":           payload.ProjectName,
+		"status":                 payload.Status,
+		"message":                payload.Message,
+		"timestamp":              payload.Timestamp.Format(time.RFC3339),
+		"core_roles_total":       payload.CoreRoles.Total,
+		"core_roles_claimed":     payload.CoreRoles.Claimed,
+		"optional_roles_total":   payload.Optional.Total,
+		"optional_roles_claimed": payload.Optional.Claimed,
+	}
+
+	if _, err := h.hypercoreLog.Append(logging.NetworkEvent, logData); err != nil {
+		h.logger.Warn().Err(err).Msg("Failed to log council status update")
+	}
+
+	h.logger.Info().
+		Str("council_id", payload.CouncilID).
+		Str("project_name", payload.ProjectName).
+		Str("status", payload.Status).
+		Int("core_roles_claimed", payload.CoreRoles.Claimed).
+		Int("core_roles_total", payload.CoreRoles.Total).
+		Int("optional_roles_claimed", payload.Optional.Claimed).
+		Int("optional_roles_total", payload.Optional.Total).
+		Str("message", payload.Message).
+		Msg("Council status update")
+
+	response := map[string]interface{}{
+		"status":     "received",
+		"council_id": payload.CouncilID,
+		"timestamp":  payload.Timestamp.Unix(),
+	}
+
+	w.WriteHeader(http.StatusAccepted)
+	json.NewEncoder(w).Encode(response)
+}
+
+func (h *HTTPServer) handleCouncilBrief(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+
+	vars := mux.Vars(r)
+	councilID := vars["councilID"]
+	roleName := vars["roleName"]
+
+	if councilID == "" || roleName == "" {
+		http.Error(w, "councilID and roleName are required", http.StatusBadRequest)
+		return
+	}
+
+	var brief council.CouncilBrief
+	if err := json.NewDecoder(r.Body).Decode(&brief); err != nil {
+		http.Error(w, fmt.Sprintf("Invalid JSON payload: %v", err), http.StatusBadRequest)
+		return
+	}
+
+	brief.CouncilID = councilID
+	brief.RoleName = roleName
+
+	h.logger.Info().
+		Str("council_id", councilID).
+		Str("role_name", roleName).
+		Str("brief_url", brief.BriefURL).
+		Str("summary", brief.Summary).
+		Msg("Received council brief")
+
+	if h.CouncilManager != nil {
+		h.CouncilManager.HandleCouncilBrief(councilID, roleName, &brief)
+	}
+
+	logData := map[string]interface{}{
+		"event":              "council_brief_received",
+		"council_id":         councilID,
+		"role_name":          roleName,
+		"project_name":       brief.ProjectName,
+		"repository":         brief.Repository,
+		"brief_url":          brief.BriefURL,
+		"ucxl_address":       brief.UCXLAddress,
+		"hmmm_topic":         brief.HMMMTopic,
+		"expected_artifacts": brief.ExpectedArtifacts,
+		"timestamp":          time.Now().Format(time.RFC3339),
+	}
+
+	if _, err := h.hypercoreLog.Append(logging.NetworkEvent, logData); err != nil {
+		h.logger.Warn().Err(err).Msg("Failed to log council brief")
+	}
+
+	response := map[string]interface{}{
+		"status":     "received",
+		"council_id": councilID,
+		"role_name":  roleName,
+		"timestamp":  time.Now().Unix(),
+	}
+
+	w.WriteHeader(http.StatusAccepted)
+	json.NewEncoder(w).Encode(response)
+}
```
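For orientation, a hedged sketch of what WHOOSH might POST to the new `/v1/opportunities/council` route: the snake_case field names below are inferred from the handler's log keys, not from the `council.CouncilOpportunity` struct tags (which this diff does not show), so treat the exact contract as an assumption.

```json
{
  "council_id": "council-123",
  "project_name": "example-project",
  "repository": "https://git.example.com/org/repo",
  "ucxl_address": "ucxl://...",
  "core_roles": [
    { "agent_name": "agent-1", "role_name": "architect" }
  ],
  "optional_roles": [
    { "agent_name": "agent-2", "role_name": "reviewer" }
  ]
}
```

On success the handler replies `202 Accepted` with a small JSON acknowledgement and evaluates the opportunity asynchronously in a goroutine.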
**cmd/seqthink-wrapper/main.go** — new file, 173 lines
```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"chorus/pkg/seqthink/mcpclient"
	"chorus/pkg/seqthink/observability"
	"chorus/pkg/seqthink/proxy"

	"github.com/rs/zerolog/log"
)

// Config holds the wrapper configuration
type Config struct {
	Port            string
	MCPLocalURL     string
	LogLevel        string
	MaxBodyMB       int
	HealthTimeout   time.Duration
	ShutdownTimeout time.Duration
	AgeIdentPath    string
	AgeRecipsPath   string
	KachingJWKSURL  string
	RequiredScope   string
}

func loadConfig() *Config {
	return &Config{
		Port:            getEnv("PORT", "8443"),
		MCPLocalURL:     getEnv("MCP_LOCAL", "http://127.0.0.1:8000"),
		LogLevel:        getEnv("LOG_LEVEL", "info"),
		MaxBodyMB:       getEnvInt("MAX_BODY_MB", 4),
		HealthTimeout:   5 * time.Second,
		ShutdownTimeout: 30 * time.Second,
		AgeIdentPath:    getEnv("AGE_IDENT_PATH", ""),
		AgeRecipsPath:   getEnv("AGE_RECIPS_PATH", ""),
		KachingJWKSURL:  getEnv("KACHING_JWKS_URL", ""),
		RequiredScope:   getEnv("REQUIRED_SCOPE", "sequentialthinking.run"),
	}
}

func main() {
	cfg := loadConfig()

	// Initialize observability
	observability.InitLogger(cfg.LogLevel)
	metrics := observability.InitMetrics()

	log.Info().
		Str("port", cfg.Port).
		Str("mcp_url", cfg.MCPLocalURL).
		Str("version", "0.1.0-beta2").
		Msg("🚀 Starting Sequential Thinking Age Wrapper")

	// Create MCP client
	mcpClient := mcpclient.New(cfg.MCPLocalURL)

	// Wait for MCP server to be ready
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	log.Info().Msg("⏳ Waiting for MCP server...")
	if err := waitForMCP(ctx, mcpClient); err != nil {
		log.Fatal().Err(err).Msg("❌ MCP server not ready")
	}

	log.Info().Msg("✅ MCP server ready")

	// Create proxy server
	proxyServer, err := proxy.NewServer(proxy.ServerConfig{
		MCPClient:      mcpClient,
		Metrics:        metrics,
		MaxBodyMB:      cfg.MaxBodyMB,
		AgeIdentPath:   cfg.AgeIdentPath,
		AgeRecipsPath:  cfg.AgeRecipsPath,
		KachingJWKSURL: cfg.KachingJWKSURL,
		RequiredScope:  cfg.RequiredScope,
	})

	if err != nil {
		log.Fatal().Err(err).Msg("❌ Failed to create proxy server")
	}

	// Setup HTTP server
	srv := &http.Server{
		Addr:         ":" + cfg.Port,
		Handler:      proxyServer.Handler(),
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 90 * time.Second,
		IdleTimeout:  120 * time.Second,
	}

	// Start server in goroutine
	go func() {
		log.Info().
			Str("addr", srv.Addr).
			Bool("encryption_enabled", cfg.AgeIdentPath != "").
			Bool("policy_enabled", cfg.KachingJWKSURL != "").
			Msg("🔐 Wrapper listening")

		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Fatal().Err(err).Msg("❌ HTTP server failed")
		}
	}()

	// Wait for shutdown signal
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan

	log.Info().Msg("🛑 Shutting down gracefully...")

	// Graceful shutdown
	shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout)
	defer shutdownCancel()

	if err := srv.Shutdown(shutdownCtx); err != nil {
		log.Error().Err(err).Msg("⚠️ Shutdown error")
	}

	log.Info().Msg("✅ Shutdown complete")
}

// waitForMCP waits for MCP server to be ready
func waitForMCP(ctx context.Context, client *mcpclient.Client) error {
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("timeout waiting for MCP server")
		case <-ticker.C:
			if err := client.Health(ctx); err == nil {
				return nil
			}
			log.Debug().Msg("Waiting for MCP server...")
		}
	}
}

// getEnv gets environment variable with default
func getEnv(key, defaultVal string) string {
	if val := os.Getenv(key); val != "" {
		return val
	}
	return defaultVal
}

// getEnvInt gets environment variable as int with default
func getEnvInt(key string, defaultVal int) int {
	val := os.Getenv(key)
	if val == "" {
		return defaultVal
	}

	var result int
	if _, err := fmt.Sscanf(val, "%d", &result); err != nil {
		log.Warn().
			Str("key", key).
			Str("value", val).
			Int("default", defaultVal).
			Msg("Invalid integer env var, using default")
		return defaultVal
	}

	return result
}
```
**deploy/seqthink/DEPLOYMENT.md** — new file, 380 lines
# Sequential Thinking Age Wrapper - Deployment Guide

## Overview

This guide covers deploying the Sequential Thinking Age-Encrypted Wrapper to Docker Swarm with full security enabled.

## Prerequisites

- Docker Swarm cluster initialized
- `chorus-overlay` network created
- Traefik reverse proxy configured
- KACHING authentication service available

## Architecture

```
Client → Traefik (HTTPS) → SeqThink Wrapper (JWT + Age Encryption) → MCP Server (loopback)
```

**Security Layers**:

1. **TLS**: Traefik terminates HTTPS
2. **JWT**: KACHING token validation
3. **Age Encryption**: End-to-end encrypted payloads
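As a companion to the curl walkthrough in Step 6 below, here is a minimal Go sketch of a client speaking layers 2 and 3 with `filippo.io/age` (the library used in the key-generation helper in SECRETS.md). The endpoint, payload, headers, and decrypt-with-the-same-identity pattern mirror Step 6; the key strings are placeholders, and the error handling is illustrative only.

```go
// Minimal client sketch: age-encrypt a tool request, attach a KACHING JWT,
// POST it, then decrypt the age-encrypted response. Key strings are
// placeholders; the request contract mirrors the Step 6 curl example.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"

	"filippo.io/age"
)

func main() {
	// Server public key (contents of seqthink_age.pub) - placeholder.
	recipient, err := age.ParseX25519Recipient("age1...")
	if err != nil {
		panic(err)
	}
	// Matching private key used to read responses - placeholder.
	identity, err := age.ParseX25519Identity("AGE-SECRET-KEY-1...")
	if err != nil {
		panic(err)
	}

	// Encrypt the tool request body to the wrapper's recipient.
	var buf bytes.Buffer
	w, err := age.Encrypt(&buf, recipient)
	if err != nil {
		panic(err)
	}
	io.WriteString(w, `{"tool":"test","payload":{}}`)
	w.Close()

	req, err := http.NewRequest("POST", "https://seqthink.chorus.services/mcp/tool", &buf)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("JWT_TOKEN")) // KACHING JWT
	req.Header.Set("Content-Type", "application/age")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decrypt the age-encrypted response with the same identity,
	// as in Step 6's `age -d -i seqthink_age.key`.
	plain, err := age.Decrypt(resp.Body, identity)
	if err != nil {
		panic(err)
	}
	out := new(strings.Builder)
	io.Copy(out, plain)
	fmt.Println(out.String())
}
```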
## Step 1: Generate Age Keys

Generate a key pair for encryption:

```bash
# Generate age identity (private key)
age-keygen -o seqthink_age.key

# Extract recipient (public key)
age-keygen -y seqthink_age.key > seqthink_age.pub
```

**Output**:
```
seqthink_age.key:
# created: 2025-10-13T08:00:00+11:00
# public key: age1abcd...
AGE-SECRET-KEY-1ABCD...

seqthink_age.pub:
age1abcd...
```

## Step 2: Create Docker Secrets

Store the age keys as Docker secrets:

```bash
# Create identity secret
docker secret create seqthink_age_identity seqthink_age.key

# Create recipient secret
docker secret create seqthink_age_recipients seqthink_age.pub

# Verify secrets
docker secret ls | grep seqthink
```

**Expected Output**:
```
seqthink_age_identity     <timestamp>
seqthink_age_recipients   <timestamp>
```

## Step 3: Build Docker Image

Build the wrapper image:

```bash
cd /home/tony/chorus/project-queues/active/CHORUS

# Build image
docker build -f deploy/seqthink/Dockerfile -t anthonyrawlins/seqthink-wrapper:latest .

# Tag with version
docker tag anthonyrawlins/seqthink-wrapper:latest anthonyrawlins/seqthink-wrapper:0.1.0

# Push to registry
docker push anthonyrawlins/seqthink-wrapper:latest
docker push anthonyrawlins/seqthink-wrapper:0.1.0
```

## Step 4: Deploy to Swarm

Deploy the service:

```bash
cd deploy/seqthink

# Deploy stack
docker stack deploy -c docker-compose.swarm.yml seqthink

# Check service status
docker service ls | grep seqthink

# Check logs
docker service logs -f seqthink_seqthink-wrapper
```

**Expected Log Output**:
```
🚀 Starting Sequential Thinking Age Wrapper
⏳ Waiting for MCP server...
✅ MCP server ready
Policy enforcement enabled
  jwks_url: https://auth.kaching.services/jwks
  required_scope: sequentialthinking.run
Fetching JWKS
JWKS cached successfully
  key_count: 2
Encryption enabled - using encrypted endpoint
🔐 Wrapper listening
  addr: :8443
  encryption_enabled: true
  policy_enabled: true
```

## Step 5: Verify Deployment

Check service health:

```bash
# Check replicas
docker service ps seqthink_seqthink-wrapper

# Test health endpoint
curl -f http://localhost:8443/health
# Expected: OK

# Test readiness
curl -f http://localhost:8443/ready
# Expected: READY

# Check metrics
curl http://localhost:8443/metrics | grep seqthink
```

## Step 6: Test with JWT Token

Get a KACHING JWT token and test the API:

```bash
# Set your JWT token
export JWT_TOKEN="eyJhbGciOiJSUzI1NiIsImtpZCI6ImRlZmF1bHQiLCJ0eXAiOiJKV1QifQ..."

# Test unauthorized (should fail)
curl -X POST https://seqthink.chorus.services/mcp/tool \
  -H "Content-Type: application/age" \
  -d "test"
# Expected: 401 Unauthorized

# Test authorized (should succeed)
curl -X POST https://seqthink.chorus.services/mcp/tool \
  -H "Authorization: Bearer $JWT_TOKEN" \
  -H "Content-Type: application/age" \
  -d "$(echo '{"tool":"test","payload":{}}' | age -r $(cat seqthink_age.pub))" \
  --output encrypted_response.age

# Decrypt response
age -d -i seqthink_age.key encrypted_response.age
```

## Configuration Reference

### Environment Variables

| Variable | Required | Default | Description |
|----------|----------|---------|-------------|
| `PORT` | No | `8443` | HTTP server port |
| `MCP_LOCAL` | No | `http://127.0.0.1:8000` | MCP server URL (loopback) |
| `LOG_LEVEL` | No | `info` | Logging level (debug, info, warn, error) |
| `MAX_BODY_MB` | No | `4` | Maximum request body size in MB |
| `AGE_IDENT_PATH` | **Yes** | - | Path to age identity (private key) |
| `AGE_RECIPS_PATH` | **Yes** | - | Path to age recipients (public key) |
| `KACHING_JWKS_URL` | **Yes** | - | KACHING JWKS endpoint |
| `REQUIRED_SCOPE` | **Yes** | `sequentialthinking.run` | Required JWT scope |

### Docker Secrets

| Secret Name | Purpose | Content |
|-------------|---------|---------|
| `seqthink_age_identity` | Age private key | `AGE-SECRET-KEY-1...` |
| `seqthink_age_recipients` | Age public key | `age1...` |

### Network Ports

| Port | Protocol | Purpose |
|------|----------|---------|
| `8443` | HTTP | Wrapper API |
| `8000` | HTTP | MCP server (internal loopback only) |

## Scaling

Scale the service:

```bash
# Scale up
docker service scale seqthink_seqthink-wrapper=5

# Scale down
docker service scale seqthink_seqthink-wrapper=2
```

## Updates

Rolling update:

```bash
# Build new version
docker build -f deploy/seqthink/Dockerfile -t anthonyrawlins/seqthink-wrapper:0.2.0 .
docker push anthonyrawlins/seqthink-wrapper:0.2.0

# Update service
docker service update \
  --image anthonyrawlins/seqthink-wrapper:0.2.0 \
  seqthink_seqthink-wrapper

# Monitor rollout
docker service ps seqthink_seqthink-wrapper
```

## Rollback

If an update fails:

```bash
# Automatic rollback (configured in stack)
# Or manual rollback:
docker service rollback seqthink_seqthink-wrapper
```

## Monitoring

### Prometheus Metrics

Available at `http://localhost:8443/metrics`:

```
seqthink_requests_total
seqthink_errors_total
seqthink_decrypt_failures_total
seqthink_encrypt_failures_total
seqthink_policy_denials_total
seqthink_request_duration_seconds
```

### Health Checks

- **Liveness**: `GET /health` - Returns 200 if the wrapper is running
- **Readiness**: `GET /ready` - Returns 200 if the MCP server is ready

### Logs

View logs:

```bash
# All replicas
docker service logs seqthink_seqthink-wrapper

# Follow logs
docker service logs -f seqthink_seqthink-wrapper

# Specific replica
docker service logs seqthink_seqthink-wrapper.<replica-id>
```

## Troubleshooting

### Issue: Policy Enforcement Disabled

**Symptoms**:
```
Policy enforcement disabled - no JWKS URL or required scope configured
```

**Solution**:
- Verify `KACHING_JWKS_URL` and `REQUIRED_SCOPE` are set
- Check environment variables: `docker service inspect seqthink_seqthink-wrapper`

### Issue: JWKS Fetch Failed

**Symptoms**:
```
Failed to pre-fetch JWKS, will retry on first request
```

**Solution**:
- Check that the KACHING service is accessible
- Verify the JWKS URL is correct
- Check network connectivity

### Issue: Decryption Failed

**Symptoms**:
```
Failed to decrypt request
seqthink_decrypt_failures_total increasing
```

**Solution**:
- Verify age keys match between client and server
- Check that the client is using the correct public key
- Ensure secrets are correctly mounted

### Issue: MCP Server Not Ready

**Symptoms**:
```
❌ MCP server not ready
timeout waiting for MCP server
```

**Solution**:
- Check that the MCP server is starting correctly
- Review entrypoint.sh logs
- Verify Python dependencies are installed

## Security Considerations

1. **Key Rotation**: Periodically rotate age keys:
   ```bash
   # Generate new keys
   age-keygen -o seqthink_age_new.key
   age-keygen -y seqthink_age_new.key > seqthink_age_new.pub

   # Update secrets (requires service restart)
   docker secret rm seqthink_age_identity
   docker secret create seqthink_age_identity seqthink_age_new.key
   ```

2. **JWT Token Expiration**: Tokens should have short expiration times (1 hour recommended)

3. **Network Isolation**: The MCP server is only accessible on loopback (127.0.0.1)

4. **TLS**: Always use HTTPS in production (via Traefik)

5. **Rate Limiting**: Consider adding rate limiting at the Traefik level

## Development Mode

For testing without security:

```yaml
environment:
  # Disable encryption
  AGE_IDENT_PATH: ""
  AGE_RECIPS_PATH: ""

  # Disable policy
  KACHING_JWKS_URL: ""
  REQUIRED_SCOPE: ""
```

**WARNING**: Only use in development environments!

## Production Checklist

- [ ] Age keys generated and stored as Docker secrets
- [ ] KACHING JWKS URL configured and accessible
- [ ] Docker image built and pushed to registry
- [ ] Service deployed to swarm
- [ ] Health checks passing
- [ ] Metrics endpoint accessible
- [ ] JWT tokens validated successfully
- [ ] End-to-end encryption verified
- [ ] Logs show no errors
- [ ] Monitoring alerts configured
- [ ] Backup of age keys stored securely
- [ ] Documentation updated with deployment details

## Support

For issues or questions:
- Check logs: `docker service logs seqthink_seqthink-wrapper`
- Review metrics: `curl http://localhost:8443/metrics`
- Consult implementation docs in `/home/tony/chorus/project-queues/active/CHORUS/docs/`
**deploy/seqthink/Dockerfile** — new file, 65 lines
```dockerfile
# Sequential Thinking Age-Encrypted Wrapper

# Stage 1: Build Python MCP server
FROM python:3.11-slim AS python-builder

WORKDIR /mcp

# Install Sequential Thinking MCP server dependencies
# Note: For Beat 1, we'll use a minimal Python HTTP server
# Full MCP server integration happens in later beats
RUN pip install --no-cache-dir \
    fastapi==0.109.0 \
    uvicorn[standard]==0.27.0 \
    pydantic==2.5.3

# Copy MCP compatibility server
COPY deploy/seqthink/mcp_server.py /mcp/server.py

# Stage 2: Runtime
FROM debian:bookworm-slim

# Install runtime dependencies
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        python3 \
        python3-pip && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Install Python packages in runtime
RUN pip3 install --no-cache-dir --break-system-packages \
    fastapi==0.109.0 \
    uvicorn[standard]==0.27.0 \
    pydantic==2.5.3

# Create non-root user
RUN useradd -r -u 1000 -m -s /bin/bash seqthink

# Copy wrapper binary built on host (GOWORK=off GOOS=linux go build ...)
COPY deploy/seqthink/bin/seqthink-wrapper /usr/local/bin/seqthink-wrapper
COPY --from=python-builder /mcp/server.py /opt/mcp/server.py

# Copy entrypoint
COPY deploy/seqthink/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Setup directories
RUN mkdir -p /etc/seqthink /var/log/seqthink && \
    chown -R seqthink:seqthink /etc/seqthink /var/log/seqthink

# Switch to non-root user
USER seqthink
WORKDIR /home/seqthink

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD curl -f http://localhost:8443/health || exit 1

# Expose wrapper port (MCP server on 127.0.0.1:8000 is internal only)
EXPOSE 8443

# Run entrypoint
ENTRYPOINT ["/entrypoint.sh"]
```
**deploy/seqthink/SECRETS.md** — new file, 491 lines
# Secrets Management Guide

## Overview

The Sequential Thinking Wrapper uses Docker Secrets for secure key management. This guide covers generating, storing, and rotating secrets.

## Secret Types

### 1. Age Encryption Keys

**Purpose**: End-to-end encryption of MCP communications

**Components**:
- **Identity (Private Key)**: `seqthink_age_identity`
- **Recipients (Public Key)**: `seqthink_age_recipients`

### 2. KACHING JWT Configuration

**Purpose**: Authentication and authorization

**Components**:
- JWKS URL (environment variable, not a secret)
- Required scope (environment variable, not a secret)

## Generating Age Keys

### Method 1: Using age-keygen

```bash
# Install age if not already installed
# macOS: brew install age
# Ubuntu: apt install age
# Arch: pacman -S age

# Generate identity (private key)
age-keygen -o seqthink_age.key

# Extract recipient (public key)
age-keygen -y seqthink_age.key > seqthink_age.pub
```

**Output Format**:

`seqthink_age.key`:
```
# created: 2025-10-13T08:00:00+11:00
# public key: age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p
AGE-SECRET-KEY-1GFPYYSJQ...
```

`seqthink_age.pub`:
```
age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p
```

### Method 2: Using Go Code

Create a helper script (the `time` import, missing in the original listing, is added here since `time.Now()` is used):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"filippo.io/age"
)

func main() {
	identity, err := age.GenerateX25519Identity()
	if err != nil {
		panic(err)
	}

	// Write identity (private key)
	identityFile, _ := os.Create("seqthink_age.key")
	fmt.Fprintf(identityFile, "# created: %s\n", time.Now().Format(time.RFC3339))
	fmt.Fprintf(identityFile, "# public key: %s\n", identity.Recipient().String())
	fmt.Fprintf(identityFile, "%s\n", identity.String())
	identityFile.Close()

	// Write recipient (public key)
	recipientFile, _ := os.Create("seqthink_age.pub")
	fmt.Fprintf(recipientFile, "%s\n", identity.Recipient().String())
	recipientFile.Close()

	fmt.Println("✅ Keys generated:")
	fmt.Println("   Identity:  seqthink_age.key")
	fmt.Println("   Recipient: seqthink_age.pub")
}
```

## Storing Secrets in Docker Swarm

### Create Secrets

```bash
# Create identity secret
docker secret create seqthink_age_identity seqthink_age.key

# Create recipient secret
docker secret create seqthink_age_recipients seqthink_age.pub
```

### Verify Secrets

```bash
# List secrets
docker secret ls | grep seqthink

# Inspect secret metadata (not content)
docker secret inspect seqthink_age_identity
```

**Expected Output**:
```json
[
  {
    "ID": "abc123...",
    "Version": {
      "Index": 123
    },
    "CreatedAt": "2025-10-13T08:00:00.000Z",
    "UpdatedAt": "2025-10-13T08:00:00.000Z",
    "Spec": {
      "Name": "seqthink_age_identity",
      "Labels": {}
    }
  }
]
```

## Using Secrets in Services

### Compose File Configuration

```yaml
services:
  seqthink-wrapper:
    environment:
      AGE_IDENT_PATH: /run/secrets/seqthink_age_identity
      AGE_RECIPS_PATH: /run/secrets/seqthink_age_recipients

    secrets:
      - seqthink_age_identity
      - seqthink_age_recipients

secrets:
  seqthink_age_identity:
    external: true
  seqthink_age_recipients:
    external: true
```

### Secret Mount Points

Inside the container, secrets are available at:
- `/run/secrets/seqthink_age_identity`
- `/run/secrets/seqthink_age_recipients`

These are read-only files mounted via tmpfs.
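As a sketch of how a Go process can consume these mounts, the snippet below parses the identity and recipients files with `filippo.io/age` (the same library used in the key-generation helper above). The wrapper's actual loading code is not shown in this diff, so treat this as an illustration of the mechanism, not a copy of it.

```go
// Sketch: load age keys from the Docker secret mounts described above.
// Paths match the AGE_IDENT_PATH / AGE_RECIPS_PATH values in the compose file.
package main

import (
	"fmt"
	"os"

	"filippo.io/age"
)

func main() {
	identFile, err := os.Open("/run/secrets/seqthink_age_identity")
	if err != nil {
		panic(err)
	}
	defer identFile.Close()

	// ParseIdentities skips "#" comment lines, so the generated file
	// header ("# created: ...", "# public key: ...") is handled.
	identities, err := age.ParseIdentities(identFile)
	if err != nil {
		panic(err)
	}

	recipsFile, err := os.Open("/run/secrets/seqthink_age_recipients")
	if err != nil {
		panic(err)
	}
	defer recipsFile.Close()

	recipients, err := age.ParseRecipients(recipsFile)
	if err != nil {
		panic(err)
	}

	fmt.Printf("loaded %d identities, %d recipients\n", len(identities), len(recipients))
}
```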
|
|
||||||
|
## Key Rotation

### Why Rotate Keys?

- Compromised key material
- Compliance requirements
- Periodic security hygiene
- Employee offboarding

### Rotation Process

#### Step 1: Generate New Keys

```bash
# Generate new keys with timestamp
TIMESTAMP=$(date +%Y%m%d)
age-keygen -o seqthink_age_${TIMESTAMP}.key
age-keygen -y seqthink_age_${TIMESTAMP}.key > seqthink_age_${TIMESTAMP}.pub
```

#### Step 2: Create New Secrets

```bash
# Create new secrets with version suffix
docker secret create seqthink_age_identity_v2 seqthink_age_${TIMESTAMP}.key
docker secret create seqthink_age_recipients_v2 seqthink_age_${TIMESTAMP}.pub
```

#### Step 3: Update Service

```bash
# Update service to use new secrets
docker service update \
  --secret-rm seqthink_age_identity \
  --secret-add source=seqthink_age_identity_v2,target=seqthink_age_identity \
  --secret-rm seqthink_age_recipients \
  --secret-add source=seqthink_age_recipients_v2,target=seqthink_age_recipients \
  seqthink_seqthink-wrapper
```

#### Step 4: Verify New Keys Work

```bash
# Check service logs
docker service logs seqthink_seqthink-wrapper | tail -20

# Test encryption with new keys
echo "test" | age -r "$(cat seqthink_age_${TIMESTAMP}.pub)" | \
  age -d -i seqthink_age_${TIMESTAMP}.key
```

#### Step 5: Clean Up Old Secrets

```bash
# Wait 24 hours to ensure no rollback needed
# Then remove old secrets
docker secret rm seqthink_age_identity
docker secret rm seqthink_age_recipients

# Promote v2 to primary names (optional)
docker secret create seqthink_age_identity seqthink_age_${TIMESTAMP}.key
docker secret create seqthink_age_recipients seqthink_age_${TIMESTAMP}.pub
```

## Backup and Recovery

### Backup Keys

```bash
# Create secure backup directory
mkdir -p ~/secure-backups/seqthink-keys
chmod 700 ~/secure-backups/seqthink-keys

# Copy keys to backup
cp seqthink_age.key ~/secure-backups/seqthink-keys/
cp seqthink_age.pub ~/secure-backups/seqthink-keys/

# Encrypt backup
tar czf - ~/secure-backups/seqthink-keys | \
  age -r age1... > seqthink-keys-backup.tar.gz.age

# Store encrypted backup in:
# 1. Offsite backup (Backblaze, Scaleway)
# 2. Password manager (1Password, Bitwarden)
# 3. Hardware security module (YubiKey)
```

### Recover Keys

```bash
# Decrypt backup
age -d -i master_identity.key seqthink-keys-backup.tar.gz.age | \
  tar xzf -

# Recreate Docker secrets
docker secret create seqthink_age_identity \
  ~/secure-backups/seqthink-keys/seqthink_age.key
docker secret create seqthink_age_recipients \
  ~/secure-backups/seqthink-keys/seqthink_age.pub
```

## Security Best Practices

### 1. Key Generation

✅ **DO**:
- Generate keys on secure, air-gapped machines
- Use cryptographically secure random number generators
- Generate new keys per environment (dev, staging, prod)

❌ **DON'T**:
- Reuse keys across environments
- Generate keys on shared/untrusted systems
- Store keys in git repositories

### 2. Key Storage

✅ **DO**:
- Use Docker Secrets for production
- Encrypt backups with age or GPG
- Store backups in multiple secure locations
- Use hardware security modules for highly sensitive keys

❌ **DON'T**:
- Store keys in environment variables
- Commit keys to version control
- Share keys via insecure channels (email, Slack)
- Store unencrypted keys on disk

### 3. Key Distribution

✅ **DO**:
- Use secure channels (age-encrypted files, password managers)
- Verify key fingerprints before use
- Use Docker Secrets for service access
- Document key distribution recipients

❌ **DON'T**:
- Send keys via unencrypted email
- Post keys in chat systems
- Share keys verbally
- Use public key servers for private keys

### 4. Key Lifecycle

✅ **DO**:
- Rotate keys periodically (quarterly recommended)
- Rotate keys immediately if compromised
- Keep an audit log of key generations and rotations
- Test key recovery procedures

❌ **DON'T**:
- Keep keys indefinitely without rotation
- Delete old keys immediately (keep a 30-day overlap)
- Skip testing key recovery
- Forget to document key changes

## Troubleshooting

### Issue: Secret Not Found

**Error**:
```
Error response from daemon: secret 'seqthink_age_identity' not found
```

**Solution**:
```bash
# Check if secret exists
docker secret ls | grep seqthink

# If missing, create it
docker secret create seqthink_age_identity seqthink_age.key
```

### Issue: Permission Denied Reading Secret

**Error**:
```
open /run/secrets/seqthink_age_identity: permission denied
```

**Solution**:
- Secrets are mounted read-only to containers
- The container user must have read permissions
- Check the Dockerfile USER directive

### Issue: Wrong Key Used

**Error**:
```
Failed to decrypt request
seqthink_decrypt_failures_total increasing
```

**Solution**:
```bash
# Verify public key matches private key
PUBLIC_FROM_PRIVATE=$(age-keygen -y seqthink_age.key)
PUBLIC_IN_SECRET=$(cat seqthink_age.pub)

if [ "$PUBLIC_FROM_PRIVATE" = "$PUBLIC_IN_SECRET" ]; then
    echo "✓ Keys match"
else
    echo "✗ Keys don't match - regenerate recipient"
fi
```

### Issue: Secret Update Not Taking Effect

**Symptoms**: Service still using old keys after an update

**Solution**:
```bash
# Force service update
docker service update --force seqthink_seqthink-wrapper

# Or restart service
docker service scale seqthink_seqthink-wrapper=0
docker service scale seqthink_seqthink-wrapper=3
```

## Client-Side Key Management

### Distributing Public Keys to Clients

Clients need the public key to encrypt requests:

```bash
# Generate client-friendly recipient file
cat seqthink_age.pub

# Clients can encrypt with:
echo '{"tool":"test","payload":{}}' | age -r age1ql3z7hjy54pw3... > request.age
```

### Recipient Key Distribution Methods

1. **Configuration Management**:
   ```yaml
   seqthink:
     recipient: age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9aqmcac8p
   ```

2. **Environment Variable**:
   ```bash
   export SEQTHINK_RECIPIENT="age1ql3z7hjy54pw3..."
   ```

3. **API Discovery** (future):
   ```bash
   curl https://seqthink.chorus.services/.well-known/age-recipient
   ```
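
For Go clients, a minimal sketch of the same encrypt-and-POST flow (illustrative: the endpoint path and `application/age` Content-Type follow the E2E test script in this change; the `filippo.io/age` library usage and function name are assumptions, not existing client code):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"

	"filippo.io/age"
)

// postEncrypted encrypts a JSON tool request to the wrapper's recipient
// and POSTs it with a bearer token, mirroring the curl flow above.
func postEncrypted(baseURL, recipientStr, token string, payload []byte) (*http.Response, error) {
	recipient, err := age.ParseX25519Recipient(recipientStr)
	if err != nil {
		return nil, fmt.Errorf("parse recipient: %w", err)
	}

	var buf bytes.Buffer
	w, err := age.Encrypt(&buf, recipient)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(payload); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil { // Close flushes the age ciphertext
		return nil, err
	}

	req, err := http.NewRequest(http.MethodPost, baseURL+"/mcp/tool", &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/age")
	return http.DefaultClient.Do(req)
}
```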
## Compliance and Auditing

### Audit Log Example

Maintain a log of key operations:

```markdown
# seqthink-keys-audit.md

## 2025-10-13 - Initial Key Generation
- Generated by: Tony
- Purpose: Production deployment
- Public key: age1ql3z7hjy54pw3...
- Stored in: Docker Secrets + Backup

## 2025-11-15 - Quarterly Rotation
- Generated by: Tony
- Reason: Scheduled quarterly rotation
- Old public key: age1ql3z7hjy54pw3...
- New public key: age1abc123xyz...
- Overlap period: 30 days
- Old keys removed: 2025-12-15
```

### Compliance Requirements

For SOC 2, ISO 27001, or similar frameworks:
- Document key generation procedures
- Log all key rotations
- Restrict key access to authorized personnel
- Encrypt keys at rest
- Rotate keys regularly (90 days recommended)
- Maintain an incident response plan for key compromise

## Emergency Procedures

### Key Compromise Response

If keys are compromised:

1. **Immediate Actions** (< 1 hour):
   ```bash
   # Generate new keys immediately
   age-keygen -o seqthink_age_emergency.key
   age-keygen -y seqthink_age_emergency.key > seqthink_age_emergency.pub

   # Update Docker secrets
   docker secret create seqthink_age_identity_emergency seqthink_age_emergency.key
   docker secret create seqthink_age_recipients_emergency seqthink_age_emergency.pub

   # Force service update
   docker service update --force \
     --secret-rm seqthink_age_identity \
     --secret-add source=seqthink_age_identity_emergency,target=seqthink_age_identity \
     --secret-rm seqthink_age_recipients \
     --secret-add source=seqthink_age_recipients_emergency,target=seqthink_age_recipients \
     seqthink_seqthink-wrapper
   ```

2. **Communication** (< 4 hours):
   - Notify all clients of the new public key
   - Update documentation
   - Post-mortem analysis

3. **Follow-up** (< 24 hours):
   - Review access logs
   - Identify compromise source
   - Update security procedures
   - Complete incident report

## References

- [age encryption tool](https://github.com/FiloSottile/age)
- [Docker Secrets documentation](https://docs.docker.com/engine/swarm/secrets/)
- [NIST Key Management Guidelines](https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-5/final)
deploy/seqthink/docker-compose.swarm.yml (new file, 102 lines)
@@ -0,0 +1,102 @@
version: '3.8'

services:
  seqthink-wrapper:
    image: anthonyrawlins/seqthink-wrapper:latest
    networks:
      - chorus-overlay
    ports:
      - "8443:8443"
    environment:
      # Logging
      LOG_LEVEL: info

      # MCP server (internal loopback)
      MCP_LOCAL: http://127.0.0.1:8000

      # Port configuration
      PORT: "8443"

      # Request limits
      MAX_BODY_MB: "4"

      # Age encryption (use secrets)
      AGE_IDENT_PATH: /run/secrets/seqthink_age_identity
      AGE_RECIPS_PATH: /run/secrets/seqthink_age_recipients

      # KACHING JWT policy
      KACHING_JWKS_URL: https://auth.kaching.services/jwks
      REQUIRED_SCOPE: sequentialthinking.run

    secrets:
      - seqthink_age_identity
      - seqthink_age_recipients

    deploy:
      mode: replicated
      replicas: 3
      placement:
        constraints:
          - node.role == worker
        preferences:
          - spread: node.hostname

      resources:
        limits:
          cpus: '1.0'
          memory: 512M
        reservations:
          cpus: '0.5'
          memory: 256M

      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 120s

      update_config:
        parallelism: 1
        delay: 10s
        failure_action: rollback
        monitor: 30s
        max_failure_ratio: 0.3

      rollback_config:
        parallelism: 1
        delay: 5s
        failure_action: pause
        monitor: 30s

      labels:
        - "traefik.enable=true"
        - "traefik.http.routers.seqthink.rule=Host(`seqthink.chorus.services`)"
        - "traefik.http.routers.seqthink.entrypoints=websecure"
        - "traefik.http.routers.seqthink.tls=true"
        - "traefik.http.routers.seqthink.tls.certresolver=letsencrypt"
        - "traefik.http.services.seqthink.loadbalancer.server.port=8443"
        - "traefik.http.services.seqthink.loadbalancer.healthcheck.path=/health"
        - "traefik.http.services.seqthink.loadbalancer.healthcheck.interval=30s"

    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8443/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

networks:
  chorus-overlay:
    external: true

secrets:
  seqthink_age_identity:
    external: true
  seqthink_age_recipients:
    external: true
deploy/seqthink/entrypoint.sh (new executable file, 27 lines)
@@ -0,0 +1,27 @@
#!/bin/bash
set -e

echo "🚀 Starting Sequential Thinking Age Wrapper"

# Start MCP server on loopback
echo "📡 Starting Sequential Thinking MCP compatibility server on 127.0.0.1:8000..."
python3 /opt/mcp/server.py &
MCP_PID=$!

# Wait for MCP server to be ready
echo "⏳ Waiting for MCP server to be ready..."
for i in {1..30}; do
    if curl -sf http://127.0.0.1:8000/health > /dev/null 2>&1; then
        echo "✅ MCP server ready"
        break
    fi
    if [ $i -eq 30 ]; then
        echo "❌ MCP server failed to start"
        exit 1
    fi
    sleep 1
done

# Start wrapper
echo "🔐 Starting wrapper on :8443..."
exec seqthink-wrapper
deploy/seqthink/mcp_server.py (new file, 160 lines)
@@ -0,0 +1,160 @@
#!/usr/bin/env python3
"""Sequential Thinking MCP compatibility server (HTTP wrapper)."""

from __future__ import annotations

import json
import logging
import os
from typing import Any, Dict, List, Optional

from fastapi import FastAPI, HTTPException
import uvicorn
from pydantic import BaseModel, Field, validator

logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger("seqthink")


class ToolRequest(BaseModel):
    tool: str
    payload: Dict[str, Any]

    @validator("tool")
    def validate_tool(cls, value: str) -> str:
        allowed = {
            "sequentialthinking",
            "mcp__sequential-thinking__sequentialthinking",
        }
        if value not in allowed:
            raise ValueError(f"Unknown tool '{value}'")
        return value


class ToolResponse(BaseModel):
    result: Optional[Dict[str, Any]] = None
    error: Optional[str] = None


class ThoughtData(BaseModel):
    thought: str
    thoughtNumber: int = Field(..., ge=1)
    totalThoughts: int = Field(..., ge=1)
    nextThoughtNeeded: bool
    isRevision: Optional[bool] = False
    revisesThought: Optional[int] = Field(default=None, ge=1)
    branchFromThought: Optional[int] = Field(default=None, ge=1)
    branchId: Optional[str] = None
    needsMoreThoughts: Optional[bool] = None

    @validator("totalThoughts")
    def normalize_total(cls, value: int, values: Dict[str, Any]) -> int:
        thought_number = values.get("thoughtNumber")
        if thought_number is not None and value < thought_number:
            return thought_number
        return value


class SequentialThinkingEngine:
    """Replicates the upstream sequential thinking MCP behaviour."""

    def __init__(self) -> None:
        self._thought_history: List[ThoughtData] = []
        self._branches: Dict[str, List[ThoughtData]] = {}
        env = os.environ.get("DISABLE_THOUGHT_LOGGING", "")
        self._disable_logging = env.lower() == "true"

    def _record_branch(self, data: ThoughtData) -> None:
        if data.branchFromThought and data.branchId:
            self._branches.setdefault(data.branchId, []).append(data)

    def _log_thought(self, data: ThoughtData) -> None:
        if self._disable_logging:
            return

        header = []
        if data.isRevision:
            header.append("🔄 Revision")
            if data.revisesThought:
                header.append(f"(revising thought {data.revisesThought})")
        elif data.branchFromThought:
            header.append("🌿 Branch")
            header.append(f"(from thought {data.branchFromThought})")
            if data.branchId:
                header.append(f"[ID: {data.branchId}]")
        else:
            header.append("💭 Thought")

        header.append(f"{data.thoughtNumber}/{data.totalThoughts}")
        header_line = " ".join(part for part in header if part)

        border_width = max(len(header_line), len(data.thought)) + 4
        border = "─" * border_width
        message = (
            f"\n┌{border}┐\n"
            f"│ {header_line.ljust(border_width - 2)} │\n"
            f"├{border}┤\n"
            f"│ {data.thought.ljust(border_width - 2)} │\n"
            f"└{border}┘"
        )
        logger.error(message)

    def process(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        try:
            thought = ThoughtData(**payload)
        except Exception as exc:  # pylint: disable=broad-except
            logger.exception("Invalid thought payload")
            return {
                "content": [
                    {
                        "type": "text",
                        "text": json.dumps({"error": str(exc)}, indent=2),
                    }
                ],
                "isError": True,
            }

        self._thought_history.append(thought)
        self._record_branch(thought)
        self._log_thought(thought)

        response_payload = {
            "thoughtNumber": thought.thoughtNumber,
            "totalThoughts": thought.totalThoughts,
            "nextThoughtNeeded": thought.nextThoughtNeeded,
            "branches": list(self._branches.keys()),
            "thoughtHistoryLength": len(self._thought_history),
        }

        return {
            "content": [
                {
                    "type": "text",
                    "text": json.dumps(response_payload, indent=2),
                }
            ]
        }


engine = SequentialThinkingEngine()
app = FastAPI(title="Sequential Thinking MCP Compatibility Server")


@app.get("/health")
def health() -> Dict[str, str]:
    return {"status": "ok"}


@app.post("/mcp/tool")
def call_tool(request: ToolRequest) -> ToolResponse:
    try:
        result = engine.process(request.payload)
        if result.get("isError"):
            return ToolResponse(error=result["content"][0]["text"])
        return ToolResponse(result=result)
    except Exception as exc:  # pylint: disable=broad-except
        raise HTTPException(status_code=400, detail=str(exc)) from exc


if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000, log_level="info")
deploy/seqthink/test-e2e.sh (new executable file, 216 lines)
@@ -0,0 +1,216 @@
#!/bin/bash
# End-to-end test script for Sequential Thinking Age Wrapper
set -e

echo "🧪 Sequential Thinking Wrapper E2E Tests"
echo "========================================"
echo ""

# Configuration
WRAPPER_URL="${WRAPPER_URL:-http://localhost:8443}"
JWT_TOKEN="${JWT_TOKEN:-}"
AGE_RECIPIENT="${AGE_RECIPIENT:-}"
AGE_IDENTITY="${AGE_IDENTITY:-}"

# Color codes
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Test counters
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0

# Helper functions
pass() {
    echo -e "${GREEN}✓${NC} $1"
    ((TESTS_PASSED++))
}

fail() {
    echo -e "${RED}✗${NC} $1"
    ((TESTS_FAILED++))
}

warn() {
    echo -e "${YELLOW}⚠${NC} $1"
}

test_start() {
    ((TESTS_RUN++))
    echo ""
    echo "Test $TESTS_RUN: $1"
    echo "---"
}

# Test 1: Health Check
test_start "Health endpoint"
if curl -sf "$WRAPPER_URL/health" > /dev/null 2>&1; then
    pass "Health check passed"
else
    fail "Health check failed"
fi

# Test 2: Readiness Check
test_start "Readiness endpoint"
if curl -sf "$WRAPPER_URL/ready" > /dev/null 2>&1; then
    pass "Readiness check passed"
else
    fail "Readiness check failed"
fi

# Test 3: Metrics Endpoint
test_start "Metrics endpoint"
if curl -sf "$WRAPPER_URL/metrics" | grep -q "seqthink_requests_total"; then
    pass "Metrics endpoint accessible"
else
    fail "Metrics endpoint failed"
fi

# Test 4: Unauthorized Request (no token)
test_start "Unauthorized request rejection"
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$WRAPPER_URL/mcp/tool" \
    -H "Content-Type: application/json" \
    -d '{"tool":"test"}')

if [ "$HTTP_CODE" = "401" ]; then
    pass "Unauthorized request correctly rejected (401)"
else
    warn "Expected 401, got $HTTP_CODE (may be policy disabled)"
fi

# Test 5: Invalid Authorization Header
test_start "Invalid authorization header"
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$WRAPPER_URL/mcp/tool" \
    -H "Authorization: InvalidFormat" \
    -H "Content-Type: application/json" \
    -d '{"tool":"test"}')

if [ "$HTTP_CODE" = "401" ]; then
    pass "Invalid auth header correctly rejected (401)"
else
    warn "Expected 401, got $HTTP_CODE (may be policy disabled)"
fi

# Test 6: JWT Token Validation (if token provided)
if [ -n "$JWT_TOKEN" ]; then
    test_start "JWT token validation"

    # Check if age keys are available
    if [ -n "$AGE_RECIPIENT" ] && [ -n "$AGE_IDENTITY" ]; then
        # Test with encryption
        test_start "Encrypted request with valid JWT"

        # Create test payload
        TEST_PAYLOAD='{"tool":"mcp__sequential-thinking__sequentialthinking","payload":{"thought":"Test thought","thoughtNumber":1,"totalThoughts":1,"nextThoughtNeeded":false}}'

        # Encrypt payload
        ENCRYPTED_PAYLOAD=$(echo "$TEST_PAYLOAD" | age -r "$AGE_RECIPIENT" 2>/dev/null)

        if [ $? -eq 0 ]; then
            # Send encrypted request
            HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$WRAPPER_URL/mcp/tool" \
                -H "Authorization: Bearer $JWT_TOKEN" \
                -H "Content-Type: application/age" \
                -d "$ENCRYPTED_PAYLOAD")

            if [ "$HTTP_CODE" = "200" ]; then
                pass "Encrypted request with JWT succeeded"
            else
                fail "Encrypted request failed with HTTP $HTTP_CODE"
            fi
        else
            fail "Failed to encrypt test payload"
        fi
    else
        # Test without encryption (plaintext mode)
        test_start "Plaintext request with valid JWT"

        HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$WRAPPER_URL/mcp/tool" \
            -H "Authorization: Bearer $JWT_TOKEN" \
            -H "Content-Type: application/json" \
            -d '{"tool":"mcp__sequential-thinking__sequentialthinking","payload":{"thought":"Test","thoughtNumber":1,"totalThoughts":1,"nextThoughtNeeded":false}}')

        if [ "$HTTP_CODE" = "200" ]; then
            pass "Plaintext request with JWT succeeded"
        else
            warn "Request failed with HTTP $HTTP_CODE"
        fi
    fi
else
    warn "JWT_TOKEN not set - skipping authenticated tests"
fi

# Test 7: Content-Type Validation (if encryption enabled)
if [ -n "$AGE_RECIPIENT" ]; then
    test_start "Content-Type validation for encrypted mode"

    HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$WRAPPER_URL/mcp/tool" \
        -H "Authorization: Bearer ${JWT_TOKEN:-dummy}" \
        -H "Content-Type: application/json" \
        -d '{"tool":"test"}')

    if [ "$HTTP_CODE" = "415" ]; then
        pass "Incorrect Content-Type correctly rejected (415)"
    else
        warn "Expected 415, got $HTTP_CODE"
    fi
fi

# Test 8: Metrics Collection
test_start "Metrics collection"
METRICS=$(curl -s "$WRAPPER_URL/metrics")

if echo "$METRICS" | grep -q "seqthink_requests_total"; then
    REQUEST_COUNT=$(echo "$METRICS" | grep "^seqthink_requests_total" | awk '{print $2}')
    pass "Request metrics collected (total: $REQUEST_COUNT)"
else
    fail "Request metrics not found"
fi

if echo "$METRICS" | grep -q "seqthink_errors_total"; then
    ERROR_COUNT=$(echo "$METRICS" | grep "^seqthink_errors_total" | awk '{print $2}')
    pass "Error metrics collected (total: $ERROR_COUNT)"
else
    fail "Error metrics not found"
fi

if echo "$METRICS" | grep -q "seqthink_policy_denials_total"; then
    DENIAL_COUNT=$(echo "$METRICS" | grep "^seqthink_policy_denials_total" | awk '{print $2}')
    pass "Policy denial metrics collected (total: $DENIAL_COUNT)"
else
    warn "Policy denial metrics not found (may be policy disabled)"
fi

# Test 9: SSE Endpoint (basic check)
test_start "SSE endpoint availability"
# Just check if endpoint exists, don't try to consume stream
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" --max-time 2 "$WRAPPER_URL/mcp/sse" 2>/dev/null || echo "timeout")

if [ "$HTTP_CODE" = "401" ] || [ "$HTTP_CODE" = "200" ]; then
    pass "SSE endpoint exists (HTTP $HTTP_CODE)"
else
    warn "SSE endpoint check inconclusive (HTTP $HTTP_CODE)"
fi

# Summary
echo ""
echo "========================================"
echo "Test Summary"
echo "========================================"
echo "Tests Run: $TESTS_RUN"
echo -e "${GREEN}Tests Passed: $TESTS_PASSED${NC}"
if [ $TESTS_FAILED -gt 0 ]; then
    echo -e "${RED}Tests Failed: $TESTS_FAILED${NC}"
fi
echo ""

if [ $TESTS_FAILED -eq 0 ]; then
    echo -e "${GREEN}✓ All tests passed!${NC}"
    exit 0
else
    echo -e "${RED}✗ Some tests failed${NC}"
    exit 1
fi
@@ -5,9 +5,11 @@ import (
 	"fmt"
 	"time"
 
+	"chorus/internal/logging"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/p2p/discovery/mdns"
+	"github.com/rs/zerolog"
 )
 
 // MDNSDiscovery handles mDNS peer discovery for local network
@@ -18,6 +20,7 @@ type MDNSDiscovery struct {
 	ctx        context.Context
 	cancel     context.CancelFunc
 	serviceTag string
+	logger     zerolog.Logger
 }
 
 // mdnsNotifee handles discovered peers
@@ -25,6 +28,7 @@ type mdnsNotifee struct {
 	h         host.Host
 	ctx       context.Context
 	peersChan chan peer.AddrInfo
+	logger    zerolog.Logger
 }
 
 // NewMDNSDiscovery creates a new mDNS discovery service
@@ -35,11 +39,14 @@ func NewMDNSDiscovery(ctx context.Context, h host.Host, serviceTag string) (*MDN
 
 	discoveryCtx, cancel := context.WithCancel(ctx)
 
+	logger := logging.ForComponent(logging.ComponentP2P)
+
 	// Create notifee to handle discovered peers
 	notifee := &mdnsNotifee{
 		h:         h,
 		ctx:       discoveryCtx,
 		peersChan: make(chan peer.AddrInfo, 10),
+		logger:    logger,
 	}
 
 	// Create mDNS service
@@ -52,6 +59,7 @@ func NewMDNSDiscovery(ctx context.Context, h host.Host, serviceTag string) (*MDN
 		ctx:        discoveryCtx,
 		cancel:     cancel,
 		serviceTag: serviceTag,
+		logger:     logger,
 	}
 
 	// Start the service
@@ -63,7 +71,7 @@ func NewMDNSDiscovery(ctx context.Context, h host.Host, serviceTag string) (*MDN
 	// Start background peer connection handler
 	go discovery.handleDiscoveredPeers()
 
-	fmt.Printf("🔍 mDNS Discovery started with service tag: %s\n", serviceTag)
+	logger.Info().Str("service_tag", serviceTag).Msg("mDNS Discovery started")
 	return discovery, nil
 }
 
@@ -90,13 +98,13 @@ func (d *MDNSDiscovery) handleDiscoveredPeers() {
 		}
 
 		// Attempt to connect
-		fmt.Printf("🤝 Discovered peer %s, attempting connection...\n", peerInfo.ID.ShortString())
+		d.logger.Info().Str("peer_id", peerInfo.ID.ShortString()).Msg("Discovered peer, attempting connection")
 
 		connectCtx, cancel := context.WithTimeout(d.ctx, 10*time.Second)
 		if err := d.host.Connect(connectCtx, peerInfo); err != nil {
-			fmt.Printf("❌ Failed to connect to peer %s: %v\n", peerInfo.ID.ShortString(), err)
+			d.logger.Warn().Err(err).Str("peer_id", peerInfo.ID.ShortString()).Msg("Failed to connect to peer")
 		} else {
-			fmt.Printf("✅ Successfully connected to peer %s\n", peerInfo.ID.ShortString())
+			d.logger.Info().Str("peer_id", peerInfo.ID.ShortString()).Msg("Successfully connected to peer")
 		}
 		cancel()
 	}
@@ -119,6 +127,6 @@ func (n *mdnsNotifee) HandlePeerFound(pi peer.AddrInfo) {
 		// Peer info sent to channel
 	default:
 		// Channel is full, skip this peer
-		fmt.Printf("⚠️ Discovery channel full, skipping peer %s\n", pi.ID.ShortString())
+		n.logger.Warn().Str("peer_id", pi.ID.ShortString()).Msg("Discovery channel full, skipping peer")
 	}
 }
@@ -11,18 +11,18 @@ WORKDIR /build
 # Copy go mod files first (for better caching)
 COPY go.mod go.sum ./
 
-# Download dependencies
-RUN go mod download
+# Skip go mod download; we rely on vendored deps to avoid local replaces
+RUN echo "Using vendored dependencies (skipping go mod download)"
 
 # Copy source code
 COPY . .
 
-# Build the CHORUS binary with mod mode
+# Build the CHORUS agent binary with vendored deps
 RUN CGO_ENABLED=0 GOOS=linux go build \
-    -mod=mod \
+    -mod=vendor \
     -ldflags='-w -s -extldflags "-static"' \
-    -o chorus \
-    ./cmd/chorus
+    -o chorus-agent \
+    ./cmd/agent
 
 # Final minimal runtime image
 FROM alpine:3.18
@@ -42,8 +42,8 @@ RUN mkdir -p /app/data && \
     chown -R chorus:chorus /app
 
 # Copy binary from builder stage
-COPY --from=builder /build/chorus /app/chorus
-RUN chmod +x /app/chorus
+COPY --from=builder /build/chorus-agent /app/chorus-agent
+RUN chmod +x /app/chorus-agent
 
 # Switch to non-root user
 USER chorus
@@ -64,5 +64,5 @@ ENV LOG_LEVEL=info \
     CHORUS_HEALTH_PORT=8081 \
    CHORUS_P2P_PORT=9000
 
-# Start CHORUS
-ENTRYPOINT ["/app/chorus"]
+# Start CHORUS Agent
+ENTRYPOINT ["/app/chorus-agent"]
@@ -29,8 +29,8 @@ services:
       - CHORUS_MAX_CONCURRENT_DHT=16    # Limit concurrent DHT queries
 
       # Election stability windows (Medium-risk fix 2.1)
-      - CHORUS_ELECTION_MIN_TERM=30s    # Minimum time between elections to prevent churn
-      - CHORUS_LEADER_MIN_TERM=45s      # Minimum time before challenging healthy leader
+      - CHORUS_ELECTION_MIN_TERM=120s   # Minimum time between elections to prevent churn
+      - CHORUS_LEADER_MIN_TERM=240s     # Minimum time before challenging healthy leader
 
       # Assignment system for runtime configuration (Medium-risk fix 2.2)
       - ASSIGN_URL=${ASSIGN_URL:-}      # Optional: WHOOSH assignment endpoint
@@ -38,6 +38,10 @@ services:
       - TASK_ID=${TASK_ID:-}            # Optional: Task identifier
       - NODE_ID=${NODE_ID:-}            # Optional: Node identifier
 
+      # WHOOSH API configuration for bootstrap peer discovery
+      - WHOOSH_API_BASE_URL=${WHOOSH_API_BASE_URL:-http://whoosh:8080}
+      - WHOOSH_API_ENABLED=true
+
       # Bootstrap pool configuration (supports JSON and CSV)
       - BOOTSTRAP_JSON=/config/bootstrap.json          # Optional: JSON bootstrap config
       - CHORUS_BOOTSTRAP_PEERS=${CHORUS_BOOTSTRAP_PEERS:-}  # CSV fallback
@@ -56,7 +60,14 @@ services:
       # Model configuration
       - CHORUS_MODELS=${CHORUS_MODELS:-meta/llama-3.1-8b-instruct}
       - CHORUS_DEFAULT_REASONING_MODEL=${CHORUS_DEFAULT_REASONING_MODEL:-meta/llama-3.1-8b-instruct}
 
+      # LightRAG configuration (optional RAG enhancement)
+      - CHORUS_LIGHTRAG_ENABLED=${CHORUS_LIGHTRAG_ENABLED:-false}
+      - CHORUS_LIGHTRAG_BASE_URL=${CHORUS_LIGHTRAG_BASE_URL:-http://lightrag:9621}
+      - CHORUS_LIGHTRAG_TIMEOUT=${CHORUS_LIGHTRAG_TIMEOUT:-30s}
+      - CHORUS_LIGHTRAG_API_KEY=${CHORUS_LIGHTRAG_API_KEY:-your-secure-api-key-here}
+      - CHORUS_LIGHTRAG_DEFAULT_MODE=${CHORUS_LIGHTRAG_DEFAULT_MODE:-hybrid}
+
       # Logging configuration
       - LOG_LEVEL=${LOG_LEVEL:-info}
       - LOG_FORMAT=${LOG_FORMAT:-structured}
@@ -95,7 +106,7 @@ services:
     # Container resource limits
     deploy:
       mode: replicated
-      replicas: ${CHORUS_REPLICAS:-9}
+      replicas: ${CHORUS_REPLICAS:-20}
       update_config:
         parallelism: 1
         delay: 10s
@@ -166,6 +177,8 @@ services:
       WHOOSH_SERVER_READ_TIMEOUT: "30s"
       WHOOSH_SERVER_WRITE_TIMEOUT: "30s"
       WHOOSH_SERVER_SHUTDOWN_TIMEOUT: "30s"
+      # UI static directory (served at site root by WHOOSH)
+      WHOOSH_UI_DIR: "/app/ui"
 
       # GITEA configuration
       WHOOSH_GITEA_BASE_URL: https://gitea.chorus.services
@@ -200,8 +213,8 @@ services:
       WHOOSH_BACKBEAT_AGENT_ID: "whoosh"
       WHOOSH_BACKBEAT_NATS_URL: "nats://backbeat-nats:4222"
 
-      # Docker integration configuration (disabled for agent assignment architecture)
-      WHOOSH_DOCKER_ENABLED: "false"
+      # Docker integration configuration - ENABLED for complete agent discovery
+      WHOOSH_DOCKER_ENABLED: "true"
 
     secrets:
      - whoosh_db_password
@@ -210,10 +223,11 @@ services:
       - jwt_secret
       - service_tokens
       - redis_password
-    # volumes:
-    #   - /var/run/docker.sock:/var/run/docker.sock  # Disabled for agent assignment architecture
+    volumes:
+      - whoosh_ui:/app/ui:ro
+      - /var/run/docker.sock:/var/run/docker.sock  # Required for Docker Swarm agent discovery
     deploy:
-      replicas: 2
+      replicas: 1
       restart_policy:
         condition: on-failure
         delay: 5s
@@ -247,11 +261,11 @@ services:
       - traefik.enable=true
       - traefik.docker.network=tengig
       - traefik.http.routers.whoosh.rule=Host(`whoosh.chorus.services`)
+      - traefik.http.routers.whoosh.entrypoints=web,web-secured
       - traefik.http.routers.whoosh.tls=true
       - traefik.http.routers.whoosh.tls.certresolver=letsencryptresolver
-      - traefik.http.routers.photoprism.entrypoints=web,web-secured
       - traefik.http.services.whoosh.loadbalancer.server.port=8080
-      - traefik.http.services.photoprism.loadbalancer.passhostheader=true
+      - traefik.http.services.whoosh.loadbalancer.passhostheader=true
       - traefik.http.middlewares.whoosh-auth.basicauth.users=admin:$2y$10$example_hash
     networks:
       - tengig
@@ -407,7 +421,7 @@ services:
   # REQ: BACKBEAT-REQ-001 - Single BeatFrame publisher per cluster
   # REQ: BACKBEAT-OPS-001 - One replica prefers leadership
   backbeat-pulse:
-    image: anthonyrawlins/backbeat-pulse:v1.0.5
+    image: anthonyrawlins/backbeat-pulse:v1.0.6
     command: >
       ./pulse
       -cluster=chorus-production
@@ -574,6 +588,46 @@ services:
         max-file: "3"
         tag: "nats/{{.Name}}/{{.ID}}"
 
+  watchtower:
+    image: containrrr/watchtower
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    command: --interval 300 --cleanup --revive-stopped --include-stopped
+    restart: always
+
+  # HMMM Traffic Monitor - Observes P2P pub/sub traffic
+  hmmm-monitor:
+    image: anthonyrawlins/hmmm-monitor:latest
+    environment:
+      - WHOOSH_API_BASE_URL=http://whoosh:8080
+    ports:
+      - "9001:9001"  # P2P port for peer discovery
+    deploy:
+      replicas: 1
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
+        window: 120s
+      placement:
+        constraints:
+          - node.hostname == acacia  # Keep monitor on acacia for stable peer ID
+      resources:
+        limits:
+          memory: 128M
+          cpus: '0.25'
+        reservations:
+          memory: 64M
+          cpus: '0.1'
+    networks:
+      - chorus_net
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+        tag: "hmmm-monitor/{{.Name}}/{{.ID}}"
+
 # KACHING services are deployed separately in their own stack
 # License validation will access https://kaching.chorus.services/api
 
@@ -611,6 +665,12 @@ volumes:
       type: none
       o: bind
       device: /rust/containers/WHOOSH/redis
+  whoosh_ui:
+    driver: local
+    driver_opts:
+      type: none
+      o: bind
+      device: /rust/containers/WHOOSH/ui
 
 
 # Networks for CHORUS communication
@@ -645,7 +705,7 @@ secrets:
     name: whoosh_webhook_token
   jwt_secret:
     external: true
-    name: whoosh_jwt_secret
+    name: whoosh_jwt_secret_v4
   service_tokens:
     external: true
     name: whoosh_service_tokens
docs/LIGHTRAG_INTEGRATION.md (new file, 388 lines)
@@ -0,0 +1,388 @@
# LightRAG MCP Integration

**Status:** ✅ Production Ready
**Version:** 1.0.0
**Date:** 2025-09-30

## Overview

CHORUS now includes optional LightRAG integration for Retrieval-Augmented Generation (RAG) capabilities. LightRAG provides graph-based knowledge retrieval to enrich AI reasoning and context resolution.

## Architecture

### Components

1. **LightRAG Client** (`pkg/mcp/lightrag_client.go`)
   - HTTP client for LightRAG MCP server
   - Supports 4 query modes: naive, local, global, hybrid
   - Health checking and document insertion
   - Configurable timeouts and API authentication

2. **Reasoning Engine Integration** (`reasoning/reasoning.go`)
   - `GenerateResponseWithRAG()` - RAG-enriched response generation
   - `GenerateResponseSmartWithRAG()` - Combines model selection + RAG
   - `SetLightRAGClient()` - Configure RAG client
   - Non-fatal error handling (degrades gracefully)

3. **SLURP Context Enrichment** (`pkg/slurp/context/lightrag.go`)
   - `LightRAGEnricher` - Enriches context nodes with RAG data
   - `EnrichContextNode()` - Add insights to individual nodes
   - `EnrichResolvedContext()` - Enrich resolved context chains
   - `InsertContextNode()` - Build knowledge base over time

4. **Configuration** (`pkg/config/config.go`)
   - `LightRAGConfig` struct with 5 configuration options
   - Environment variable support
   - Automatic initialization in runtime

## Configuration

### Environment Variables

```bash
# Enable LightRAG integration
CHORUS_LIGHTRAG_ENABLED=true

# LightRAG server endpoint
CHORUS_LIGHTRAG_BASE_URL=http://127.0.0.1:9621

# Query timeout
CHORUS_LIGHTRAG_TIMEOUT=30s

# Optional API key
CHORUS_LIGHTRAG_API_KEY=your-api-key

# Default query mode (naive, local, global, hybrid)
CHORUS_LIGHTRAG_DEFAULT_MODE=hybrid
```

### Docker Configuration

```yaml
services:
  chorus-agent:
    environment:
      - CHORUS_LIGHTRAG_ENABLED=true
      - CHORUS_LIGHTRAG_BASE_URL=http://lightrag:9621
      - CHORUS_LIGHTRAG_DEFAULT_MODE=hybrid
    depends_on:
      - lightrag

  lightrag:
    image: lightrag/lightrag:latest
    ports:
      - "9621:9621"
    volumes:
      - lightrag-data:/app/data
```

## Query Modes

LightRAG supports 4 query modes with different retrieval strategies (a small mode-selection sketch follows this list):

1. **Naive Mode** (`QueryModeNaive`)
   - Simple semantic search
   - Fastest, least context
   - Use for: Quick lookups

2. **Local Mode** (`QueryModeLocal`)
   - Local graph traversal
   - Context from immediate neighbors
   - Use for: Related information

3. **Global Mode** (`QueryModeGlobal`)
   - Global graph analysis
   - Broad context from the entire knowledge base
   - Use for: High-level questions

4. **Hybrid Mode** (`QueryModeHybrid`) ⭐ **Recommended**
   - Combined approach
   - Balances breadth and depth
   - Use for: General purpose RAG
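
As a small illustration of these trade-offs (the `QueryMode` constants come from this integration, but the type name, the latency heuristic, and its thresholds are our assumptions):

```go
// pickMode trades retrieval depth for latency using the mode
// characteristics listed above; the thresholds are illustrative.
func pickMode(latencyBudget time.Duration) mcp.QueryMode {
	switch {
	case latencyBudget < 2*time.Second:
		return mcp.QueryModeNaive // fastest: plain semantic search
	case latencyBudget < 10*time.Second:
		return mcp.QueryModeLocal // neighbor-level graph context
	default:
		return mcp.QueryModeHybrid // recommended general-purpose default
	}
}
```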
## Usage Examples

### Reasoning Engine with RAG

```go
import (
    "context"
    "time"

    "chorus/pkg/mcp"
    "chorus/reasoning"
)

// Initialize LightRAG client
config := mcp.LightRAGConfig{
    BaseURL: "http://127.0.0.1:9621",
    Timeout: 30 * time.Second,
}
client := mcp.NewLightRAGClient(config)

// Configure reasoning engine
reasoning.SetLightRAGClient(client)

// Generate RAG-enriched response
ctx := context.Background()
response, err := reasoning.GenerateResponseWithRAG(
    ctx,
    "meta/llama-3.1-8b-instruct",
    "How does CHORUS handle P2P networking?",
    mcp.QueryModeHybrid,
)
```

### SLURP Context Enrichment

```go
import (
    "context"

    "chorus/pkg/mcp"
    "chorus/pkg/slurp/context"
)

// Create enricher
enricher := context.NewLightRAGEnricher(client, "hybrid")

// Enrich a context node
node := &context.ContextNode{
    Path:    "/pkg/p2p",
    Summary: "P2P networking implementation",
    Purpose: "Provides libp2p networking layer",
}

err := enricher.EnrichContextNode(ctx, node)
// node.Insights now contains RAG-retrieved information

// Insert for future retrieval
err = enricher.InsertContextNode(ctx, node)
```

### Direct LightRAG Client

```go
import (
    "context"

    "chorus/pkg/mcp"
)

client := mcp.NewLightRAGClient(config)

// Health check
healthy := client.IsHealthy(ctx)

// Query with response
response, err := client.Query(ctx, "query", mcp.QueryModeHybrid)

// Get context only
context, err := client.GetContext(ctx, "query", mcp.QueryModeHybrid)

// Insert document
err := client.Insert(ctx, "text content", "description")
```

## Integration Points

### Runtime Initialization

LightRAG is initialized automatically in `internal/runtime/shared.go`:

```go
// Line 685-704
if cfg.LightRAG.Enabled {
    lightragConfig := mcp.LightRAGConfig{
        BaseURL: cfg.LightRAG.BaseURL,
        Timeout: cfg.LightRAG.Timeout,
        APIKey:  cfg.LightRAG.APIKey,
    }
    lightragClient := mcp.NewLightRAGClient(lightragConfig)

    if lightragClient.IsHealthy(ctx) {
        reasoning.SetLightRAGClient(lightragClient)
        logger.Info("📚 LightRAG RAG system enabled")
    }
}
```

### Graceful Degradation

LightRAG integration is **completely optional** and **non-blocking**:

- If `CHORUS_LIGHTRAG_ENABLED=false`, no LightRAG calls are made
- If the LightRAG server is unavailable, the health check fails gracefully
- If RAG queries fail, the reasoning engine falls back to non-RAG generation (sketched below)
- SLURP enrichment failures are logged but don't block context resolution
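
A sketch of that fallback path (assumes a plain `reasoning.GenerateResponse` non-RAG function; only `GenerateResponseWithRAG` is confirmed by this document):

```go
// generateWithFallback tries the RAG-enriched path first and degrades to
// plain generation when the RAG call fails for any reason.
func generateWithFallback(ctx context.Context, model, prompt string) (string, error) {
	resp, err := reasoning.GenerateResponseWithRAG(ctx, model, prompt, mcp.QueryModeHybrid)
	if err == nil {
		return resp, nil
	}
	// RAG failure (server down, timeout, empty index) is non-fatal.
	return reasoning.GenerateResponse(ctx, model, prompt)
}
```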
## Testing

### Unit Tests

```bash
# Run all LightRAG tests (requires running server)
go test -v ./pkg/mcp/

# Run only unit tests (no server required)
go test -v -short ./pkg/mcp/
```

### Integration Tests

```bash
# Start LightRAG server
cd ~/chorus/mcp-include/LightRAG
python main.py

# Run integration tests
cd ~/chorus/project-queues/active/CHORUS
go test -v ./pkg/mcp/ -run TestLightRAGClient
```

## Performance Considerations

### Query Timeouts

- Default: 30 seconds
- Hybrid mode is slowest (analyzes the entire graph)
- Naive mode is fastest (simple semantic search)

### Caching

LightRAG includes internal caching:
- Repeated queries return cached results
- Cache TTL managed by the LightRAG server
- No CHORUS-side caching required

### Resource Usage

- Memory: Proportional to knowledge base size
- CPU: Query modes have different compute requirements
- Network: HTTP requests to the LightRAG server

## Troubleshooting

### Server Not Healthy

**Symptom:** `LightRAG enabled but server not healthy`

**Solutions:**
1. Check if the LightRAG server is running: `curl http://127.0.0.1:9621/health`
2. Verify the correct port in `CHORUS_LIGHTRAG_BASE_URL`
3. Check LightRAG logs for errors
4. Ensure network connectivity between CHORUS and LightRAG

### Empty Responses

**Symptom:** RAG queries return empty results

**Solutions:**
1. The knowledge base may be empty - insert documents first
2. The query may not match indexed content
3. Try a different query mode (hybrid recommended)
4. Check LightRAG indexing logs

### Timeout Errors

**Symptom:** `context deadline exceeded`

**Solutions:**
1. Increase `CHORUS_LIGHTRAG_TIMEOUT`
2. Use a faster query mode (naive or local)
3. Optimize LightRAG server performance
4. Check network latency

## Security Considerations

### API Authentication

Optional API key support:
```bash
CHORUS_LIGHTRAG_API_KEY=your-secret-key
```

Keys are sent as Bearer tokens in the Authorization header.

### Network Security

- Run LightRAG on an internal network only
- Use HTTPS for production deployments
- Consider firewall rules to restrict access
- LightRAG doesn't include built-in encryption

### Data Privacy

- All queries and documents are stored in LightRAG
- Consider what data is being indexed
- Implement data retention policies
- Use access control on the LightRAG server

## Monitoring

### Health Checks

```go
// Check LightRAG availability
if client.IsHealthy(ctx) {
    // Server is healthy
}

// Get detailed health info
health, err := client.Health(ctx)
// Returns: Status, CoreVersion, APIVersion, etc.
```

### Metrics

Consider adding the following metrics (a minimal Prometheus sketch follows this list):
- RAG query latency
- Cache hit rates
- Enrichment success/failure rates
- Knowledge base size
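
One way the first of these could look, using the Prometheus Go client (a sketch only: the metric and package names are illustrative, not existing CHORUS metrics):

```go
package metrics

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// ragQueryLatency tracks LightRAG query duration, labeled by query mode.
var ragQueryLatency = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "chorus_lightrag_query_seconds",
		Help: "Latency of LightRAG queries by mode.",
	},
	[]string{"mode"},
)

func init() {
	prometheus.MustRegister(ragQueryLatency)
}

// ObserveQuery records one query's duration, e.g. via defer in Query():
//   defer metrics.ObserveQuery(string(mode), time.Now())
func ObserveQuery(mode string, start time.Time) {
	ragQueryLatency.WithLabelValues(mode).Observe(time.Since(start).Seconds())
}
```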
## Future Enhancements

Potential improvements:

1. **Batch Query Optimization**
   - Batch multiple RAG queries together
   - Reduce HTTP overhead

2. **Adaptive Query Mode Selection**
   - Automatically choose query mode based on question type
   - Learn from past query performance

3. **Knowledge Base Management**
   - Automated document insertion from SLURP contexts
   - Background indexing of code repositories
   - Scheduled knowledge base updates

4. **Advanced Caching**
   - CHORUS-side caching with TTL
   - Semantic cache (similar queries share cache)
   - Persistent cache across restarts

5. **Multi-tenant Support**
   - Per-agent knowledge bases
   - Role-based access to documents
   - Encrypted knowledge storage

## Files Changed

1. `pkg/mcp/lightrag_client.go` - NEW (277 lines)
2. `pkg/mcp/lightrag_client_test.go` - NEW (239 lines)
3. `pkg/config/config.go` - Modified (added LightRAGConfig)
4. `reasoning/reasoning.go` - Modified (added RAG functions)
5. `internal/runtime/shared.go` - Modified (added initialization)
6. `pkg/slurp/context/lightrag.go` - NEW (203 lines)

**Total:** 3 new files, 3 modified files, ~750 lines of code

## References

- LightRAG Documentation: https://github.com/HKUDS/LightRAG
- MCP Protocol Spec: https://spec.modelcontextprotocol.io
- CHORUS Documentation: `docs/comprehensive/`

---

**Maintainer:** CHORUS Project Team
**Last Updated:** 2025-09-30
**Status:** Production Ready
1090 docs/SEQTHINK-AGE-WRAPPER-IMPLEMENTATION.md (new file; diff suppressed because it is too large)

579 docs/SEQUENTIAL-THINKING-INTEGRATION-PLAN.md (new file)
@@ -0,0 +1,579 @@
# Sequential Thinking Integration Plan for CHORUS Agents

**Date**: 2025-10-13
**Status**: Design Phase
**Priority**: High - Blocking further intelligence improvements

---

## Executive Summary

This document outlines the integration of the Sequential Thinking MCP server into CHORUS agents to enable **structured, multi-step reasoning** before task execution. This addresses the limitation in the SequentialThinkingForCHORUS repository issue and unlocks advanced agent decision-making capabilities.

**Problem Statement**: CHORUS agents currently use simple prompt-response cycles without structured reasoning, limiting their ability to handle complex tasks requiring multi-step analysis, hypothesis generation, and iterative refinement.

**Solution**: Integrate the `mcp__sequential-thinking__sequentialthinking` MCP tool into the AI provider layer to enable chain-of-thought reasoning for complex tasks.

---

## Current Architecture Analysis

### 1. Existing AI Provider Flow

```
TaskRequest → ModelProvider.ExecuteTask() → TaskResponse
                        ↓
                 [Single LLM Call]
                        ↓
                 Response String
```

**Current Providers**:
- **OllamaProvider**: Local model execution
- **ResetDataProvider**: ResetData LaaS API
- **OpenAIProvider**: OpenAI API

**Current Limitations**:
- ✗ No structured reasoning process
- ✗ No ability to revise initial thoughts
- ✗ No hypothesis generation and verification
- ✗ No branching for alternative approaches
- ✗ Simple string reasoning field (not structured)

### 2. TaskResponse Structure

**Location**: `/home/tony/chorus/project-queues/active/CHORUS/pkg/ai/provider.go:53-78`

```go
type TaskResponse struct {
    Success    bool         `json:"success"`
    TaskID     string       `json:"task_id"`
    Response   string       `json:"response"`
    Reasoning  string       `json:"reasoning,omitempty"` // ← Simple string
    Actions    []TaskAction `json:"actions,omitempty"`
    Artifacts  []Artifact   `json:"artifacts,omitempty"`
    TokensUsed TokenUsage   `json:"tokens_used,omitempty"`
    // ... other fields
}
```

**Opportunity**: The `Reasoning` field is perfect for storing structured thinking output!

---

## Sequential Thinking MCP Tool

### Tool Signature

```go
mcp__sequential-thinking__sequentialthinking(
    thought: string,
    nextThoughtNeeded: bool,
    thoughtNumber: int,
    totalThoughts: int,
    isRevision: bool = false,
    revisesThought: int = null,
    branchFromThought: int = null,
    branchId: string = null,
    needsMoreThoughts: bool = false
)
```
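For concreteness, a single invocation of this tool might carry a payload like the following. The field names come from the signature above; the values are invented for illustration:

```json
{
  "thought": "The task needs a DB migration; splitting it into a schema change and a backfill step.",
  "nextThoughtNeeded": true,
  "thoughtNumber": 2,
  "totalThoughts": 6,
  "isRevision": false
}
```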
### Capabilities

1. **Adaptive Thinking**: Adjust `totalThoughts` up or down as understanding deepens
2. **Revision Support**: Question and revise previous thoughts (`isRevision`, `revisesThought`)
3. **Branching**: Explore alternative approaches (`branchFromThought`, `branchId`)
4. **Hypothesis Testing**: Generate and verify hypotheses in chain-of-thought
5. **Uncertainty Expression**: Express and work through unclear aspects
6. **Context Maintenance**: Keep track of all previous thoughts

### When to Use

- **Complex problem decomposition**
- **Multi-step solution planning**
- **Problems requiring course correction**
- **Unclear scope requiring exploration**
- **Tasks needing context over multiple steps**
- **Filtering irrelevant information**

---

## Proposed Integration Architecture

### Phase 1: Enhanced TaskResponse Structure

**File**: `pkg/ai/provider.go`

```go
// StructuredReasoning represents a chain-of-thought reasoning process
type StructuredReasoning struct {
    Thoughts          []ThoughtStep `json:"thoughts"`
    FinalHypothesis   string        `json:"final_hypothesis,omitempty"`
    VerificationSteps []string      `json:"verification_steps,omitempty"`
    Confidence        float32       `json:"confidence"` // 0.0-1.0
    TotalRevisions    int           `json:"total_revisions"`
    BranchesExplored  int           `json:"branches_explored"`
}

// ThoughtStep represents a single step in the reasoning process
type ThoughtStep struct {
    Number         int       `json:"number"`
    Content        string    `json:"content"`
    IsRevision     bool      `json:"is_revision"`
    RevisesThought int       `json:"revises_thought,omitempty"`
    BranchID       string    `json:"branch_id,omitempty"`
    BranchFrom     int       `json:"branch_from,omitempty"`
    Timestamp      time.Time `json:"timestamp"`
}

// TaskResponse update
type TaskResponse struct {
    // ... existing fields ...
    Reasoning           string               `json:"reasoning,omitempty"`            // Legacy simple string
    StructuredReasoning *StructuredReasoning `json:"structured_reasoning,omitempty"` // NEW
    // ... rest of fields ...
}
```
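For illustration, a serialized `StructuredReasoning` payload would follow the JSON tags above; the values here are invented:

```json
{
  "thoughts": [
    {"number": 1, "content": "Break the task into components...", "is_revision": false, "timestamp": "2025-10-13T10:00:00Z"},
    {"number": 2, "content": "Revising thought 1: the migration needs its own step.", "is_revision": true, "revises_thought": 1, "timestamp": "2025-10-13T10:00:12Z"}
  ],
  "final_hypothesis": "Event sourcing fits the audit requirements best.",
  "confidence": 0.8,
  "total_revisions": 1,
  "branches_explored": 0
}
```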
### Phase 2: Sequential Thinking Wrapper

**New File**: `pkg/ai/sequential_thinking.go`

```go
package ai

import (
    "context"
    "fmt"
)

// SequentialThinkingEngine wraps the MCP sequential thinking tool
type SequentialThinkingEngine struct {
    mcpClient MCPClient // Interface to MCP tool
}

// ThinkingRequest represents input for sequential thinking
type ThinkingRequest struct {
    Problem        string
    Context        map[string]interface{}
    MaxThoughts    int
    AllowRevisions bool
    AllowBranching bool
}

// ThinkingResult represents output from sequential thinking
type ThinkingResult struct {
    Thoughts        []ThoughtStep
    FinalConclusion string
    Confidence      float32
    ReasoningPath   string // Markdown summary of thinking process
}

// Think executes the sequential thinking process.
// Implementation outline:
//  1. Initialize thinking with the problem statement
//  2. Iteratively call the MCP tool until nextThoughtNeeded = false
//  3. Track all thoughts, revisions, and branches
//  4. Generate the final conclusion and reasoning summary
//  5. Return the structured result
func (e *SequentialThinkingEngine) Think(ctx context.Context, req *ThinkingRequest) (*ThinkingResult, error) {
    return nil, fmt.Errorf("sequential thinking: not implemented yet")
}
```
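The `MCPClient` interface is referenced but not defined in this plan. A minimal sketch of what it and the iterative loop from step 2 could look like follows; the method name `CallTool`, the argument map, and the request/response contract are assumptions, not an existing API:

```go
package ai

import "context"

// MCPClient is a hypothetical minimal interface to an MCP server;
// the real integration may expose a richer API.
type MCPClient interface {
    // CallTool invokes a named MCP tool with JSON-style arguments
    // and returns the decoded result.
    CallTool(ctx context.Context, name string, args map[string]interface{}) (map[string]interface{}, error)
}

// thinkLoop sketches step 2 of Think: call the tool repeatedly until
// the model reports that no further thoughts are needed.
func thinkLoop(ctx context.Context, mcp MCPClient, problem string, maxThoughts int) ([]string, error) {
    var thoughts []string
    next := true
    for n := 1; next && n <= maxThoughts; n++ {
        res, err := mcp.CallTool(ctx, "sequentialthinking", map[string]interface{}{
            "thought":           problem, // first call seeds the problem; later calls carry prior thoughts
            "thoughtNumber":     n,
            "totalThoughts":     maxThoughts,
            "nextThoughtNeeded": n < maxThoughts,
        })
        if err != nil {
            return nil, err
        }
        if t, ok := res["thought"].(string); ok {
            thoughts = append(thoughts, t)
        }
        next, _ = res["nextThoughtNeeded"].(bool)
    }
    return thoughts, nil
}
```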
### Phase 3: Provider Integration

**Modified File**: `pkg/ai/resetdata.go`

```go
// ExecuteTask with sequential thinking
func (p *ResetDataProvider) ExecuteTask(ctx context.Context, request *TaskRequest) (*TaskResponse, error) {
    startTime := time.Now()

    // Determine if the task requires sequential thinking
    useSequentialThinking := p.shouldUseSequentialThinking(request)

    var structuredReasoning *StructuredReasoning
    var enhancedPrompt string

    if useSequentialThinking {
        // Use the sequential thinking engine to analyze the task first
        thinkingEngine := NewSequentialThinkingEngine(p.mcpClient)

        thinkingResult, err := thinkingEngine.Think(ctx, &ThinkingRequest{
            Problem:        p.formatTaskAsProblem(request),
            Context:        request.Context,
            MaxThoughts:    10,
            AllowRevisions: true,
            AllowBranching: true,
        })

        if err != nil {
            // Fall back to direct execution if thinking fails
            log.Warn().Err(err).Msg("Sequential thinking failed, falling back to direct execution")
        } else {
            // Use the thinking result to enhance the prompt
            enhancedPrompt = p.buildPromptWithThinking(request, thinkingResult)
            structuredReasoning = convertToStructuredReasoning(thinkingResult)
        }
    }

    // Execute with the enhanced prompt (if available) or the standard prompt
    messages, _ := p.buildChatMessages(request, enhancedPrompt)

    // ... rest of execution ...

    return &TaskResponse{
        Success:             true,
        Response:            responseText,
        Reasoning:           legacyReasoningString,
        StructuredReasoning: structuredReasoning, // NEW
        // ... rest of response ...
    }, nil
}

// shouldUseSequentialThinking determines if a task warrants sequential thinking.
// Heuristics:
//   - High complexity tasks (complexity >= 7)
//   - Architect role (requires system design)
//   - Tasks with "design" or "architecture" in title/labels
//   - Tasks requiring multi-step planning
func (p *ResetDataProvider) shouldUseSequentialThinking(request *TaskRequest) bool {
    if request.Complexity >= 7 {
        return true
    }

    role := strings.ToLower(request.AgentRole)
    if role == "architect" || role == "senior-developer" {
        return true
    }

    keywords := []string{"design", "architecture", "refactor", "plan", "strategy"}
    taskText := strings.ToLower(request.TaskTitle + " " + request.TaskDescription)
    for _, keyword := range keywords {
        if strings.Contains(taskText, keyword) {
            return true
        }
    }

    return false
}
```
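The code above references `buildPromptWithThinking` without defining it. As a hypothetical sketch of that helper (the prompt wording and method body are assumptions, not the planned implementation):

```go
// buildPromptWithThinking is a hypothetical sketch: it prepends the
// reasoning summary so the model executes against an agreed plan.
func (p *ResetDataProvider) buildPromptWithThinking(req *TaskRequest, tr *ThinkingResult) string {
    var b strings.Builder
    b.WriteString("You have already reasoned about this task. Plan:\n\n")
    b.WriteString(tr.ReasoningPath)
    b.WriteString("\n\nConclusion: ")
    b.WriteString(tr.FinalConclusion)
    b.WriteString("\n\nNow execute the task:\n")
    b.WriteString(req.TaskDescription)
    return b.String()
}
```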
---

## Implementation Phases

### Phase 1: Foundation (Days 1-2)

**Tasks**:
1. ✅ Define `StructuredReasoning` and `ThoughtStep` types
2. ✅ Add `StructuredReasoning` field to `TaskResponse`
3. ✅ Create `SequentialThinkingEngine` skeleton
4. ✅ Add MCP client interface for sequential-thinking tool

**Files to Create/Modify**:
- `pkg/ai/provider.go` - Add new types
- `pkg/ai/sequential_thinking.go` - New file
- `pkg/ai/mcp_client.go` - New file for MCP integration

**Success Criteria**:
- Code compiles without errors
- Types are properly defined
- MCP client interface is clear

### Phase 2: Sequential Thinking Engine (Days 3-5)

**Tasks**:
1. Implement `SequentialThinkingEngine.Think()` method
2. Implement MCP tool call wrapper
3. Add thought tracking and revision detection
4. Implement branch management
5. Generate reasoning summaries
6. Write unit tests

**Files**:
- `pkg/ai/sequential_thinking.go` - Full implementation
- `pkg/ai/sequential_thinking_test.go` - Unit tests

**Success Criteria**:
- Can execute complete thinking cycles
- Properly tracks revisions and branches
- Generates clear reasoning summaries
- All unit tests pass

### Phase 3: Provider Integration (Days 6-8)

**Tasks**:
1. Modify `ResetDataProvider.ExecuteTask()` for sequential thinking
2. Implement `shouldUseSequentialThinking()` heuristics
3. Add prompt enhancement with thinking results
4. Implement fallback for thinking failures
5. Add configuration options
6. Write integration tests

**Files**:
- `pkg/ai/resetdata.go` - Modify ExecuteTask
- `pkg/ai/ollama.go` - Same modifications
- `config/agent.yaml` - Add sequential thinking config

**Success Criteria**:
- Complex tasks trigger sequential thinking
- Thinking results enhance task execution
- Graceful fallback on failures
- Integration tests pass

### Phase 4: Testing & Validation (Days 9-10)

**Tasks**:
1. End-to-end testing with real councils
2. Test with various complexity levels
3. Validate reasoning quality improvements
4. Performance benchmarking
5. Documentation updates

**Test Cases**:
- Simple task (complexity=3) → No sequential thinking
- Complex task (complexity=8) → Sequential thinking enabled
- Architect role → Always uses sequential thinking
- Design task → Sequential thinking with branching
- Fallback scenario → Graceful degradation

**Success Criteria**:
- Demonstrable improvement in task quality
- Acceptable performance overhead (<30% increase in latency)
- Clear reasoning traces in artifacts
- Documentation complete

---

## Configuration

### Agent Configuration

**File**: `config/agent.yaml`

```yaml
ai_providers:
  resetdata:
    type: "resetdata"
    endpoint: "${RESETDATA_API_ENDPOINT}"
    api_key: "${RESETDATA_API_KEY}"
    default_model: "llama3.1:70b"

    # Sequential thinking configuration
    enable_sequential_thinking: true
    sequential_thinking:
      min_complexity: 7          # Minimum complexity to trigger
      force_for_roles:           # Always use for these roles
        - architect
        - senior-developer
      max_thoughts: 15           # Maximum thinking iterations
      enable_revisions: true     # Allow thought revisions
      enable_branching: true     # Allow exploring alternatives
      confidence_threshold: 0.7  # Minimum confidence for final answer
```
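A Go struct mirroring this YAML block might look like the following; the field and tag names are illustrative assumptions, not the final schema:

```go
package config

// SequentialThinkingConfig mirrors the sequential_thinking YAML block above.
// Field and tag names here are illustrative, not the final schema.
type SequentialThinkingConfig struct {
    MinComplexity       int      `yaml:"min_complexity"`
    ForceForRoles       []string `yaml:"force_for_roles"`
    MaxThoughts         int      `yaml:"max_thoughts"`
    EnableRevisions     bool     `yaml:"enable_revisions"`
    EnableBranching     bool     `yaml:"enable_branching"`
    ConfidenceThreshold float64  `yaml:"confidence_threshold"`
}
```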
### Runtime Toggle

Allow runtime control via the council brief:

```json
{
  "task_id": "task-123",
  "complexity": 8,
  "use_sequential_thinking": true,
  "thinking_config": {
    "max_thoughts": 20,
    "allow_branching": true
  }
}
```

(`use_sequential_thinking` is an explicit per-task override.)

---

## Benefits & Expected Improvements

### 1. Better Problem Decomposition

**Before**:
```
Agent: Here's my solution [immediately provides implementation]
```

**After**:
```
Thought 1: Breaking down the task into 3 main components...
Thought 2: Component A requires database schema changes...
Thought 3: Wait, revising thought 2 - migration strategy needs consideration...
Thought 4: Exploring alternative: event sourcing vs direct updates...
Thought 5: Event sourcing better for audit trail requirements...
Final: Implementation plan with 5 concrete steps...
```

### 2. Improved Architecture Decisions

Architect agents can:
- Explore multiple design alternatives
- Revise decisions based on discovered constraints
- Build and verify hypotheses about scalability
- Document the reasoning trail for future reference

### 3. Higher Quality Code

Developer agents can:
- Think through edge cases before coding
- Consider multiple implementation approaches
- Revise initial assumptions
- Plan the testing strategy upfront

### 4. Debugging Enhancement

When tasks fail:
- Reasoning traces show where the agent went wrong
- Flawed assumptions can be identified
- Prompts and heuristics become easier to improve

---

## Performance Considerations

### 1. Latency Impact

**Estimated Overhead**:
- Sequential thinking: 5-15 LLM calls (vs 1 direct call)
- Expected latency increase: 10-30 seconds for complex tasks
- **Mitigation**: Only use for high-complexity tasks (complexity >= 7)

### 2. Token Usage

**Estimated Increase**:
- Each thought: ~200-500 tokens
- 10 thoughts: ~3000-5000 additional tokens
- **Mitigation**: Set reasonable `max_thoughts` limits

### 3. Resource Requirements

**MCP Server**:
- The sequential thinking MCP server must be available
- Requires proper error handling and fallback

---

## Risks & Mitigations

| Risk | Impact | Mitigation |
|------|--------|------------|
| MCP server unavailable | High | Graceful fallback to direct execution |
| Increased latency unacceptable | Medium | Make sequential thinking opt-in per task |
| Token cost explosion | Medium | Set hard limits on max_thoughts |
| Reasoning doesn't improve quality | High | A/B testing with metrics |
| Complex implementation | Medium | Phased rollout with testing |

---

## Success Metrics

### Quantitative

1. **Task Success Rate**: Compare before/after for complexity >= 7 tasks
   - Target: +15% improvement
2. **Code Quality**: Static analysis scores for generated code
   - Target: +20% improvement in complexity score
3. **PR Acceptance Rate**: How many agent PRs get merged
   - Target: +25% improvement
4. **Latency**: Task execution time
   - Acceptable: <30% increase for complex tasks

### Qualitative

1. **Reasoning Quality**: Human review of reasoning traces
2. **Decision Clarity**: Can humans understand the agent's thought process?
3. **Developer Feedback**: Is it easier to debug failed tasks?

---

## Rollout Plan

### Stage 1: Internal Testing (Week 1)

- Deploy to the development environment
- Test with synthetic tasks
- Gather performance metrics
- Refine heuristics

### Stage 2: Limited Production (Week 2)

- Enable for the architect role only
- Enable for complexity >= 9 only
- Monitor closely
- Collect feedback

### Stage 3: Expanded Rollout (Weeks 3-4)

- Enable for all roles with complexity >= 7
- Add complexity-based opt-in
- Full production deployment
- Continuous monitoring

### Stage 4: Optimization (Week 5+)

- Fine-tune heuristics based on data
- Optimize thought limits
- Improve reasoning summaries
- Add advanced features (e.g., multi-agent reasoning)

---

## Future Enhancements

### 1. Multi-Agent Reasoning

Multiple agents can contribute thoughts to the same reasoning chain:
- Architect proposes the design
- Security agent reviews security implications
- Performance agent analyzes scalability

### 2. Reasoning Templates

Pre-defined thinking patterns for common scenarios:
- API design checklist
- Security review framework
- Performance optimization workflow

### 3. Learning from Reasoning

Store successful reasoning patterns:
- Build a knowledge base of good reasoning traces
- Use them as examples in future tasks
- Identify common pitfalls

### 4. Visualization

Dashboard showing reasoning graphs:
- Thought flow diagrams
- Revision history
- Branch exploration trees
- Confidence evolution

---

## References

- **SequentialThinkingForCHORUS Issue**: (repository in Gitea)
- **MCP Sequential Thinking Tool**: Available in Claude Code MCP servers
- **CHORUS Task Execution**: `/home/tony/chorus/project-queues/active/CHORUS/pkg/execution/engine.go`
- **AI Provider Interface**: `/home/tony/chorus/project-queues/active/CHORUS/pkg/ai/provider.go`
- **ResetData Provider**: `/home/tony/chorus/project-queues/active/CHORUS/pkg/ai/resetdata.go`

---

## Document Info

- **Created**: 2025-10-13
- **Author**: Claude Code
- **Status**: Design Complete - Ready for Implementation
- **Next Steps**: Begin Phase 1 implementation
3 go.mod

@@ -12,6 +12,7 @@ require (
 	github.com/docker/go-connections v0.6.0
 	github.com/docker/go-units v0.5.0
 	github.com/go-redis/redis/v8 v8.11.5
+	github.com/golang-jwt/jwt/v5 v5.3.0
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/mux v1.8.1
 	github.com/gorilla/websocket v1.5.0
@@ -23,6 +24,7 @@ require (
 	github.com/multiformats/go-multihash v0.2.3
 	github.com/prometheus/client_golang v1.19.1
 	github.com/robfig/cron/v3 v3.0.1
+	github.com/rs/zerolog v1.32.0
 	github.com/sashabaranov/go-openai v1.41.1
 	github.com/sony/gobreaker v0.5.0
 	github.com/stretchr/testify v1.11.1
@@ -108,6 +110,7 @@ require (
 	github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
 	github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
 	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/miekg/dns v1.1.56 // indirect
 	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
11 go.sum

@@ -147,6 +147,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -304,7 +306,11 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
 github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -426,6 +432,9 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0=
+github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sashabaranov/go-openai v1.41.1 h1:zf5tM+GuxpyiyD9XZg8nCqu52eYFQg9OOew0gnIuDy4=
@@ -620,8 +629,10 @@ golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
 golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
2 hmmm-monitor/.gitignore (vendored, new file)
@@ -0,0 +1,2 @@
hmmm-monitor
*.log
41 hmmm-monitor/Dockerfile (new file)
@@ -0,0 +1,41 @@
FROM golang:1.22-alpine AS builder

# Install build dependencies
RUN apk add --no-cache git ca-certificates

WORKDIR /app

# Copy go mod files
COPY go.mod go.sum* ./

# Download dependencies
RUN go mod download || true

# Copy source code
COPY main.go ./

# Build the binary
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o hmmm-monitor main.go

# Final stage - minimal image
FROM alpine:latest

RUN apk --no-cache add ca-certificates tzdata

WORKDIR /app

# Copy binary from builder
COPY --from=builder /app/hmmm-monitor .

# Run as non-root user
RUN addgroup -g 1000 monitor && \
    adduser -D -u 1000 -G monitor monitor && \
    chown -R monitor:monitor /app

USER monitor

# Set metadata
LABEL maintainer="CHORUS Ecosystem" \
      description="HMMM Traffic Monitor - Real-time libp2p message monitoring for CHORUS"

ENTRYPOINT ["./hmmm-monitor"]
120 hmmm-monitor/README.md (new file)
@@ -0,0 +1,120 @@
# HMMM Traffic Monitor

Real-time monitoring tool for CHORUS libp2p pub/sub messages (HMMM and Bzzz).

## Purpose

This standalone monitoring container subscribes to all CHORUS pub/sub topics and logs all traffic in real time. It's designed for:

- **Debugging**: See exactly what messages are being sent
- **Observability**: Monitor agent coordination and task execution
- **Development**: Understand message flow during development
- **Troubleshooting**: Identify communication issues between agents

## Topics Monitored

- `chorus-bzzz`: Main coordination topic (task claims, availability, progress)
- `chorus-hmmm`: Meta-discussion topic (help requests, collaboration)
- `chorus-context`: Context feedback messages
- `council-formation`: Council formation broadcasts
- `council-assignments`: Role assignments

## Usage

### Build the Image

```bash
cd hmmm-monitor
docker build -t anthonyrawlins/hmmm-monitor:latest .
```

### Run Locally

```bash
docker run --rm --network chorus_net anthonyrawlins/hmmm-monitor:latest
```

### Deploy to Swarm

```bash
docker stack deploy -c docker-compose.yml hmmm-monitor
```

### View Logs

```bash
# Real-time logs
docker service logs -f hmmm-monitor_hmmm-monitor

# Filter by topic
docker service logs hmmm-monitor_hmmm-monitor | grep "chorus-bzzz"

# Filter by message type
docker service logs hmmm-monitor_hmmm-monitor | grep "availability_broadcast"

# Export to file
docker service logs hmmm-monitor_hmmm-monitor > hmmm-traffic-$(date +%Y%m%d).log
```

## Message Format

Each logged message includes:

```json
{
  "timestamp": "2025-10-11T12:30:45Z",
  "topic": "chorus-bzzz",
  "from": "12D3Koo...",
  "type": "availability_broadcast",
  "payload": {
    "agent_id": "agent-123",
    "current_tasks": 1,
    "max_tasks": 3,
    "available_for_work": true
  }
}
```

## Emojis

The monitor uses emojis to quickly identify message types:

- 🐝 General Bzzz coordination
- 📊 Availability broadcasts
- 🎯 Capability broadcasts
- ✋ Task claims
- ⏳ Task progress
- ✅ Task complete
- 🧠 HMMM meta-discussion
- 💬 Discussion messages
- 🆘 Help requests
- 💡 Help responses
- 🚨 Escalation triggers
- 🎭 Council formation
- 👔 Council assignments

## Troubleshooting

### No messages appearing

1. Check network connectivity: `docker exec hmmm-monitor ping chorus`
2. Verify the container is on the correct network: `docker inspect hmmm-monitor | grep NetworkMode`
3. Check that CHORUS agents are publishing: `docker service logs CHORUS_chorus | grep "broadcast"`

### High CPU usage

The monitor processes all pub/sub traffic. If CPU usage is high, consider:

- Reducing the replicas count
- Filtering logs externally rather than in the container
- Running the monitor only during debugging sessions

## Architecture

The monitor is a minimal libp2p node that:

1. Joins the same libp2p network as CHORUS agents
2. Subscribes to the gossipsub topics
3. Logs all received messages
4. Does NOT publish any messages (read-only)

This makes it safe to run in production without affecting agent behavior. A sketch of such a read-only subscriber appears below.
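The monitor's `main.go` is not shown in this diff. Under the assumptions stated in the README (gossipsub, read-only, same topics), a minimal sketch could look like the following; note that real deployments also need peer discovery or bootstrap wiring, which is omitted here:

```go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/libp2p/go-libp2p"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func main() {
    ctx := context.Background()

    // Join the libp2p network as a plain host; discovery/bootstrap omitted.
    host, err := libp2p.New()
    if err != nil {
        log.Fatal(err)
    }
    defer host.Close()

    // Gossipsub is the router the CHORUS topics use.
    ps, err := pubsub.NewGossipSub(ctx, host)
    if err != nil {
        log.Fatal(err)
    }

    for _, name := range []string{"chorus-bzzz", "chorus-hmmm", "chorus-context"} {
        topic, err := ps.Join(name)
        if err != nil {
            log.Fatal(err)
        }
        sub, err := topic.Subscribe()
        if err != nil {
            log.Fatal(err)
        }
        go func(name string, sub *pubsub.Subscription) {
            for {
                msg, err := sub.Next(ctx)
                if err != nil {
                    return
                }
                // Log only; the monitor never publishes.
                fmt.Printf("[%s] from=%s payload=%s\n", name, msg.ReceivedFrom, msg.Data)
            }
        }(name, sub)
    }

    select {} // block forever
}
```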
34 hmmm-monitor/docker-compose.yml (new file)
@@ -0,0 +1,34 @@
version: '3.8'

services:
  hmmm-monitor:
    build: .
    image: anthonyrawlins/hmmm-monitor:latest
    container_name: hmmm-monitor
    networks:
      - chorus_net
    environment:
      - LOG_LEVEL=info
    restart: unless-stopped
    deploy:
      replicas: 1
      placement:
        constraints:
          - node.hostname == walnut  # Deploy on same node as CHORUS for network access
      resources:
        limits:
          cpus: '0.5'
          memory: 256M
        reservations:
          cpus: '0.1'
          memory: 128M
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

networks:
  chorus_net:
    external: true
    name: CHORUS_chorus_net
113 hmmm-monitor/go.mod (new file)
@@ -0,0 +1,113 @@
module hmmm-monitor

go 1.22

require (
	github.com/libp2p/go-libp2p v0.36.5
	github.com/libp2p/go-libp2p-pubsub v0.12.0
)

require (
	github.com/benbjohnson/clock v1.3.5 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/containerd/cgroups v1.1.0 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/elastic/gosigar v0.14.3 // indirect
	github.com/flynn/noise v1.1.0 // indirect
	github.com/francoispqt/gojay v1.2.13 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/gopacket v1.1.19 // indirect
	github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/websocket v1.5.3 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/huin/goupnp v1.3.0 // indirect
	github.com/ipfs/go-cid v0.4.1 // indirect
	github.com/ipfs/go-log/v2 v2.5.1 // indirect
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
	github.com/klauspost/compress v1.17.9 // indirect
	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
	github.com/koron/go-ssdp v0.0.4 // indirect
	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
	github.com/libp2p/go-msgio v0.3.0 // indirect
	github.com/libp2p/go-nat v0.2.0 // indirect
	github.com/libp2p/go-netroute v0.2.1 // indirect
	github.com/libp2p/go-reuseport v0.4.0 // indirect
	github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/miekg/dns v1.1.62 // indirect
	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/multiformats/go-base32 v0.1.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
	github.com/multiformats/go-multiaddr v0.13.0 // indirect
	github.com/multiformats/go-multiaddr-dns v0.4.0 // indirect
	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
	github.com/multiformats/go-multibase v0.2.0 // indirect
	github.com/multiformats/go-multicodec v0.9.0 // indirect
	github.com/multiformats/go-multihash v0.2.3 // indirect
	github.com/multiformats/go-multistream v0.5.0 // indirect
	github.com/multiformats/go-varint v0.0.7 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/onsi/ginkgo/v2 v2.20.0 // indirect
	github.com/opencontainers/runtime-spec v1.2.0 // indirect
	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
	github.com/pion/datachannel v1.5.8 // indirect
	github.com/pion/dtls/v2 v2.2.12 // indirect
	github.com/pion/ice/v2 v2.3.34 // indirect
	github.com/pion/interceptor v0.1.30 // indirect
	github.com/pion/logging v0.2.2 // indirect
	github.com/pion/mdns v0.0.12 // indirect
	github.com/pion/randutil v0.1.0 // indirect
	github.com/pion/rtcp v1.2.14 // indirect
	github.com/pion/rtp v1.8.9 // indirect
	github.com/pion/sctp v1.8.33 // indirect
	github.com/pion/sdp/v3 v3.0.9 // indirect
	github.com/pion/srtp/v2 v2.0.20 // indirect
	github.com/pion/stun v0.6.1 // indirect
	github.com/pion/transport/v2 v2.2.10 // indirect
	github.com/pion/turn/v2 v2.1.6 // indirect
	github.com/pion/webrtc/v3 v3.3.0 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_golang v1.20.0 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.55.0 // indirect
	github.com/prometheus/procfs v0.15.1 // indirect
	github.com/quic-go/qpack v0.4.0 // indirect
	github.com/quic-go/quic-go v0.46.0 // indirect
	github.com/quic-go/webtransport-go v0.8.0 // indirect
	github.com/raulk/go-watchdog v1.3.0 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/stretchr/testify v1.9.0 // indirect
	github.com/wlynxg/anet v0.0.4 // indirect
	go.uber.org/dig v1.18.0 // indirect
	go.uber.org/fx v1.22.2 // indirect
	go.uber.org/mock v0.4.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	golang.org/x/crypto v0.26.0 // indirect
	golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
	golang.org/x/mod v0.20.0 // indirect
	golang.org/x/net v0.28.0 // indirect
	golang.org/x/sync v0.8.0 // indirect
	golang.org/x/sys v0.24.0 // indirect
	golang.org/x/text v0.17.0 // indirect
	golang.org/x/tools v0.24.0 // indirect
	google.golang.org/protobuf v1.34.2 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	lukechampine.com/blake3 v1.3.0 // indirect
)
538
hmmm-monitor/go.sum
Normal file
538
hmmm-monitor/go.sum
Normal file
@@ -0,0 +1,538 @@
|
|||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
|
||||||
|
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||||
|
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||||
|
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||||
|
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||||
|
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||||
|
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||||
|
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||||
|
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||||
|
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||||
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
|
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||||
|
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||||
|
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||||
|
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||||
|
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
|
||||||
|
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||||
|
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
|
||||||
|
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||||
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
|
||||||
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||||
|
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
|
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||||
|
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
|
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||||
|
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
|
||||||
|
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||||
|
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||||
|
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||||
|
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||||
|
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||||
|
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||||
|
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||||
|
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||||
|
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
|
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||||
|
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||||
|
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
|
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
|
||||||
|
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||||
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.36.5 h1:DoABsaHO0VXwH6pwCs2F6XKAXWYjFMO4HFBoVxTnF9g=
github.com/libp2p/go-libp2p v0.36.5/go.mod h1:CpszAtXxHYOcyvB7K8rSHgnNlh21eKjYbEfLoMerbEI=
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
github.com/libp2p/go-libp2p-pubsub v0.12.0 h1:PENNZjSfk8KYxANRlpipdS7+BfLmOl3L2E/6vSNjbdI=
github.com/libp2p/go-libp2p-pubsub v0.12.0/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ=
github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
github.com/multiformats/go-multiaddr-dns v0.4.0 h1:P76EJ3qzBXpUXZ3twdCDx/kvagMsNo0LMFXpyms/zgU=
github.com/multiformats/go-multiaddr-dns v0.4.0/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw=
github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo=
github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM=
github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ=
github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA=
github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc=
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE=
github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk=
github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw=
github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM=
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk=
github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I=
github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI=
github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y=
github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI=
github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg=
github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw=
github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw=
go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI=
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE=
lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
195
hmmm-monitor/main.go
Normal file
@@ -0,0 +1,195 @@
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "log"
    "os"
    "os/signal"
    "syscall"
    "time"

    "github.com/libp2p/go-libp2p"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/host"
)

// MessageLog represents a logged HMMM/Bzzz message
type MessageLog struct {
    Timestamp time.Time              `json:"timestamp"`
    Topic     string                 `json:"topic"`
    From      string                 `json:"from"`
    Type      string                 `json:"type,omitempty"`
    Payload   map[string]interface{} `json:"payload"`
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Handle graceful shutdown
    sigChan := make(chan os.Signal, 1)
    signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
    go func() {
        <-sigChan
        log.Println("🛑 Shutting down HMMM monitor...")
        cancel()
    }()

    log.Println("🔍 Starting HMMM Traffic Monitor...")

    // Create libp2p host
    h, err := libp2p.New(
        libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0"),
    )
    if err != nil {
        log.Fatal("Failed to create libp2p host:", err)
    }
    defer h.Close()

    log.Printf("📡 Monitor node ID: %s", h.ID().String())
    log.Printf("📍 Listening on: %v", h.Addrs())

    // Create PubSub instance
    ps, err := pubsub.NewGossipSub(ctx, h)
    if err != nil {
        log.Fatal("Failed to create PubSub:", err)
    }

    // Topics to monitor
    topics := []string{
        "chorus-bzzz",         // Main CHORUS coordination topic
        "chorus-hmmm",         // HMMM meta-discussion topic
        "chorus-context",      // Context feedback topic
        "council-formation",   // Council formation broadcasts
        "council-assignments", // Role assignments
    }

    // Subscribe to all topics
    for _, topicName := range topics {
        go monitorTopic(ctx, ps, h, topicName)
    }

    log.Println("✅ HMMM Monitor ready - listening for traffic...")
    log.Println("   Press Ctrl+C to stop")

    // Keep running until context is cancelled
    <-ctx.Done()
    log.Println("✅ HMMM Monitor stopped")
}

func monitorTopic(ctx context.Context, ps *pubsub.PubSub, h host.Host, topicName string) {
    // Join topic
    topic, err := ps.Join(topicName)
    if err != nil {
        log.Printf("❌ Failed to join topic %s: %v", topicName, err)
        return
    }
    defer topic.Close()

    // Subscribe to topic
    sub, err := topic.Subscribe()
    if err != nil {
        log.Printf("❌ Failed to subscribe to %s: %v", topicName, err)
        return
    }
    defer sub.Cancel()

    log.Printf("👂 Monitoring topic: %s", topicName)

    // Process messages
    for {
        select {
        case <-ctx.Done():
            return
        default:
            msg, err := sub.Next(ctx)
            if err != nil {
                if ctx.Err() != nil {
                    return
                }
                log.Printf("⚠️  Error reading from %s: %v", topicName, err)
                continue
            }

            // Skip messages from ourselves
            if msg.ReceivedFrom == h.ID() {
                continue
            }

            logMessage(topicName, msg)
        }
    }
}

func logMessage(topicName string, msg *pubsub.Message) {
    // Try to parse as JSON
    var payload map[string]interface{}
    if err := json.Unmarshal(msg.Data, &payload); err != nil {
        // Not JSON, log as raw data
        log.Printf("🐝 [%s] from %s: %s", topicName, msg.ReceivedFrom.ShortString(), string(msg.Data))
        return
    }

    // Extract message type if available
    msgType, _ := payload["type"].(string)

    logEntry := MessageLog{
        Timestamp: time.Now(),
        Topic:     topicName,
        From:      msg.ReceivedFrom.ShortString(),
        Type:      msgType,
        Payload:   payload,
    }

    // Pretty print JSON log
    jsonLog, _ := json.MarshalIndent(logEntry, "", "  ")

    // Use emoji based on topic
    emoji := getTopicEmoji(topicName, msgType)

    fmt.Printf("\n%s [%s] from %s\n%s\n", emoji, topicName, msg.ReceivedFrom.ShortString(), jsonLog)
}

func getTopicEmoji(topic, msgType string) string {
    // Topic-based emojis
    switch topic {
    case "chorus-bzzz":
        switch msgType {
        case "availability_broadcast":
            return "📊"
        case "capability_broadcast":
            return "🎯"
        case "task_claim":
            return "✋"
        case "task_progress":
            return "⏳"
        case "task_complete":
            return "✅"
        default:
            return "🐝"
        }
    case "chorus-hmmm":
        switch msgType {
        case "meta_discussion":
            return "💬"
        case "task_help_request":
            return "🆘"
        case "task_help_response":
            return "💡"
        case "escalation_trigger":
            return "🚨"
        default:
            return "🧠"
        }
    case "chorus-context":
        return "📝"
    case "council-formation":
        return "🎭"
    case "council-assignments":
        return "👔"
    default:
        return "📡"
    }
}
451
internal/council/manager.go
Normal file
@@ -0,0 +1,451 @@
|
|||||||
|
package council
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"hash/fnv"
|
||||||
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"chorus/internal/persona"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CouncilOpportunity represents a council formation opportunity from WHOOSH.
|
||||||
|
type CouncilOpportunity struct {
|
||||||
|
CouncilID string `json:"council_id"`
|
||||||
|
ProjectName string `json:"project_name"`
|
||||||
|
Repository string `json:"repository"`
|
||||||
|
ProjectBrief string `json:"project_brief"`
|
||||||
|
CoreRoles []CouncilRole `json:"core_roles"`
|
||||||
|
OptionalRoles []CouncilRole `json:"optional_roles"`
|
||||||
|
UCXLAddress string `json:"ucxl_address"`
|
||||||
|
FormationDeadline time.Time `json:"formation_deadline"`
|
||||||
|
CreatedAt time.Time `json:"created_at"`
|
||||||
|
Metadata map[string]interface{} `json:"metadata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// CouncilRole represents a single role available within a council.
type CouncilRole struct {
    RoleName       string   `json:"role_name"`
    AgentName      string   `json:"agent_name"`
    Required       bool     `json:"required"`
    RequiredSkills []string `json:"required_skills"`
    Description    string   `json:"description"`
}

// RoleProfile mirrors WHOOSH role profile metadata included in claim responses.
type RoleProfile struct {
    RoleName          string   `json:"role_name"`
    DisplayName       string   `json:"display_name"`
    PromptKey         string   `json:"prompt_key"`
    PromptPack        string   `json:"prompt_pack"`
    Capabilities      []string `json:"capabilities"`
    BriefRoutingHint  string   `json:"brief_routing_hint"`
    DefaultBriefOwner bool     `json:"default_brief_owner"`
}

// CouncilBrief carries the high-level brief metadata for an activated council.
type CouncilBrief struct {
    CouncilID         string   `json:"council_id"`
    RoleName          string   `json:"role_name"`
    ProjectName       string   `json:"project_name"`
    Repository        string   `json:"repository"`
    Summary           string   `json:"summary"`
    BriefURL          string   `json:"brief_url"`
    IssueID           *int64   `json:"issue_id"`
    UCXLAddress       string   `json:"ucxl_address"`
    ExpectedArtifacts []string `json:"expected_artifacts"`
    HMMMTopic         string   `json:"hmmm_topic"`
}

// RoleAssignment keeps track of the agent's current council engagement.
type RoleAssignment struct {
    CouncilID   string
    RoleName    string
    UCXLAddress string
    AssignedAt  time.Time
    Profile     RoleProfile
    Brief       *CouncilBrief
    Persona     *persona.Persona
    PersonaHash string
}

var ErrRoleConflict = errors.New("council role already claimed")

const defaultModelProvider = "ollama"

// Manager handles council opportunity evaluation, persona preparation, and brief handoff.
type Manager struct {
    agentID      string
    agentName    string
    endpoint     string
    p2pAddr      string
    capabilities []string

    httpClient    *http.Client
    personaLoader *persona.Loader

    mu                sync.Mutex
    currentAssignment *RoleAssignment
}

// NewManager creates a new council manager.
func NewManager(agentID, agentName, endpoint, p2pAddr string, capabilities []string) *Manager {
    loader, err := persona.NewLoader()
    if err != nil {
        fmt.Printf("⚠️ Persona loader initialisation failed: %v\n", err)
    }

    return &Manager{
        agentID:       agentID,
        agentName:     agentName,
        endpoint:      endpoint,
        p2pAddr:       p2pAddr,
        capabilities:  capabilities,
        httpClient:    &http.Client{Timeout: 10 * time.Second},
        personaLoader: loader,
    }
}

// AgentID returns the agent's identifier.
func (m *Manager) AgentID() string {
    return m.agentID
}

// EvaluateOpportunity analyzes a council opportunity and decides whether to claim a role.
func (m *Manager) EvaluateOpportunity(opportunity *CouncilOpportunity, whooshEndpoint string) error {
    fmt.Printf("\n🤔 Evaluating council opportunity for: %s\n", opportunity.ProjectName)

    if current := m.currentAssignmentSnapshot(); current != nil {
        fmt.Printf("   ℹ️ Agent already assigned to council %s as %s; skipping new claims\n", current.CouncilID, current.RoleName)
        return nil
    }

    const maxAttempts = 10
    const retryDelay = 3 * time.Second

    var attemptedAtLeastOne bool

    for attempt := 1; attempt <= maxAttempts; attempt++ {
        assignment, attemptedCore, err := m.tryClaimRoles(opportunity.CoreRoles, opportunity, whooshEndpoint, "CORE")
        attemptedAtLeastOne = attemptedAtLeastOne || attemptedCore
        if assignment != nil {
            m.setCurrentAssignment(assignment)
            return nil
        }
        if err != nil && !errors.Is(err, ErrRoleConflict) {
            return err
        }

        assignment, attemptedOptional, err := m.tryClaimRoles(opportunity.OptionalRoles, opportunity, whooshEndpoint, "OPTIONAL")
        attemptedAtLeastOne = attemptedAtLeastOne || attemptedOptional
        if assignment != nil {
            m.setCurrentAssignment(assignment)
            return nil
        }
        if err != nil && !errors.Is(err, ErrRoleConflict) {
            return err
        }

        if !attemptedAtLeastOne {
            fmt.Printf("   ✗ No suitable roles found for this agent\n\n")
            return nil
        }

        fmt.Printf("   ↻ Attempt %d did not secure a council role; retrying in %s...\n", attempt, retryDelay)
        time.Sleep(retryDelay)
    }

    return fmt.Errorf("exhausted council role claim attempts for council %s", opportunity.CouncilID)
}
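For orientation, a minimal sketch of how a caller might drive this flow. The identifiers, endpoints, and capability list below are hypothetical, and opportunity is assumed to be a *CouncilOpportunity decoded from a WHOOSH announcement; this is an illustration, not part of the diff above.

    mgr := council.NewManager(
        "agent-123",              // hypothetical agent ID
        "example-agent",          // hypothetical agent name
        "http://agent:8080",      // hypothetical HTTP endpoint
        "/ip4/10.0.0.5/tcp/9000", // hypothetical p2p address
        []string{"go", "code-review"},
    )
    if err := mgr.EvaluateOpportunity(opportunity, "http://whoosh:8080"); err != nil {
        log.Printf("council claim failed: %v", err)
    }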
func (m *Manager) tryClaimRoles(roles []CouncilRole, opportunity *CouncilOpportunity, whooshEndpoint string, roleType string) (*RoleAssignment, bool, error) {
    var attempted bool

    // Shuffle roles deterministically per agent+council to reduce herd on the first role
    shuffled := append([]CouncilRole(nil), roles...)
    if len(shuffled) > 1 {
        h := fnv.New64a()
        _, _ = h.Write([]byte(m.agentID))
        _, _ = h.Write([]byte(opportunity.CouncilID))
        seed := int64(h.Sum64())
        r := rand.New(rand.NewSource(seed))
        r.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
    }

    for _, role := range shuffled {
        if !m.shouldClaimRole(role, opportunity) {
            continue
        }

        attempted = true
        fmt.Printf("   ✓ Attempting to claim %s role: %s (%s)\n", roleType, role.AgentName, role.RoleName)

        assignment, err := m.claimRole(opportunity, role, whooshEndpoint)
        if assignment != nil {
            return assignment, attempted, nil
        }

        if errors.Is(err, ErrRoleConflict) {
            fmt.Printf("   ⚠️ Role %s already claimed by another agent, trying next role...\n", role.RoleName)
            continue
        }

        if err != nil {
            return nil, attempted, err
        }
    }

    return nil, attempted, nil
}
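The FNV-based shuffle above is deterministic for a given agent and council: one agent always tries roles in the same order for the same council, while different agents start from different roles, which spreads claim attempts without any coordination. A standalone sketch of the seeding idea (IDs hypothetical):

    h := fnv.New64a()
    _, _ = h.Write([]byte("agent-123"))   // hypothetical agent ID
    _, _ = h.Write([]byte("council-456")) // hypothetical council ID
    r := rand.New(rand.NewSource(int64(h.Sum64())))
    // r now yields the same permutation on every run for this agent+council pair.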
func (m *Manager) shouldClaimRole(role CouncilRole, _ *CouncilOpportunity) bool {
    if m.hasActiveAssignment() {
        return false
    }
    // TODO: implement capability-based selection. For now, opportunistically claim any available role.
    return true
}

func (m *Manager) claimRole(opportunity *CouncilOpportunity, role CouncilRole, whooshEndpoint string) (*RoleAssignment, error) {
    claimURL := fmt.Sprintf("%s/api/v1/councils/%s/claims", strings.TrimRight(whooshEndpoint, "/"), opportunity.CouncilID)

    claim := map[string]interface{}{
        "agent_id":     m.agentID,
        "agent_name":   m.agentName,
        "role_name":    role.RoleName,
        "capabilities": m.capabilities,
        "confidence":   0.75, // TODO: calculate based on capability match quality.
        "reasoning":    fmt.Sprintf("Agent has capabilities matching role: %s", role.RoleName),
        "endpoint":     m.endpoint,
        "p2p_addr":     m.p2pAddr,
    }

    payload, err := json.Marshal(claim)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal claim: %w", err)
    }

    req, err := http.NewRequest(http.MethodPost, claimURL, bytes.NewBuffer(payload))
    if err != nil {
        return nil, fmt.Errorf("failed to create claim request: %w", err)
    }
    req.Header.Set("Content-Type", "application/json")

    resp, err := m.httpClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("failed to send claim: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
        var errorResp map[string]interface{}
        _ = json.NewDecoder(resp.Body).Decode(&errorResp)

        if resp.StatusCode == http.StatusConflict {
            reason := "role already claimed"
            if msg, ok := errorResp["error"].(string); ok && msg != "" {
                reason = msg
            }
            return nil, fmt.Errorf("%w: %s", ErrRoleConflict, reason)
        }

        return nil, fmt.Errorf("claim rejected (status %d): %v", resp.StatusCode, errorResp)
    }

    var claimResp roleClaimResponse
    if err := json.NewDecoder(resp.Body).Decode(&claimResp); err != nil {
        return nil, fmt.Errorf("failed to decode claim response: %w", err)
    }

    assignment := &RoleAssignment{
        CouncilID:   opportunity.CouncilID,
        RoleName:    role.RoleName,
        UCXLAddress: claimResp.UCXLAddress,
        Profile:     claimResp.RoleProfile,
    }

    if t, err := time.Parse(time.RFC3339, claimResp.AssignedAt); err == nil {
        assignment.AssignedAt = t
    }

    if claimResp.CouncilBrief != nil {
        assignment.Brief = claimResp.CouncilBrief
    }

    fmt.Printf("\n✅ ROLE CLAIM ACCEPTED!\n")
    fmt.Printf("   Council ID: %s\n", opportunity.CouncilID)
    fmt.Printf("   Role: %s (%s)\n", role.AgentName, role.RoleName)
    fmt.Printf("   UCXL: %s\n", assignment.UCXLAddress)
    fmt.Printf("   Assigned At: %s\n", claimResp.AssignedAt)

    if err := m.preparePersonaAndAck(opportunity.CouncilID, role.RoleName, &assignment.Profile, claimResp.CouncilBrief, whooshEndpoint, assignment); err != nil {
        fmt.Printf("   ⚠️ Persona preparation encountered an issue: %v\n", err)
    }

    fmt.Printf("\n")
    return assignment, nil
}
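Based on the map assembled in claimRole, the request body POSTed to /api/v1/councils/{council_id}/claims looks roughly like this; the field names come from the code above, while all values are illustrative:

    {
      "agent_id": "agent-123",
      "agent_name": "example-agent",
      "role_name": "backend-engineer",
      "capabilities": ["go", "code-review"],
      "confidence": 0.75,
      "reasoning": "Agent has capabilities matching role: backend-engineer",
      "endpoint": "http://agent:8080",
      "p2p_addr": "/ip4/10.0.0.5/tcp/9000"
    }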
func (m *Manager) preparePersonaAndAck(councilID, roleName string, profile *RoleProfile, brief *CouncilBrief, whooshEndpoint string, assignment *RoleAssignment) error {
    if m.personaLoader == nil {
        return m.sendPersonaAck(councilID, roleName, whooshEndpoint, nil, "", "failed", []string{"persona loader unavailable"})
    }

    promptKey := profile.PromptKey
    if promptKey == "" {
        promptKey = roleName
    }

    personaCapabilities := profile.Capabilities
    personaCapabilities = append([]string{}, personaCapabilities...)

    personaEntry, err := m.personaLoader.Compose(promptKey, profile.DisplayName, "", personaCapabilities)
    if err != nil {
        return m.sendPersonaAck(councilID, roleName, whooshEndpoint, nil, "", "failed", []string{err.Error()})
    }

    hash := sha256.Sum256([]byte(personaEntry.SystemPrompt))
    personaHash := hex.EncodeToString(hash[:])

    assignment.Persona = personaEntry
    assignment.PersonaHash = personaHash

    if err := m.sendPersonaAck(councilID, roleName, whooshEndpoint, personaEntry, personaHash, "loaded", nil); err != nil {
        return err
    }

    return nil
}

func (m *Manager) sendPersonaAck(councilID, roleName, whooshEndpoint string, personaEntry *persona.Persona, personaHash string, status string, errs []string) error {
    ackURL := fmt.Sprintf("%s/api/v1/councils/%s/roles/%s/personas", strings.TrimRight(whooshEndpoint, "/"), councilID, roleName)

    payload := map[string]interface{}{
        "agent_id":       m.agentID,
        "status":         status,
        "model_provider": defaultModelProvider,
        "capabilities":   m.capabilities,
        "metadata": map[string]interface{}{
            "endpoint":   m.endpoint,
            "p2p_addr":   m.p2pAddr,
            "agent_name": m.agentName,
        },
    }

    if personaEntry != nil {
        payload["system_prompt_hash"] = personaHash
        payload["model_name"] = personaEntry.Model
        if len(personaEntry.Capabilities) > 0 {
            payload["capabilities"] = personaEntry.Capabilities
        }
    }

    if len(errs) > 0 {
        payload["errors"] = errs
    }

    body, err := json.Marshal(payload)
    if err != nil {
        return fmt.Errorf("marshal persona ack: %w", err)
    }

    req, err := http.NewRequest(http.MethodPost, ackURL, bytes.NewBuffer(body))
    if err != nil {
        return fmt.Errorf("create persona ack request: %w", err)
    }
    req.Header.Set("Content-Type", "application/json")

    resp, err := m.httpClient.Do(req)
    if err != nil {
        return fmt.Errorf("send persona ack: %w", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
        return fmt.Errorf("persona ack rejected with status %d", resp.StatusCode)
    }

    fmt.Printf("   📫 Persona status '%s' acknowledged by WHOOSH\n", status)
    return nil
}
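Putting sendPersonaAck together, a successful "loaded" acknowledgement posted to /api/v1/councils/{council_id}/roles/{role_name}/personas carries roughly this shape; the keys are taken from the payload map above, the values are illustrative:

    {
      "agent_id": "agent-123",
      "status": "loaded",
      "model_provider": "ollama",
      "system_prompt_hash": "<hex-encoded sha256 of the composed system prompt>",
      "model_name": "llama3.1:8b",
      "capabilities": ["go", "code-review"],
      "metadata": {
        "endpoint": "http://agent:8080",
        "p2p_addr": "/ip4/10.0.0.5/tcp/9000",
        "agent_name": "example-agent"
      }
    }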
// HandleCouncilBrief records the design brief assigned to this agent once WHOOSH dispatches it.
func (m *Manager) HandleCouncilBrief(councilID, roleName string, brief *CouncilBrief) {
    if brief == nil {
        return
    }

    m.mu.Lock()
    defer m.mu.Unlock()

    if m.currentAssignment == nil {
        fmt.Printf("⚠️ Received council brief for %s (%s) but agent has no active assignment\n", councilID, roleName)
        return
    }

    if m.currentAssignment.CouncilID != councilID || !strings.EqualFold(m.currentAssignment.RoleName, roleName) {
        fmt.Printf("⚠️ Received council brief for %s (%s) but agent is assigned to %s (%s)\n", councilID, roleName, m.currentAssignment.CouncilID, m.currentAssignment.RoleName)
        return
    }

    brief.CouncilID = councilID
    brief.RoleName = roleName
    m.currentAssignment.Brief = brief

    fmt.Printf("📦 Design brief received for council %s (%s)\n", councilID, roleName)
    if brief.BriefURL != "" {
        fmt.Printf("   Brief URL: %s\n", brief.BriefURL)
    }
    if brief.Summary != "" {
        fmt.Printf("   Summary: %s\n", brief.Summary)
    }
    if len(brief.ExpectedArtifacts) > 0 {
        fmt.Printf("   Expected Artifacts: %v\n", brief.ExpectedArtifacts)
    }
    if brief.HMMMTopic != "" {
        fmt.Printf("   HMMM Topic: %s\n", brief.HMMMTopic)
    }
}

func (m *Manager) hasActiveAssignment() bool {
    m.mu.Lock()
    defer m.mu.Unlock()
    return m.currentAssignment != nil
}

func (m *Manager) setCurrentAssignment(assignment *RoleAssignment) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.currentAssignment = assignment
}

func (m *Manager) currentAssignmentSnapshot() *RoleAssignment {
    m.mu.Lock()
    defer m.mu.Unlock()
    return m.currentAssignment
}

// GetCurrentAssignment returns the current role assignment (public accessor)
func (m *Manager) GetCurrentAssignment() *RoleAssignment {
    return m.currentAssignmentSnapshot()
}
// roleClaimResponse mirrors WHOOSH role claim response payload.
type roleClaimResponse struct {
    Status        string        `json:"status"`
    CouncilID     string        `json:"council_id"`
    RoleName      string        `json:"role_name"`
    UCXLAddress   string        `json:"ucxl_address"`
    AssignedAt    string        `json:"assigned_at"`
    RoleProfile   RoleProfile   `json:"role_profile"`
    CouncilBrief  *CouncilBrief `json:"council_brief"`
    PersonaStatus string        `json:"persona_status"`
}
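For reference, a claim response matching the struct above might look like this; the field names follow the JSON tags, while all values (including the status string) are illustrative rather than taken from the WHOOSH source:

    {
      "status": "accepted",
      "council_id": "council-456",
      "role_name": "backend-engineer",
      "ucxl_address": "ucxl://example/address",
      "assigned_at": "2025-10-01T12:00:00Z",
      "role_profile": {
        "role_name": "backend-engineer",
        "display_name": "Backend Engineer",
        "prompt_key": "backend-engineer",
        "capabilities": ["go"]
      },
      "council_brief": null,
      "persona_status": "pending"
    }

Note that assigned_at must be RFC 3339, since claimRole parses it with time.Parse(time.RFC3339).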
@@ -1,12 +1,18 @@
package runtime

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"chorus/internal/council"
	"chorus/internal/logging"
	"chorus/pkg/ai"
	"chorus/pkg/dht"
	"chorus/pkg/execution"
	"chorus/pkg/health"
	"chorus/pkg/shutdown"
	"chorus/pubsub"
@@ -39,6 +45,10 @@ func (r *SharedRuntime) StartAgentMode() error {
	// Start status reporting
	go r.statusReporter()

	// Start council brief processing
	ctx := context.Background()
	go r.processBriefs(ctx)

	r.Logger.Info("🔍 Listening for peers on container network...")
	r.Logger.Info("📡 Ready for task coordination and meta-discussion")
	r.Logger.Info("🎯 HMMM collaborative reasoning enabled")
@@ -321,3 +331,206 @@ func (r *SharedRuntime) setupGracefulShutdown(shutdownManager *shutdown.Manager,

	r.Logger.Info("🛡️ Graceful shutdown components registered")
}

// processBriefs polls for council briefs and executes them
func (r *SharedRuntime) processBriefs(ctx context.Context) {
	ticker := time.NewTicker(15 * time.Second)
	defer ticker.Stop()

	r.Logger.Info("📦 Brief processing loop started")

	for {
		select {
		case <-ctx.Done():
			r.Logger.Info("📦 Brief processing loop stopped")
			return
		case <-ticker.C:
			if r.HTTPServer == nil || r.HTTPServer.CouncilManager == nil {
				continue
			}

			assignment := r.HTTPServer.CouncilManager.GetCurrentAssignment()
			if assignment == nil || assignment.Brief == nil {
				continue
			}

			// Check if we have a brief to execute
			brief := assignment.Brief
			if brief.BriefURL == "" && brief.Summary == "" {
				continue
			}

			r.Logger.Info("📦 Processing design brief for council %s, role %s", assignment.CouncilID, assignment.RoleName)

			// Execute the brief
			if err := r.executeBrief(ctx, assignment); err != nil {
				r.Logger.Error("❌ Failed to execute brief: %v", err)
				continue
			}

			r.Logger.Info("✅ Brief execution completed for council %s", assignment.CouncilID)

			// Clear the brief after execution to prevent re-execution
			assignment.Brief = nil
		}
	}
}
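Because processBriefs exits only when its context is cancelled, a caller that wants a clean stop can hold on to the cancel function instead of passing context.Background() directly. A sketch, not what StartAgentMode currently does:

    ctx, cancel := context.WithCancel(context.Background())
    go r.processBriefs(ctx)
    // ... later, during shutdown:
    cancel() // the loop logs "Brief processing loop stopped" and returns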
// executeBrief executes a council brief using the ExecutionEngine
func (r *SharedRuntime) executeBrief(ctx context.Context, assignment *council.RoleAssignment) error {
	brief := assignment.Brief
	if brief == nil {
		return fmt.Errorf("no brief to execute")
	}

	// Create execution engine
	engine := execution.NewTaskExecutionEngine()

	// Create AI provider factory with proper configuration
	aiFactory := ai.NewProviderFactory()

	// Register the configured provider
	providerConfig := ai.ProviderConfig{
		Type:         r.Config.AI.Provider,
		Endpoint:     r.Config.AI.Ollama.Endpoint,
		DefaultModel: "llama3.1:8b",
		Timeout:      r.Config.AI.Ollama.Timeout,
	}

	if err := aiFactory.RegisterProvider(r.Config.AI.Provider, providerConfig); err != nil {
		r.Logger.Warn("⚠️ Failed to register AI provider: %v", err)
	}

	// Set role mapping with default provider
	// This ensures GetProviderForRole() can find a provider for any role
	roleMapping := ai.RoleModelMapping{
		DefaultProvider:  r.Config.AI.Provider,
		FallbackProvider: r.Config.AI.Provider,
		Roles:            make(map[string]ai.RoleConfig),
	}
	aiFactory.SetRoleMapping(roleMapping)

	engineConfig := &execution.EngineConfig{
		AIProviderFactory:  aiFactory,
		MaxConcurrentTasks: 1,
		DefaultTimeout:     time.Hour,
		EnableMetrics:      true,
		LogLevel:           "info",
	}

	if err := engine.Initialize(ctx, engineConfig); err != nil {
		return fmt.Errorf("failed to initialize execution engine: %w", err)
	}
	defer engine.Shutdown()

	// Build execution request
	request := r.buildExecutionRequest(assignment)

	r.Logger.Info("🚀 Executing brief for council %s, role %s", assignment.CouncilID, assignment.RoleName)

	// Track task
	taskID := fmt.Sprintf("council-%s-%s", assignment.CouncilID, assignment.RoleName)
	r.TaskTracker.AddTask(taskID)
	defer r.TaskTracker.RemoveTask(taskID)

	// Execute the task
	result, err := engine.ExecuteTask(ctx, request)
	if err != nil {
		return fmt.Errorf("task execution failed: %w", err)
	}

	r.Logger.Info("✅ Task execution successful. Output: %s", result.Output)

	// Upload results to WHOOSH
	if err := r.uploadResults(assignment, result); err != nil {
		r.Logger.Error("⚠️ Failed to upload results to WHOOSH: %v", err)
		// Don't fail the execution if upload fails
	}

	return nil
}
// buildExecutionRequest converts a council brief to an execution request
func (r *SharedRuntime) buildExecutionRequest(assignment *council.RoleAssignment) *execution.TaskExecutionRequest {
	brief := assignment.Brief

	// Build task description from brief
	taskDescription := brief.Summary
	if taskDescription == "" {
		taskDescription = "Execute council brief"
	}

	// Add additional context
	additionalContext := map[string]interface{}{
		"council_id":         assignment.CouncilID,
		"role_name":          assignment.RoleName,
		"brief_url":          brief.BriefURL,
		"expected_artifacts": brief.ExpectedArtifacts,
		"hmmm_topic":         brief.HMMMTopic,
		"persona":            assignment.Persona,
	}

	return &execution.TaskExecutionRequest{
		ID:          fmt.Sprintf("council-%s-%s", assignment.CouncilID, assignment.RoleName),
		Type:        "council_brief",
		Description: taskDescription,
		Context:     additionalContext,
		Requirements: &execution.TaskRequirements{
			AIModel:       r.Config.AI.Provider,
			SandboxType:   "docker",
			RequiredTools: []string{},
		},
		Timeout: time.Hour,
	}
}
// uploadResults uploads execution results to WHOOSH
func (r *SharedRuntime) uploadResults(assignment *council.RoleAssignment, result *execution.TaskExecutionResult) error {
	// Get WHOOSH endpoint from environment or config
	whooshEndpoint := r.Config.WHOOSHAPI.BaseURL
	if whooshEndpoint == "" {
		whooshEndpoint = "http://whoosh:8080"
	}

	// Build result payload
	payload := map[string]interface{}{
		"council_id":     assignment.CouncilID,
		"role_name":      assignment.RoleName,
		"agent_id":       r.Config.Agent.ID,
		"ucxl_address":   assignment.UCXLAddress,
		"output":         result.Output,
		"artifacts":      result.Artifacts,
		"success":        result.Success,
		"error_message":  result.ErrorMessage,
		"execution_time": result.Metrics.Duration.Seconds(),
		"timestamp":      time.Now().Unix(),
	}

	jsonData, err := json.Marshal(payload)
	if err != nil {
		return fmt.Errorf("failed to marshal result payload: %w", err)
	}

	// Send to WHOOSH
	url := fmt.Sprintf("%s/api/councils/%s/results", whooshEndpoint, assignment.CouncilID)
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		return fmt.Errorf("failed to create HTTP request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")

	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send results to WHOOSH: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("WHOOSH returned status %d", resp.StatusCode)
	}

	r.Logger.Info("📤 Results uploaded to WHOOSH for council %s", assignment.CouncilID)
	return nil
}
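From the payload map in uploadResults, the body POSTed to /api/councils/{council_id}/results has roughly this shape; the keys come from the code above, the values are illustrative:

    {
      "council_id": "council-456",
      "role_name": "backend-engineer",
      "agent_id": "agent-123",
      "ucxl_address": "ucxl://example/address",
      "output": "<model output>",
      "artifacts": [],
      "success": true,
      "error_message": "",
      "execution_time": 42.5,
      "timestamp": 1759276800
    }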
@@ -2,8 +2,8 @@ package runtime

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"path/filepath"
@@ -16,11 +16,13 @@ import (
	"chorus/internal/backbeat"
	"chorus/internal/licensing"
	"chorus/internal/logging"
	councilnats "chorus/internal/nats"
	"chorus/p2p"
	"chorus/pkg/config"
	"chorus/pkg/dht"
	"chorus/pkg/election"
	"chorus/pkg/health"
	"chorus/pkg/mcp"
	"chorus/pkg/metrics"
	"chorus/pkg/prompt"
	"chorus/pkg/shhh"
@@ -31,29 +33,38 @@ import (
	"chorus/reasoning"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
	"github.com/rs/zerolog"
)

// Build information - set by main package
var (
	AppName       = "CHORUS"
	AppVersion    = "0.1.0-dev"
	AppVersion    = "0.5.32"
	AppCommitHash = "unknown"
	AppBuildDate  = "unknown"
)

// SimpleLogger provides basic logging implementation
// SimpleLogger provides structured logging implementation via zerolog
type SimpleLogger struct{}
type SimpleLogger struct {
	logger zerolog.Logger
}

func NewSimpleLogger(component string) *SimpleLogger {
	return &SimpleLogger{
		logger: logging.ForComponent(component),
	}
}

func (l *SimpleLogger) Info(msg string, args ...interface{}) {
	log.Printf("[INFO] "+msg, args...)
	l.logger.Info().Msgf(msg, args...)
}

func (l *SimpleLogger) Warn(msg string, args ...interface{}) {
	log.Printf("[WARN] "+msg, args...)
	l.logger.Warn().Msgf(msg, args...)
}

func (l *SimpleLogger) Error(msg string, args ...interface{}) {
	log.Printf("[ERROR] "+msg, args...)
	l.logger.Error().Msgf(msg, args...)
}

// SimpleTaskTracker tracks active tasks for availability reporting
@@ -61,6 +72,7 @@ type SimpleTaskTracker struct {
	maxTasks          int
	activeTasks       map[string]bool
	decisionPublisher *ucxl.DecisionPublisher
	logger            zerolog.Logger
}

// GetActiveTasks returns list of active task IDs
@@ -99,9 +111,14 @@ func (t *SimpleTaskTracker) publishTaskCompletion(taskID string, success bool, s
	}

	if err := t.decisionPublisher.PublishTaskCompletion(taskID, success, summary, filesModified); err != nil {
		fmt.Printf("⚠️ Failed to publish task completion for %s: %v\n", taskID, err)
		t.logger.Warn().
			Err(err).
			Str("task_id", taskID).
			Msg("Failed to publish task completion")
	} else {
		fmt.Printf("📤 Published task completion decision for: %s\n", taskID)
		t.logger.Debug().
			Str("task_id", taskID).
			Msg("Published task completion decision")
	}
}
@@ -130,52 +147,53 @@ type SharedRuntime struct {
	TaskTracker       *SimpleTaskTracker
	Metrics           *metrics.CHORUSMetrics
	Shhh              *shhh.Sentinel
	CouncilSubscriber *councilnats.CouncilSubscriber
}

// Initialize sets up all shared P2P infrastructure components
func Initialize(appMode string) (*SharedRuntime, error) {
	runtime := &SharedRuntime{}
	runtime.Logger = &SimpleLogger{}
	runtime.Logger = NewSimpleLogger(logging.ComponentRuntime)

	ctx, cancel := context.WithCancel(context.Background())
	runtime.Context = ctx
	runtime.Cancel = cancel

	runtime.Logger.Info("🎭 Starting CHORUS v%s (build: %s, %s) - Container-First P2P Task Coordination", AppVersion, AppCommitHash, AppBuildDate)
	runtime.Logger.Info("Starting CHORUS v%s (build: %s, %s) - Container-First P2P Task Coordination", AppVersion, AppCommitHash, AppBuildDate)
	runtime.Logger.Info("📦 Container deployment - Mode: %s", appMode)

	// Load configuration from environment (no config files in containers)
	runtime.Logger.Info("📋 Loading configuration from environment variables...")
	runtime.Logger.Info("Loading configuration from environment variables...")
	cfg, err := config.LoadFromEnvironment()
	if err != nil {
		return nil, fmt.Errorf("configuration error: %v", err)
	}
	runtime.Config = cfg

	runtime.Logger.Info("✅ Configuration loaded successfully")
	runtime.Logger.Info("Configuration loaded successfully")

	// Initialize runtime configuration with assignment support
	runtime.RuntimeConfig = config.NewRuntimeConfig(cfg)

	// Load assignment if ASSIGN_URL is configured
	if assignURL := os.Getenv("ASSIGN_URL"); assignURL != "" {
		runtime.Logger.Info("📡 Loading assignment from WHOOSH: %s", assignURL)
		runtime.Logger.Info("Loading assignment from WHOOSH: %s", assignURL)

		ctx, cancel := context.WithTimeout(runtime.Context, 10*time.Second)
		if err := runtime.RuntimeConfig.LoadAssignment(ctx, assignURL); err != nil {
			runtime.Logger.Warn("⚠️ Failed to load assignment (continuing with base config): %v", err)
			runtime.Logger.Warn("Failed to load assignment (continuing with base config): %v", err)
		} else {
			runtime.Logger.Info("✅ Assignment loaded successfully")
			runtime.Logger.Info("Assignment loaded successfully")
		}
		cancel()

		// Start reload handler for SIGHUP
		runtime.RuntimeConfig.StartReloadHandler(runtime.Context, assignURL)
		runtime.Logger.Info("📡 SIGHUP reload handler started for assignment updates")
		runtime.Logger.Info("SIGHUP reload handler started for assignment updates")
	} else {
		runtime.Logger.Info("⚪ No ASSIGN_URL configured, using static configuration")
	}
	runtime.Logger.Info("🤖 Agent ID: %s", cfg.Agent.ID)
	runtime.Logger.Info("Agent ID: %s", cfg.Agent.ID)
	runtime.Logger.Info("🎯 Specialization: %s", cfg.Agent.Specialization)

	// CRITICAL: Validate license before any P2P operations
@@ -184,18 +202,19 @@ func Initialize(appMode string) (*SharedRuntime, error) {
		LicenseID:  cfg.License.LicenseID,
		ClusterID:  cfg.License.ClusterID,
		KachingURL: cfg.License.KachingURL,
		Version:    AppVersion,
	})
	if err := licenseValidator.Validate(); err != nil {
		return nil, fmt.Errorf("license validation failed: %v", err)
	}
	runtime.Logger.Info("✅ License validation successful - CHORUS authorized to run")
	runtime.Logger.Info("License validation successful - CHORUS authorized to run")

	// Initialize AI provider configuration
	runtime.Logger.Info("🧠 Configuring AI provider: %s", cfg.AI.Provider)
	if err := initializeAIProvider(cfg, runtime.Logger); err != nil {
		return nil, fmt.Errorf("AI provider initialization failed: %v", err)
	}
	runtime.Logger.Info("✅ AI provider configured successfully")
	runtime.Logger.Info("AI provider configured successfully")

	// Initialize metrics collector
	runtime.Metrics = metrics.NewCHORUSMetrics(nil)
@@ -216,11 +235,11 @@ func Initialize(appMode string) (*SharedRuntime, error) {
	var backbeatIntegration *backbeat.Integration
	backbeatIntegration, err = backbeat.NewIntegration(cfg, cfg.Agent.ID, runtime.Logger)
	if err != nil {
		runtime.Logger.Warn("⚠️ BACKBEAT integration initialization failed: %v", err)
		runtime.Logger.Warn("BACKBEAT integration initialization failed: %v", err)
		runtime.Logger.Info("📍 P2P operations will run without beat synchronization")
	} else {
		if err := backbeatIntegration.Start(ctx); err != nil {
			runtime.Logger.Warn("⚠️ Failed to start BACKBEAT integration: %v", err)
			runtime.Logger.Warn("Failed to start BACKBEAT integration: %v", err)
			backbeatIntegration = nil
		} else {
			runtime.Logger.Info("🎵 BACKBEAT integration started successfully")
@@ -228,6 +247,29 @@ func Initialize(appMode string) (*SharedRuntime, error) {
		}
	}
	runtime.BackbeatIntegration = backbeatIntegration

	// Fetch bootstrap peers from WHOOSH for P2P mesh formation
	runtime.Logger.Info("Fetching bootstrap peers from WHOOSH...")
	bootstrapPeers, err := fetchBootstrapPeers(cfg.WHOOSHAPI.BaseURL, runtime.Logger)
	if err != nil {
		runtime.Logger.Warn("Failed to fetch bootstrap peers from WHOOSH: %v", err)
		runtime.Logger.Info("Falling back to static bootstrap configuration")
		bootstrapPeers = getStaticBootstrapPeers(runtime.Logger)
	} else {
		runtime.Logger.Info("Fetched %d bootstrap peers from WHOOSH", len(bootstrapPeers))
	}

	// Set bootstrap peers in config for P2P node initialization
	if len(bootstrapPeers) > 0 {
		cfg.V2.DHT.BootstrapPeers = make([]string, len(bootstrapPeers))
		for i, peer := range bootstrapPeers {
			for _, addr := range peer.Addrs {
				// Convert to full multiaddr with peer ID
				cfg.V2.DHT.BootstrapPeers[i] = fmt.Sprintf("%s/p2p/%s", addr.String(), peer.ID.String())
				break // Use first address
			}
		}
	}

	// Initialize P2P node
	node, err := p2p.NewNode(ctx)
	if err != nil {
@@ -242,6 +284,35 @@ func Initialize(appMode string) (*SharedRuntime, error) {
		runtime.Logger.Info("   %s/p2p/%s", addr, node.ID())
	}

	// Wait for bootstrap peers to connect before proceeding
	// This prevents election race conditions where elections start before peer discovery
	// Increased from 5s to 15s to allow more time for P2P mesh formation
	if len(bootstrapPeers) > 0 {
		runtime.Logger.Info("Waiting 15 seconds for bootstrap peer connections to establish...")
		runtime.Logger.Info("   Target peers: %d bootstrap peers", len(bootstrapPeers))

		// Poll connectivity every 3 seconds to provide feedback
		for i := 0; i < 5; i++ {
			time.Sleep(3 * time.Second)
			connectedPeers := len(node.Peers())
			runtime.Logger.Info("   [%ds] Connected to %d peers", (i+1)*3, connectedPeers)

			// If we've connected to at least half the bootstrap peers, we're in good shape
			if connectedPeers >= len(bootstrapPeers)/2 && connectedPeers > 0 {
				runtime.Logger.Info("Bootstrap connectivity achieved (%d/%d peers), proceeding early",
					connectedPeers, len(bootstrapPeers))
				break
			}
		}

		finalConnected := len(node.Peers())
		if finalConnected == 0 {
			runtime.Logger.Warn("Bootstrap complete but NO peers connected - mesh may be isolated")
		} else {
			runtime.Logger.Info("Bootstrap grace period complete - %d peers connected", finalConnected)
		}
	}

	// Initialize Hypercore-style logger for P2P coordination
	hlog := logging.NewHypercoreLog(node.ID())
	if runtime.Shhh != nil {
@@ -268,7 +339,7 @@ func Initialize(appMode string) (*SharedRuntime, error) {
	}
	runtime.PubSub = ps

	runtime.Logger.Info("📡 PubSub system initialized")
	runtime.Logger.Info("PubSub system initialized")

	// Join role-based topics if role is configured
	if cfg.Agent.Role != "" {
@@ -277,7 +348,7 @@ func Initialize(appMode string) (*SharedRuntime, error) {
			reportsTo = []string{cfg.Agent.ReportsTo}
		}
		if err := ps.JoinRoleBasedTopics(cfg.Agent.Role, cfg.Agent.Expertise, reportsTo); err != nil {
			runtime.Logger.Warn("⚠️ Failed to join role-based topics: %v", err)
			runtime.Logger.Warn("Failed to join role-based topics: %v", err)
		} else {
			runtime.Logger.Info("🎯 Joined role-based collaboration topics")
		}
@@ -301,7 +372,7 @@ func Initialize(appMode string) (*SharedRuntime, error) {

// Cleanup properly shuts down all runtime components
func (r *SharedRuntime) Cleanup() {
	r.Logger.Info("🔄 Starting graceful shutdown...")
	r.Logger.Info("Starting graceful shutdown...")

	if r.BackbeatIntegration != nil {
		r.BackbeatIntegration.Stop()
@@ -309,7 +380,7 @@ func (r *SharedRuntime) Cleanup() {

	if r.MDNSDiscovery != nil {
		r.MDNSDiscovery.Close()
		r.Logger.Info("🔍 mDNS discovery closed")
		r.Logger.Info("mDNS discovery closed")
	}

	if r.PubSub != nil {
@@ -328,6 +399,12 @@ func (r *SharedRuntime) Cleanup() {
		r.HTTPServer.Stop()
	}

	if r.CouncilSubscriber != nil {
		if err := r.CouncilSubscriber.Close(); err != nil {
			r.Logger.Warn("Failed to close council NATS subscriber: %v", err)
		}
	}

	if r.UCXIServer != nil {
		r.UCXIServer.Stop()
	}
@@ -340,7 +417,7 @@ func (r *SharedRuntime) Cleanup() {
		r.Cancel()
	}

	r.Logger.Info("✅ CHORUS shutdown completed")
	r.Logger.Info("CHORUS shutdown completed")
}

// Helper methods for initialization (extracted from main.go)
@@ -348,6 +425,15 @@ func (r *SharedRuntime) initializeElectionSystem() error {
	// === Admin Election System ===
	electionManager := election.NewElectionManager(r.Context, r.Config, r.Node.Host(), r.PubSub, r.Node.ID().ShortString())

	if r.BackbeatIntegration != nil {
		electionManager.SetTempoResolver(func() int {
			return r.BackbeatIntegration.CurrentTempoBPM()
		})
		electionManager.SetBeatGapResolver(func() time.Duration {
			return r.BackbeatIntegration.TimeSinceLastBeat()
		})
	}

	// Set election callbacks with BACKBEAT integration
	electionManager.SetCallbacks(
		func(oldAdmin, newAdmin string) {
@@ -371,7 +457,7 @@ func (r *SharedRuntime) initializeElectionSystem() error {
				r.Config.Slurp.Enabled = true
				// Apply admin role configuration
				if err := r.Config.ApplyRoleDefinition("admin"); err != nil {
					r.Logger.Warn("⚠️ Failed to apply admin role: %v", err)
					r.Logger.Warn("Failed to apply admin role: %v", err)
				}
			}
		},
@@ -395,7 +481,7 @@ func (r *SharedRuntime) initializeElectionSystem() error {
		return fmt.Errorf("failed to start election manager: %v", err)
	}
	r.ElectionManager = electionManager
	r.Logger.Info("✅ Election manager started with automated heartbeat management")
	r.Logger.Info("Election manager started with automated heartbeat management")

	return nil
}
@@ -411,7 +497,7 @@ func (r *SharedRuntime) initializeDHTStorage() error {
		var err error
		dhtNode, err = dht.NewLibP2PDHT(r.Context, r.Node.Host())
		if err != nil {
			r.Logger.Warn("⚠️ Failed to create DHT: %v", err)
			r.Logger.Warn("Failed to create DHT: %v", err)
		} else {
			r.Logger.Info("🕸️ DHT initialized")

@@ -423,14 +509,14 @@ func (r *SharedRuntime) initializeDHTStorage() error {
			}

			if err := dhtNode.Bootstrap(); err != nil {
				r.Logger.Warn("⚠️ DHT bootstrap failed: %v", err)
				r.Logger.Warn("DHT bootstrap failed: %v", err)
				r.BackbeatIntegration.FailP2POperation(operationID, err.Error())
			} else {
				r.BackbeatIntegration.CompleteP2POperation(operationID, 1)
			}
		} else {
			if err := dhtNode.Bootstrap(); err != nil {
				r.Logger.Warn("⚠️ DHT bootstrap failed: %v", err)
				r.Logger.Warn("DHT bootstrap failed: %v", err)
			}
		}

@@ -450,14 +536,14 @@ func (r *SharedRuntime) initializeDHTStorage() error {
		for _, addrStr := range bootstrapPeers {
			addr, err := multiaddr.NewMultiaddr(addrStr)
			if err != nil {
				r.Logger.Warn("⚠️ Invalid bootstrap address %s: %v", addrStr, err)
				r.Logger.Warn("Invalid bootstrap address %s: %v", addrStr, err)
				continue
			}

			// Extract peer info from multiaddr
			info, err := peer.AddrInfoFromP2pAddr(addr)
			if err != nil {
				r.Logger.Warn("⚠️ Failed to parse peer info from %s: %v", addrStr, err)
				r.Logger.Warn("Failed to parse peer info from %s: %v", addrStr, err)
				continue
			}

@@ -470,7 +556,7 @@ func (r *SharedRuntime) initializeDHTStorage() error {
				r.BackbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0)

				if err := r.Node.Host().Connect(r.Context, *info); err != nil {
					r.Logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err)
					r.Logger.Warn("Failed to connect to bootstrap peer %s: %v", addrStr, err)
					r.BackbeatIntegration.FailP2POperation(operationID, err.Error())
				} else {
					r.Logger.Info("🔗 Connected to DHT bootstrap peer: %s", addrStr)
@@ -479,7 +565,7 @@ func (r *SharedRuntime) initializeDHTStorage() error {
				}
			} else {
				if err := r.Node.Host().Connect(r.Context, *info); err != nil {
					r.Logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err)
					r.Logger.Warn("Failed to connect to bootstrap peer %s: %v", addrStr, err)
				} else {
					r.Logger.Info("🔗 Connected to DHT bootstrap peer: %s", addrStr)
				}
@@ -507,7 +593,7 @@ func (r *SharedRuntime) initializeDHTStorage() error {
				r.Node.ID().ShortString(),
				r.Config.Agent.ID,
			)
			r.Logger.Info("📤 Decision publisher initialized")
			r.Logger.Info("Decision publisher initialized")
		}
	} else {
		r.Logger.Info("⚪ DHT disabled in configuration")
@@ -525,12 +611,13 @@ func (r *SharedRuntime) initializeServices() error {
	taskTracker := &SimpleTaskTracker{
		maxTasks:    r.Config.Agent.MaxTasks,
		activeTasks: make(map[string]bool),
		logger:      logging.ForComponent(logging.ComponentRuntime),
	}

	// Connect decision publisher to task tracker if available
	if r.DecisionPublisher != nil {
		taskTracker.decisionPublisher = r.DecisionPublisher
		r.Logger.Info("📤 Task completion decisions will be published to DHT")
		r.Logger.Info("Task completion decisions will be published to DHT")
	}
	r.TaskTracker = taskTracker
@@ -547,18 +634,34 @@ func (r *SharedRuntime) initializeServices() error {

	taskCoordinator.Start()
	r.TaskCoordinator = taskCoordinator
	r.Logger.Info("✅ Task coordination system active")
	r.Logger.Info("Task coordination system active")

	// Start HTTP API server
	httpServer := api.NewHTTPServer(r.Config.Network.APIPort, r.HypercoreLog, r.PubSub)
	httpServer := api.NewHTTPServer(r.Config, r.Node, r.HypercoreLog, r.PubSub)
	go func() {
		r.Logger.Info("🌐 HTTP API server starting on :%d", r.Config.Network.APIPort)
		r.Logger.Info("HTTP API server starting on :%d", r.Config.Network.APIPort)
		if err := httpServer.Start(); err != nil && err != http.ErrServerClosed {
			r.Logger.Error("❌ HTTP server error: %v", err)
			r.Logger.Error("HTTP server error: %v", err)
		}
	}()
	r.HTTPServer = httpServer

	// Enable NATS-based council opportunity delivery.
	natsURL := strings.TrimSpace(os.Getenv("CHORUS_COUNCIL_NATS_URL"))
	if natsURL == "" {
		natsURL = strings.TrimSpace(os.Getenv("CHORUS_BACKBEAT_NATS_URL"))
	}
	if natsURL == "" {
		natsURL = "nats://backbeat-nats:4222"
	}

	if subscriber, err := councilnats.NewCouncilSubscriber(natsURL, httpServer.CouncilManager, httpServer.WhooshEndpoint()); err != nil {
		r.Logger.Warn("Council NATS subscriber disabled: %v", err)
	} else {
		r.CouncilSubscriber = subscriber
		r.Logger.Info("Council opportunities via NATS enabled (url=%s)", natsURL)
	}
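The subscriber resolves its NATS URL from CHORUS_COUNCIL_NATS_URL first, then CHORUS_BACKBEAT_NATS_URL, then the hard-coded default, so a deployment can point council traffic at a dedicated bus with a single variable (hostname illustrative):

    CHORUS_COUNCIL_NATS_URL=nats://nats.example.internal:4222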
	// === UCXI Server Integration ===
	var ucxiServer *ucxi.Server
	if r.Config.UCXL.Enabled && r.Config.UCXL.Server.Enabled {
@@ -569,7 +672,7 @@ func (r *SharedRuntime) initializeServices() error {

		storage, err := ucxi.NewBasicContentStorage(storageDir)
		if err != nil {
			r.Logger.Warn("⚠️ Failed to create UCXI storage: %v", err)
			r.Logger.Warn("Failed to create UCXI storage: %v", err)
		} else {
			resolver := ucxi.NewBasicAddressResolver(r.Node.ID().ShortString())
			resolver.SetDefaultTTL(r.Config.UCXL.Resolution.CacheTTL)
@@ -579,14 +682,14 @@ func (r *SharedRuntime) initializeServices() error {
				BasePath: r.Config.UCXL.Server.BasePath,
				Resolver: resolver,
				Storage:  storage,
				Logger:   ucxi.SimpleLogger{},
				Logger:   ucxi.NewSimpleLogger(logging.ComponentUCXI),
			}

			ucxiServer = ucxi.NewServer(ucxiConfig)
			go func() {
				r.Logger.Info("🔗 UCXI server starting on :%d", r.Config.UCXL.Server.Port)
				if err := ucxiServer.Start(); err != nil && err != http.ErrServerClosed {
					r.Logger.Error("❌ UCXI server error: %v", err)
					r.Logger.Error("UCXI server error: %v", err)
				}
			}()
		}
@@ -636,7 +739,7 @@ func initializeAIProvider(cfg *config.Config, logger *SimpleLogger) error {
			Timeout: cfg.AI.ResetData.Timeout,
		}
		reasoning.SetResetDataConfig(resetdataConfig)
		logger.Info("🌐 ResetData AI provider configured - Endpoint: %s, Model: %s",
		logger.Info("ResetData AI provider configured - Endpoint: %s, Model: %s",
			cfg.AI.ResetData.BaseURL, cfg.AI.ResetData.Model)

	case "ollama":
@@ -644,7 +747,7 @@ func initializeAIProvider(cfg *config.Config, logger *SimpleLogger) error {
		logger.Info("🦙 Ollama AI provider configured - Endpoint: %s", cfg.AI.Ollama.Endpoint)

	default:
		logger.Warn("⚠️ Unknown AI provider '%s', defaulting to resetdata", cfg.AI.Provider)
		logger.Warn("Unknown AI provider '%s', defaulting to resetdata", cfg.AI.Provider)
		if cfg.AI.ResetData.APIKey == "" {
			return fmt.Errorf("RESETDATA_API_KEY environment variable is required for default resetdata provider")
		}
@@ -682,5 +785,112 @@ func initializeAIProvider(cfg *config.Config, logger *SimpleLogger) error {
		reasoning.SetDefaultSystemPrompt(d)
	}

	// Initialize LightRAG client if enabled
	if cfg.LightRAG.Enabled {
		lightragConfig := mcp.LightRAGConfig{
			BaseURL: cfg.LightRAG.BaseURL,
			Timeout: cfg.LightRAG.Timeout,
			APIKey:  cfg.LightRAG.APIKey,
		}
		lightragClient := mcp.NewLightRAGClient(lightragConfig)

		// Test connectivity
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		if lightragClient.IsHealthy(ctx) {
			reasoning.SetLightRAGClient(lightragClient)
			logger.Info("📚 LightRAG RAG system enabled - Endpoint: %s, Mode: %s",
				cfg.LightRAG.BaseURL, cfg.LightRAG.DefaultMode)
		} else {
			logger.Warn("LightRAG enabled but server not healthy at %s", cfg.LightRAG.BaseURL)
		}
	}

	return nil
}
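A sketch of the configuration the LightRAG block above consumes. The field names match the code; the concrete values, and how they are mapped from environment variables by config.LoadFromEnvironment, are deployment-specific assumptions:

    cfg.LightRAG.Enabled = true
    cfg.LightRAG.BaseURL = "http://lightrag:9621" // illustrative endpoint
    cfg.LightRAG.Timeout = 30 * time.Second       // illustrative timeout
    cfg.LightRAG.APIKey = ""                      // set only if the server requires auth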
// fetchBootstrapPeers fetches bootstrap peer list from WHOOSH
func fetchBootstrapPeers(whooshURL string, logger *SimpleLogger) ([]peer.AddrInfo, error) {
	client := &http.Client{Timeout: 10 * time.Second}

	url := fmt.Sprintf("%s/api/v1/bootstrap-peers", whooshURL)
	logger.Info("Fetching bootstrap peers from: %s", url)

	resp, err := client.Get(url)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch bootstrap peers: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("bootstrap endpoint returned status %d", resp.StatusCode)
	}

	var result struct {
		BootstrapPeers []struct {
			Multiaddr string `json:"multiaddr"`
			PeerID    string `json:"peer_id"`
			Name      string `json:"name"`
			Priority  int    `json:"priority"`
		} `json:"bootstrap_peers"`
	}

	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, fmt.Errorf("failed to decode bootstrap peers: %w", err)
	}

	// Convert to peer.AddrInfo format
	peers := make([]peer.AddrInfo, 0, len(result.BootstrapPeers))
	for _, bp := range result.BootstrapPeers {
		maddr, err := multiaddr.NewMultiaddr(bp.Multiaddr)
		if err != nil {
			logger.Warn("Invalid multiaddr %s: %v", bp.Multiaddr, err)
			continue
		}

		peerID, err := peer.Decode(bp.PeerID)
		if err != nil {
			logger.Warn("Invalid peer ID %s: %v", bp.PeerID, err)
			continue
		}

		peers = append(peers, peer.AddrInfo{
			ID:    peerID,
			Addrs: []multiaddr.Multiaddr{maddr},
		})

		logger.Info("   Bootstrap peer: %s (%s, priority %d)", bp.Name, bp.PeerID, bp.Priority)
	}

	return peers, nil
}
// getStaticBootstrapPeers returns a static fallback list of bootstrap peers
|
||||||
|
func getStaticBootstrapPeers(logger *SimpleLogger) []peer.AddrInfo {
|
||||||
|
logger.Warn("Using static bootstrap peer configuration (fallback)")
|
||||||
|
|
||||||
|
// Static HMMM monitor peer (if WHOOSH is unavailable)
|
||||||
|
staticPeers := []string{
|
||||||
|
"/ip4/172.27.0.6/tcp/9001/p2p/12D3KooWBhVfNETuGyjsrGwmhny7vnJzP1y7H59oqmq1VAPTzQMW",
|
||||||
|
}
|
||||||
|
|
||||||
|
peers := make([]peer.AddrInfo, 0, len(staticPeers))
|
||||||
|
for _, peerStr := range staticPeers {
|
||||||
|
maddr, err := multiaddr.NewMultiaddr(peerStr)
|
||||||
|
if err != nil {
|
||||||
|
logger.Warn("Invalid static multiaddr %s: %v", peerStr, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
addrInfo, err := peer.AddrInfoFromP2pAddr(maddr)
|
||||||
|
if err != nil {
|
||||||
|
logger.Warn("Failed to parse static peer address %s: %v", peerStr, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
peers = append(peers, *addrInfo)
|
||||||
|
logger.Info(" 📌 Static bootstrap peer: %s", addrInfo.ID.ShortString())
|
||||||
|
}
|
||||||
|
|
||||||
|
return peers
|
||||||
|
}
|
||||||
|
|||||||
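Editor's note: a minimal sketch of how these two helpers would typically compose at startup. The WHOOSH URL and the surrounding wiring are assumptions for illustration; they are not part of this diff.

	// Illustrative wiring (not in this diff): prefer WHOOSH-provided peers,
	// fall back to the static list when the endpoint is unreachable or empty.
	bootstrapPeers, err := fetchBootstrapPeers("http://whoosh:8080", logger) // URL is a placeholder
	if err != nil || len(bootstrapPeers) == 0 {
		bootstrapPeers = getStaticBootstrapPeers(logger)
	}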
@@ -408,13 +408,16 @@ func (p *OllamaProvider) getSupportedModels() []string {

 // parseResponseForActions extracts actions and artifacts from the response
 func (p *OllamaProvider) parseResponseForActions(response string, request *TaskRequest) ([]TaskAction, []Artifact) {
-	var actions []TaskAction
-	var artifacts []Artifact
-
-	// This is a simplified implementation - in reality, you'd parse the response
-	// to extract specific actions like file changes, commands to run, etc.
-
-	// For now, just create a basic action indicating task analysis
+	// Use the response parser to extract structured actions and artifacts
+	parser := NewResponseParser()
+	actions, artifacts := parser.ParseResponse(response)
+
+	// If parser found concrete actions, return them
+	if len(actions) > 0 {
+		return actions, artifacts
+	}
+
+	// Otherwise, create a basic task analysis action as fallback
 	action := TaskAction{
 		Type:   "task_analysis",
 		Target: request.TaskTitle,
@@ -477,10 +477,16 @@ func (p *ResetDataProvider) handleHTTPError(statusCode int, body []byte) *Provid

 // parseResponseForActions extracts actions from the response text
 func (p *ResetDataProvider) parseResponseForActions(response string, request *TaskRequest) ([]TaskAction, []Artifact) {
-	var actions []TaskAction
-	var artifacts []Artifact
-
-	// Create a basic task analysis action
+	// Use the response parser to extract structured actions and artifacts
+	parser := NewResponseParser()
+	actions, artifacts := parser.ParseResponse(response)
+
+	// If parser found concrete actions, return them
+	if len(actions) > 0 {
+		return actions, artifacts
+	}
+
+	// Otherwise, create a basic task analysis action as fallback
 	action := TaskAction{
 		Type:   "task_analysis",
 		Target: request.TaskTitle,
pkg/ai/response_parser.go (new file, 206 lines)
@@ -0,0 +1,206 @@
package ai

import (
	"regexp"
	"strings"
	"time"
)

// ResponseParser extracts actions and artifacts from LLM text responses
type ResponseParser struct{}

// NewResponseParser creates a new response parser instance
func NewResponseParser() *ResponseParser {
	return &ResponseParser{}
}

// ParseResponse extracts structured actions and artifacts from LLM response text
func (rp *ResponseParser) ParseResponse(response string) ([]TaskAction, []Artifact) {
	var actions []TaskAction
	var artifacts []Artifact

	// Extract code blocks with filenames
	fileBlocks := rp.extractFileBlocks(response)
	for _, block := range fileBlocks {
		// Create file creation action
		action := TaskAction{
			Type:      "file_create",
			Target:    block.Filename,
			Content:   block.Content,
			Result:    "File created from LLM response",
			Success:   true,
			Timestamp: time.Now(),
			Metadata: map[string]interface{}{
				"language": block.Language,
			},
		}
		actions = append(actions, action)

		// Create artifact
		artifact := Artifact{
			Name:      block.Filename,
			Type:      "file",
			Path:      block.Filename,
			Content:   block.Content,
			Size:      int64(len(block.Content)),
			CreatedAt: time.Now(),
		}
		artifacts = append(artifacts, artifact)
	}

	// Extract shell commands
	commands := rp.extractCommands(response)
	for _, cmd := range commands {
		action := TaskAction{
			Type:      "command_run",
			Target:    "shell",
			Content:   cmd,
			Result:    "Command extracted from LLM response",
			Success:   true,
			Timestamp: time.Now(),
		}
		actions = append(actions, action)
	}

	return actions, artifacts
}

// FileBlock represents a code block with filename
type FileBlock struct {
	Filename string
	Language string
	Content  string
}

// extractFileBlocks finds code blocks that represent files
func (rp *ResponseParser) extractFileBlocks(response string) []FileBlock {
	var blocks []FileBlock

	// Pattern 1: Markdown code blocks with filename comments
	// ```language
	// // filename: path/to/file.ext
	// content
	// ```
	pattern1 := regexp.MustCompile("(?s)```(\\w+)?\\s*\\n(?://|#)\\s*(?:filename|file|path):\\s*([^\\n]+)\\n(.*?)```")
	matches1 := pattern1.FindAllStringSubmatch(response, -1)
	for _, match := range matches1 {
		if len(match) >= 4 {
			blocks = append(blocks, FileBlock{
				Filename: strings.TrimSpace(match[2]),
				Language: match[1],
				Content:  strings.TrimSpace(match[3]),
			})
		}
	}

	// Pattern 2: Filename in backticks followed by "content" and code block
	// Matches: `filename.ext` ... content ... ```language ... ```
	// This handles cases like:
	// - "file named `hello.sh` ... should have the following content: ```bash ... ```"
	// - "Create `script.py` with this content: ```python ... ```"
	pattern2 := regexp.MustCompile("`([^`]+)`[^`]*?(?:content|code)[^`]*?```([a-z]+)?\\s*\\n([^`]+)```")
	matches2 := pattern2.FindAllStringSubmatch(response, -1)
	for _, match := range matches2 {
		if len(match) >= 4 {
			blocks = append(blocks, FileBlock{
				Filename: strings.TrimSpace(match[1]),
				Language: match[2],
				Content:  strings.TrimSpace(match[3]),
			})
		}
	}

	// Pattern 3: File header notation
	// --- filename: path/to/file.ext ---
	// content
	// --- end ---
	pattern3 := regexp.MustCompile("(?s)---\\s*(?:filename|file):\\s*([^\\n]+)\\s*---\\s*\\n(.*?)\\n---\\s*(?:end)?\\s*---")
	matches3 := pattern3.FindAllStringSubmatch(response, -1)
	for _, match := range matches3 {
		if len(match) >= 3 {
			blocks = append(blocks, FileBlock{
				Filename: strings.TrimSpace(match[1]),
				Language: rp.detectLanguage(match[1]),
				Content:  strings.TrimSpace(match[2]),
			})
		}
	}

	// Pattern 4: Shell script style file creation
	// cat > filename.ext << 'EOF'
	// content
	// EOF
	pattern4 := regexp.MustCompile("(?s)cat\\s*>\\s*([^\\s<]+)\\s*<<\\s*['\"]?EOF['\"]?\\s*\\n(.*?)\\nEOF")
	matches4 := pattern4.FindAllStringSubmatch(response, -1)
	for _, match := range matches4 {
		if len(match) >= 3 {
			blocks = append(blocks, FileBlock{
				Filename: strings.TrimSpace(match[1]),
				Language: rp.detectLanguage(match[1]),
				Content:  strings.TrimSpace(match[2]),
			})
		}
	}

	return blocks
}

// extractCommands extracts shell commands from response
func (rp *ResponseParser) extractCommands(response string) []string {
	var commands []string

	// Pattern: Markdown code blocks marked as bash/sh
	pattern := regexp.MustCompile("(?s)```(?:bash|sh|shell)\\s*\\n(.*?)```")
	matches := pattern.FindAllStringSubmatch(response, -1)
	for _, match := range matches {
		if len(match) >= 2 {
			lines := strings.Split(strings.TrimSpace(match[1]), "\n")
			for _, line := range lines {
				line = strings.TrimSpace(line)
				// Skip comments and empty lines
				if line != "" && !strings.HasPrefix(line, "#") {
					commands = append(commands, line)
				}
			}
		}
	}

	return commands
}

// detectLanguage attempts to detect language from filename extension
func (rp *ResponseParser) detectLanguage(filename string) string {
	ext := ""
	if idx := strings.LastIndex(filename, "."); idx != -1 {
		ext = strings.ToLower(filename[idx+1:])
	}

	languageMap := map[string]string{
		"go":   "go",
		"py":   "python",
		"js":   "javascript",
		"ts":   "typescript",
		"java": "java",
		"cpp":  "cpp",
		"c":    "c",
		"rs":   "rust",
		"sh":   "bash",
		"bash": "bash",
		"yaml": "yaml",
		"yml":  "yaml",
		"json": "json",
		"xml":  "xml",
		"html": "html",
		"css":  "css",
		"md":   "markdown",
		"txt":  "text",
		"sql":  "sql",
		"rb":   "ruby",
		"php":  "php",
	}

	if lang, ok := languageMap[ext]; ok {
		return lang
	}
	return "text"
}
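Editor's note: a rough usage sketch of the parser, from a package that imports chorus/pkg/ai and fmt. The sample LLM reply is invented for illustration; it exercises Pattern 1 (filename comment) and the bash command extractor.

	parser := ai.NewResponseParser()
	reply := "```go\n// filename: hello.go\npackage main\n```\n\n```bash\ngo run hello.go\n```"
	actions, artifacts := parser.ParseResponse(reply)
	for _, a := range actions {
		fmt.Printf("%s -> %s\n", a.Type, a.Target) // file_create -> hello.go, then command_run -> shell
	}
	fmt.Println(len(artifacts)) // 1 (the extracted hello.go file)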
@@ -24,6 +24,7 @@ type Config struct {
 	Slurp     SlurpConfig     `yaml:"slurp"`
 	Security  SecurityConfig  `yaml:"security"`
 	WHOOSHAPI WHOOSHAPIConfig `yaml:"whoosh_api"`
+	LightRAG  LightRAGConfig  `yaml:"lightrag"`
 }

 // AgentConfig defines agent-specific settings
@@ -161,6 +162,15 @@ type WHOOSHAPIConfig struct {
 	Enabled bool `yaml:"enabled"`
 }

+// LightRAGConfig defines LightRAG RAG service settings
+type LightRAGConfig struct {
+	Enabled     bool          `yaml:"enabled"`
+	BaseURL     string        `yaml:"base_url"`
+	Timeout     time.Duration `yaml:"timeout"`
+	APIKey      string        `yaml:"api_key"`
+	DefaultMode string        `yaml:"default_mode"` // naive, local, global, hybrid
+}
+
 // LoadFromEnvironment loads configuration from environment variables
 func LoadFromEnvironment() (*Config, error) {
 	cfg := &Config{
@@ -270,6 +280,13 @@ func LoadFromEnvironment() (*Config, error) {
 			Token:   os.Getenv("WHOOSH_API_TOKEN"),
 			Enabled: getEnvBoolOrDefault("WHOOSH_API_ENABLED", false),
 		},
+		LightRAG: LightRAGConfig{
+			Enabled:     getEnvBoolOrDefault("CHORUS_LIGHTRAG_ENABLED", false),
+			BaseURL:     getEnvOrDefault("CHORUS_LIGHTRAG_BASE_URL", "http://127.0.0.1:9621"),
+			Timeout:     getEnvDurationOrDefault("CHORUS_LIGHTRAG_TIMEOUT", 30*time.Second),
+			APIKey:      os.Getenv("CHORUS_LIGHTRAG_API_KEY"),
+			DefaultMode: getEnvOrDefault("CHORUS_LIGHTRAG_DEFAULT_MODE", "hybrid"),
+		},
 	}

 	// Validate required configuration
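Editor's note: a hedged sketch of exercising the new settings through the environment. Variable names come from this hunk; getEnvDurationOrDefault is assumed to accept time.ParseDuration syntax, and any other variables the validation step requires (e.g. provider API keys) are assumed to be set already.

	os.Setenv("CHORUS_LIGHTRAG_ENABLED", "true")
	os.Setenv("CHORUS_LIGHTRAG_BASE_URL", "http://127.0.0.1:9621")
	os.Setenv("CHORUS_LIGHTRAG_TIMEOUT", "45s") // assumed format, per time.ParseDuration

	cfg, err := config.LoadFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.LightRAG.Enabled, cfg.LightRAG.DefaultMode) // true hybrid (mode falls back to its default)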
@@ -4,10 +4,12 @@ import (
 	"context"
 	"fmt"
 	"log"
+	"strconv"
 	"strings"
 	"time"

 	"chorus/pkg/ai"
+	"chorus/pkg/prompt"
 )

 // TaskExecutionEngine provides AI-powered task execution with isolated sandboxes
@@ -20,12 +22,12 @@ type TaskExecutionEngine interface {

 // TaskExecutionRequest represents a task to be executed
 type TaskExecutionRequest struct {
 	ID           string                 `json:"id"`
 	Type         string                 `json:"type"`
 	Description  string                 `json:"description"`
 	Context      map[string]interface{} `json:"context,omitempty"`
 	Requirements *TaskRequirements      `json:"requirements,omitempty"`
 	Timeout      time.Duration          `json:"timeout,omitempty"`
 }

 // TaskRequirements specifies execution environment needs
@@ -51,54 +53,54 @@ type TaskExecutionResult struct {

 // TaskArtifact represents a file or data produced during execution
 type TaskArtifact struct {
 	Name      string            `json:"name"`
 	Type      string            `json:"type"`
 	Path      string            `json:"path,omitempty"`
 	Content   []byte            `json:"content,omitempty"`
 	Size      int64             `json:"size"`
 	CreatedAt time.Time         `json:"created_at"`
 	Metadata  map[string]string `json:"metadata,omitempty"`
 }

 // ExecutionMetrics tracks resource usage and performance
 type ExecutionMetrics struct {
 	StartTime        time.Time      `json:"start_time"`
 	EndTime          time.Time      `json:"end_time"`
 	Duration         time.Duration  `json:"duration"`
 	AIProviderTime   time.Duration  `json:"ai_provider_time"`
 	SandboxTime      time.Duration  `json:"sandbox_time"`
 	ResourceUsage    *ResourceUsage `json:"resource_usage,omitempty"`
 	CommandsExecuted int            `json:"commands_executed"`
 	FilesGenerated   int            `json:"files_generated"`
 }

 // EngineConfig configures the task execution engine
 type EngineConfig struct {
 	AIProviderFactory  *ai.ProviderFactory `json:"-"`
 	SandboxDefaults    *SandboxConfig      `json:"sandbox_defaults"`
 	DefaultTimeout     time.Duration       `json:"default_timeout"`
 	MaxConcurrentTasks int                 `json:"max_concurrent_tasks"`
 	EnableMetrics      bool                `json:"enable_metrics"`
 	LogLevel           string              `json:"log_level"`
 }

 // EngineMetrics tracks overall engine performance
 type EngineMetrics struct {
 	TasksExecuted      int64         `json:"tasks_executed"`
 	TasksSuccessful    int64         `json:"tasks_successful"`
 	TasksFailed        int64         `json:"tasks_failed"`
 	AverageTime        time.Duration `json:"average_time"`
 	TotalExecutionTime time.Duration `json:"total_execution_time"`
 	ActiveTasks        int           `json:"active_tasks"`
 }

 // DefaultTaskExecutionEngine implements the TaskExecutionEngine interface
 type DefaultTaskExecutionEngine struct {
 	config      *EngineConfig
 	aiFactory   *ai.ProviderFactory
 	metrics     *EngineMetrics
 	activeTasks map[string]context.CancelFunc
 	logger      *log.Logger
 }

 // NewTaskExecutionEngine creates a new task execution engine
@@ -192,26 +194,49 @@ func (e *DefaultTaskExecutionEngine) ExecuteTask(ctx context.Context, request *T

 // executeTaskInternal performs the actual task execution
 func (e *DefaultTaskExecutionEngine) executeTaskInternal(ctx context.Context, request *TaskExecutionRequest, result *TaskExecutionResult) error {
-	// Step 1: Determine AI model and get provider
+	if request == nil {
+		return fmt.Errorf("task execution request cannot be nil")
+	}
+
 	aiStartTime := time.Now()
+
 	role := e.determineRoleFromTask(request)
+
 	provider, providerConfig, err := e.aiFactory.GetProviderForRole(role)
 	if err != nil {
 		return fmt.Errorf("failed to get AI provider for role %s: %w", role, err)
 	}

-	// Step 2: Create AI request
+	roleConfig, _ := e.aiFactory.GetRoleConfig(role)
+
 	aiRequest := &ai.TaskRequest{
 		TaskID:          request.ID,
-		TaskTitle:       request.Type,
+		TaskTitle:       extractTaskTitle(request),
 		TaskDescription: request.Description,
 		Context:         request.Context,
-		ModelName:       providerConfig.DefaultModel,
 		AgentRole:       role,
+		AgentID:          extractAgentID(request.Context),
+		Repository:       extractRepository(request.Context),
+		TaskLabels:       extractTaskLabels(request.Context),
+		Priority:         extractContextInt(request.Context, "priority"),
+		Complexity:       extractContextInt(request.Context, "complexity"),
+		ModelName:        providerConfig.DefaultModel,
+		Temperature:      providerConfig.Temperature,
+		MaxTokens:        providerConfig.MaxTokens,
+		WorkingDirectory: extractWorkingDirectory(request.Context),
+		EnableTools:      providerConfig.EnableTools || roleConfig.EnableTools,
+		MCPServers:       combineStringSlices(providerConfig.MCPServers, roleConfig.MCPServers),
+		AllowedTools:     combineStringSlices(roleConfig.AllowedTools, nil),
 	}

+	if aiRequest.AgentID == "" {
+		aiRequest.AgentID = request.ID
+	}
+
+	if systemPrompt := e.resolveSystemPrompt(role, roleConfig, request.Context); systemPrompt != "" {
+		aiRequest.SystemPrompt = systemPrompt
+	}
+
-	// Step 3: Get AI response
 	aiResponse, err := provider.ExecuteTask(ctx, aiRequest)
 	if err != nil {
 		return fmt.Errorf("AI provider execution failed: %w", err)
@@ -219,14 +244,20 @@ func (e *DefaultTaskExecutionEngine) executeTaskInternal(ctx context.Context, re

 	result.Metrics.AIProviderTime = time.Since(aiStartTime)

-	// Step 4: Parse AI response for executable commands
 	commands, artifacts, err := e.parseAIResponse(aiResponse)
 	if err != nil {
 		return fmt.Errorf("failed to parse AI response: %w", err)
 	}

-	// Step 5: Execute commands in sandbox if needed
-	if len(commands) > 0 {
+	// Only execute sandbox if sandbox type is not explicitly disabled (empty string or "none")
+	sandboxType := ""
+	if request.Requirements != nil {
+		sandboxType = request.Requirements.SandboxType
+	}
+
+	shouldExecuteSandbox := len(commands) > 0 && sandboxType != "" && sandboxType != "none"
+
+	if shouldExecuteSandbox {
 		sandboxStartTime := time.Now()

 		sandboxResult, err := e.executeSandboxCommands(ctx, request, commands)
@@ -238,16 +269,13 @@ func (e *DefaultTaskExecutionEngine) executeTaskInternal(ctx context.Context, re
 		result.Metrics.CommandsExecuted = len(commands)
 		result.Metrics.ResourceUsage = sandboxResult.ResourceUsage

-		// Merge sandbox artifacts
 		artifacts = append(artifacts, sandboxResult.Artifacts...)
 	}

-	// Step 6: Process results and artifacts
 	result.Output = e.formatOutput(aiResponse, artifacts)
 	result.Artifacts = artifacts
 	result.Metrics.FilesGenerated = len(artifacts)

-	// Add metadata
 	result.Metadata = map[string]interface{}{
 		"ai_provider": providerConfig.Type,
 		"ai_model":    providerConfig.DefaultModel,
@@ -260,26 +288,365 @@ func (e *DefaultTaskExecutionEngine) executeTaskInternal(ctx context.Context, re

 // determineRoleFromTask analyzes the task to determine appropriate AI role
 func (e *DefaultTaskExecutionEngine) determineRoleFromTask(request *TaskExecutionRequest) string {
-	taskType := strings.ToLower(request.Type)
-	description := strings.ToLower(request.Description)
-
-	// Determine role based on task type and description keywords
-	if strings.Contains(taskType, "code") || strings.Contains(description, "program") ||
-		strings.Contains(description, "script") || strings.Contains(description, "function") {
-		return "developer"
-	}
-
-	if strings.Contains(taskType, "analysis") || strings.Contains(description, "analyze") ||
-		strings.Contains(description, "review") {
-		return "analyst"
-	}
-
-	if strings.Contains(taskType, "test") || strings.Contains(description, "test") {
-		return "tester"
-	}
-
-	// Default to general purpose
-	return "general"
+	if request == nil {
+		return "developer"
+	}
+
+	if role := extractRoleFromContext(request.Context); role != "" {
+		return role
+	}
+
+	typeLower := strings.ToLower(request.Type)
+	descriptionLower := strings.ToLower(request.Description)
+
+	switch {
+	case strings.Contains(typeLower, "security") || strings.Contains(descriptionLower, "security"):
+		return normalizeRole("security")
+	case strings.Contains(typeLower, "test") || strings.Contains(descriptionLower, "test"):
+		return normalizeRole("tester")
+	case strings.Contains(typeLower, "review") || strings.Contains(descriptionLower, "review"):
+		return normalizeRole("reviewer")
+	case strings.Contains(typeLower, "design") || strings.Contains(typeLower, "architecture") || strings.Contains(descriptionLower, "architecture") || strings.Contains(descriptionLower, "design"):
+		return normalizeRole("architect")
+	case strings.Contains(typeLower, "analysis") || strings.Contains(descriptionLower, "analysis") || strings.Contains(descriptionLower, "analyz"):
+		return normalizeRole("systems analyst")
+	case strings.Contains(typeLower, "doc") || strings.Contains(descriptionLower, "documentation") || strings.Contains(descriptionLower, "docs"):
+		return normalizeRole("technical writer")
+	default:
+		return normalizeRole("developer")
+	}
 }
+
+func (e *DefaultTaskExecutionEngine) resolveSystemPrompt(role string, roleConfig ai.RoleConfig, ctx map[string]interface{}) string {
+	if promptText := extractSystemPromptFromContext(ctx); promptText != "" {
+		return promptText
+	}
+	if strings.TrimSpace(roleConfig.SystemPrompt) != "" {
+		return strings.TrimSpace(roleConfig.SystemPrompt)
+	}
+	if role != "" {
+		if composed, err := prompt.ComposeSystemPrompt(role); err == nil && strings.TrimSpace(composed) != "" {
+			return composed
+		}
+	}
+	if defaultInstr := prompt.GetDefaultInstructions(); strings.TrimSpace(defaultInstr) != "" {
+		return strings.TrimSpace(defaultInstr)
+	}
+	return ""
+}
+
+func extractRoleFromContext(ctx map[string]interface{}) string {
+	if ctx == nil {
+		return ""
+	}
+
+	if rolesVal, ok := ctx["required_roles"]; ok {
+		if roles := convertToStringSlice(rolesVal); len(roles) > 0 {
+			for _, role := range roles {
+				if normalized := normalizeRole(role); normalized != "" {
+					return normalized
+				}
+			}
+		}
+	}
+
+	candidates := []string{
+		extractStringFromContext(ctx, "required_role"),
+		extractStringFromContext(ctx, "role"),
+		extractStringFromContext(ctx, "agent_role"),
+		extractStringFromNestedMap(ctx, "agent_info", "role"),
+		extractStringFromNestedMap(ctx, "task_metadata", "required_role"),
+		extractStringFromNestedMap(ctx, "task_metadata", "role"),
+		extractStringFromNestedMap(ctx, "council", "role"),
+	}
+
+	for _, candidate := range candidates {
+		if normalized := normalizeRole(candidate); normalized != "" {
+			return normalized
+		}
+	}
+
+	return ""
+}
+
+func extractSystemPromptFromContext(ctx map[string]interface{}) string {
+	if promptText := extractStringFromContext(ctx, "system_prompt"); promptText != "" {
+		return promptText
+	}
+	if promptText := extractStringFromNestedMap(ctx, "task_metadata", "system_prompt"); promptText != "" {
+		return promptText
+	}
+	if promptText := extractStringFromNestedMap(ctx, "council", "system_prompt"); promptText != "" {
+		return promptText
+	}
+	return ""
+}
+
+func extractTaskTitle(request *TaskExecutionRequest) string {
+	if request == nil {
+		return ""
+	}
+	if title := extractStringFromContext(request.Context, "task_title"); title != "" {
+		return title
+	}
+	if title := extractStringFromNestedMap(request.Context, "task_metadata", "title"); title != "" {
+		return title
+	}
+	if request.Type != "" {
+		return request.Type
+	}
+	return request.ID
+}
+
+func extractRepository(ctx map[string]interface{}) string {
+	if repo := extractStringFromContext(ctx, "repository"); repo != "" {
+		return repo
+	}
+	if repo := extractStringFromNestedMap(ctx, "task_metadata", "repository"); repo != "" {
+		return repo
+	}
+	return ""
+}
+
+func extractAgentID(ctx map[string]interface{}) string {
+	if id := extractStringFromContext(ctx, "agent_id"); id != "" {
+		return id
+	}
+	if id := extractStringFromNestedMap(ctx, "agent_info", "id"); id != "" {
+		return id
+	}
+	return ""
+}
+
+func extractWorkingDirectory(ctx map[string]interface{}) string {
+	if dir := extractStringFromContext(ctx, "working_directory"); dir != "" {
+		return dir
+	}
+	if dir := extractStringFromNestedMap(ctx, "task_metadata", "working_directory"); dir != "" {
+		return dir
+	}
+	return ""
+}
+
+func extractTaskLabels(ctx map[string]interface{}) []string {
+	if ctx == nil {
+		return nil
+	}
+
+	labels := convertToStringSlice(ctx["labels"])
+	if meta, ok := ctx["task_metadata"].(map[string]interface{}); ok {
+		labels = append(labels, convertToStringSlice(meta["labels"])...)
+	}
+
+	return uniqueStrings(labels)
+}
+
+func convertToStringSlice(value interface{}) []string {
+	switch v := value.(type) {
+	case []string:
+		result := make([]string, 0, len(v))
+		for _, item := range v {
+			item = strings.TrimSpace(item)
+			if item != "" {
+				result = append(result, item)
+			}
+		}
+		return result
+	case []interface{}:
+		result := make([]string, 0, len(v))
+		for _, item := range v {
+			if str, ok := item.(string); ok {
+				str = strings.TrimSpace(str)
+				if str != "" {
+					result = append(result, str)
+				}
+			}
+		}
+		return result
+	case string:
+		trimmed := strings.TrimSpace(v)
+		if trimmed == "" {
+			return nil
+		}
+		parts := strings.Split(trimmed, ",")
+		if len(parts) == 1 {
+			return []string{trimmed}
+		}
+		result := make([]string, 0, len(parts))
+		for _, part := range parts {
+			p := strings.TrimSpace(part)
+			if p != "" {
+				result = append(result, p)
+			}
+		}
+		return result
+	default:
+		return nil
+	}
+}
+
+func uniqueStrings(values []string) []string {
+	if len(values) == 0 {
+		return nil
+	}
+
+	seen := make(map[string]struct{})
+	result := make([]string, 0, len(values))
+	for _, value := range values {
+		trimmed := strings.TrimSpace(value)
+		if trimmed == "" {
+			continue
+		}
+		if _, exists := seen[trimmed]; exists {
+			continue
+		}
+		seen[trimmed] = struct{}{}
+		result = append(result, trimmed)
+	}
+	if len(result) == 0 {
+		return nil
+	}
+	return result
+}
+
+func extractContextInt(ctx map[string]interface{}, key string) int {
+	if ctx == nil {
+		return 0
+	}
+
+	if value, ok := ctx[key]; ok {
+		if intVal, ok := toInt(value); ok {
+			return intVal
+		}
+	}
+
+	if meta, ok := ctx["task_metadata"].(map[string]interface{}); ok {
+		if value, ok := meta[key]; ok {
+			if intVal, ok := toInt(value); ok {
+				return intVal
+			}
+		}
+	}

+	return 0
+}
+
+func toInt(value interface{}) (int, bool) {
+	switch v := value.(type) {
+	case int:
+		return v, true
+	case int32:
+		return int(v), true
+	case int64:
+		return int(v), true
+	case float64:
+		return int(v), true
+	case float32:
+		return int(v), true
+	case string:
+		trimmed := strings.TrimSpace(v)
+		if trimmed == "" {
+			return 0, false
+		}
+		parsed, err := strconv.Atoi(trimmed)
+		if err != nil {
+			return 0, false
+		}
+		return parsed, true
+	default:
+		return 0, false
+	}
+}
+
+func extractStringFromContext(ctx map[string]interface{}, key string) string {
+	if ctx == nil {
+		return ""
+	}
+
+	if value, ok := ctx[key]; ok {
+		switch v := value.(type) {
+		case string:
+			return strings.TrimSpace(v)
+		case fmt.Stringer:
+			return strings.TrimSpace(v.String())
+		}
+	}
+
+	return ""
+}
+
+func extractStringFromNestedMap(ctx map[string]interface{}, parentKey, key string) string {
+	if ctx == nil {
+		return ""
+	}
+
+	nested, ok := ctx[parentKey].(map[string]interface{})
+	if !ok {
+		return ""
+	}
+
+	return getStringFromMap(nested, key)
+}
+
+func getStringFromMap(m map[string]interface{}, key string) string {
+	if m == nil {
+		return ""
+	}
+
+	if value, ok := m[key]; ok {
+		switch v := value.(type) {
+		case string:
+			return strings.TrimSpace(v)
+		case fmt.Stringer:
+			return strings.TrimSpace(v.String())
+		}
+	}
+
+	return ""
+}
+
+func combineStringSlices(base []string, extra []string) []string {
+	if len(base) == 0 && len(extra) == 0 {
+		return nil
+	}
+
+	seen := make(map[string]struct{})
+	combined := make([]string, 0, len(base)+len(extra))
+
+	appendValues := func(values []string) {
+		for _, value := range values {
+			trimmed := strings.TrimSpace(value)
+			if trimmed == "" {
+				continue
+			}
+			if _, exists := seen[trimmed]; exists {
+				continue
+			}
+			seen[trimmed] = struct{}{}
+			combined = append(combined, trimmed)
+		}
+	}
+
+	appendValues(base)
+	appendValues(extra)
+
+	if len(combined) == 0 {
+		return nil
+	}
+
+	return combined
+}
+
+func normalizeRole(role string) string {
+	role = strings.TrimSpace(role)
+	if role == "" {
+		return ""
+	}
+	role = strings.ToLower(role)
+	role = strings.ReplaceAll(role, "_", "-")
+	role = strings.ReplaceAll(role, " ", "-")
+	role = strings.ReplaceAll(role, "--", "-")
+	return role
+}

 // parseAIResponse extracts executable commands and artifacts from AI response
@@ -501,4 +868,4 @@ func (e *DefaultTaskExecutionEngine) Shutdown() error {

 	e.logger.Printf("TaskExecutionEngine shutdown complete")
 	return nil
 }
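Editor's note: role resolution now has a clear precedence — an explicit role in context ("required_roles", "role", "agent_role", or the nested maps) wins, then the type/description keyword switch, then "developer". Every path funnels through normalizeRole, whose outputs follow directly from the string replacements above:

	fmt.Println(normalizeRole("Systems Analyst"))  // "systems-analyst"
	fmt.Println(normalizeRole("technical_writer")) // "technical-writer"
	fmt.Println(normalizeRole("  Developer "))     // "developer"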
pkg/mcp/lightrag_client.go (new file, 265 lines)
@@ -0,0 +1,265 @@
package mcp

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

// LightRAGClient provides access to LightRAG MCP server
type LightRAGClient struct {
	baseURL    string
	httpClient *http.Client
	apiKey     string // Optional API key for authentication
}

// LightRAGConfig holds configuration for LightRAG client
type LightRAGConfig struct {
	BaseURL string        // e.g., "http://127.0.0.1:9621"
	Timeout time.Duration // HTTP timeout
	APIKey  string        // Optional API key
}

// QueryMode represents LightRAG query modes
type QueryMode string

const (
	QueryModeNaive  QueryMode = "naive"  // Simple semantic search
	QueryModeLocal  QueryMode = "local"  // Local graph traversal
	QueryModeGlobal QueryMode = "global" // Global graph analysis
	QueryModeHybrid QueryMode = "hybrid" // Combined approach
)

// QueryRequest represents a LightRAG query request
type QueryRequest struct {
	Query           string    `json:"query"`
	Mode            QueryMode `json:"mode"`
	OnlyNeedContext bool      `json:"only_need_context,omitempty"`
}

// QueryResponse represents a LightRAG query response
type QueryResponse struct {
	Response string `json:"response"`
	Context  string `json:"context,omitempty"`
}

// InsertRequest represents a LightRAG document insertion request
type InsertRequest struct {
	Text        string `json:"text"`
	Description string `json:"description,omitempty"`
}

// InsertResponse represents a LightRAG insertion response
type InsertResponse struct {
	Success bool   `json:"success"`
	Message string `json:"message"`
	Status  string `json:"status"`
}

// HealthResponse represents LightRAG health check response
type HealthResponse struct {
	Status           string                 `json:"status"`
	WorkingDirectory string                 `json:"working_directory"`
	InputDirectory   string                 `json:"input_directory"`
	Configuration    map[string]interface{} `json:"configuration"`
	AuthMode         string                 `json:"auth_mode"`
	PipelineBusy     bool                   `json:"pipeline_busy"`
	KeyedLocks       map[string]interface{} `json:"keyed_locks"`
	CoreVersion      string                 `json:"core_version"`
	APIVersion       string                 `json:"api_version"`
	WebUITitle       string                 `json:"webui_title"`
	WebUIDescription string                 `json:"webui_description"`
}

// NewLightRAGClient creates a new LightRAG MCP client
func NewLightRAGClient(config LightRAGConfig) *LightRAGClient {
	if config.Timeout == 0 {
		config.Timeout = 30 * time.Second
	}

	return &LightRAGClient{
		baseURL: config.BaseURL,
		httpClient: &http.Client{
			Timeout: config.Timeout,
		},
		apiKey: config.APIKey,
	}
}

// Query performs a RAG query against LightRAG
func (c *LightRAGClient) Query(ctx context.Context, query string, mode QueryMode) (*QueryResponse, error) {
	req := QueryRequest{
		Query: query,
		Mode:  mode,
	}

	respData, err := c.post(ctx, "/query", req)
	if err != nil {
		return nil, fmt.Errorf("query failed: %w", err)
	}

	var response QueryResponse
	if err := json.Unmarshal(respData, &response); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}

	return &response, nil
}

// QueryWithContext performs a RAG query and returns both response and context
func (c *LightRAGClient) QueryWithContext(ctx context.Context, query string, mode QueryMode) (*QueryResponse, error) {
	req := QueryRequest{
		Query:           query,
		Mode:            mode,
		OnlyNeedContext: false, // Get both response and context
	}

	respData, err := c.post(ctx, "/query", req)
	if err != nil {
		return nil, fmt.Errorf("query with context failed: %w", err)
	}

	var response QueryResponse
	if err := json.Unmarshal(respData, &response); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}

	return &response, nil
}

// GetContext retrieves context without generating a response
func (c *LightRAGClient) GetContext(ctx context.Context, query string, mode QueryMode) (string, error) {
	req := QueryRequest{
		Query:           query,
		Mode:            mode,
		OnlyNeedContext: true,
	}

	respData, err := c.post(ctx, "/query", req)
	if err != nil {
		return "", fmt.Errorf("get context failed: %w", err)
	}

	var response QueryResponse
	if err := json.Unmarshal(respData, &response); err != nil {
		return "", fmt.Errorf("failed to parse response: %w", err)
	}

	return response.Context, nil
}

// Insert adds a document to the LightRAG knowledge base
func (c *LightRAGClient) Insert(ctx context.Context, text, description string) error {
	req := InsertRequest{
		Text:        text,
		Description: description,
	}

	respData, err := c.post(ctx, "/insert", req)
	if err != nil {
		return fmt.Errorf("insert failed: %w", err)
	}

	var response InsertResponse
	if err := json.Unmarshal(respData, &response); err != nil {
		return fmt.Errorf("failed to parse insert response: %w", err)
	}

	if !response.Success {
		return fmt.Errorf("insert failed: %s", response.Message)
	}

	return nil
}

// Health checks the health of the LightRAG server
func (c *LightRAGClient) Health(ctx context.Context) (*HealthResponse, error) {
	respData, err := c.get(ctx, "/health")
	if err != nil {
		return nil, fmt.Errorf("health check failed: %w", err)
	}

	var response HealthResponse
	if err := json.Unmarshal(respData, &response); err != nil {
		return nil, fmt.Errorf("failed to parse health response: %w", err)
	}

	return &response, nil
}

// IsHealthy checks if LightRAG server is healthy
func (c *LightRAGClient) IsHealthy(ctx context.Context) bool {
	health, err := c.Health(ctx)
	if err != nil {
		return false
	}
	return health.Status == "healthy"
}

// post performs an HTTP POST request
func (c *LightRAGClient) post(ctx context.Context, endpoint string, body interface{}) ([]byte, error) {
	jsonData, err := json.Marshal(body)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, "POST", c.baseURL+endpoint, bytes.NewBuffer(jsonData))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}

	req.Header.Set("Content-Type", "application/json")
	if c.apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+c.apiKey)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	respData, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("request failed with status %d: %s", resp.StatusCode, string(respData))
	}

	return respData, nil
}

// get performs an HTTP GET request
func (c *LightRAGClient) get(ctx context.Context, endpoint string) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", c.baseURL+endpoint, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}

	if c.apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+c.apiKey)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	respData, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("request failed with status %d: %s", resp.StatusCode, string(respData))
	}

	return respData, nil
}
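Editor's note: a minimal usage sketch for the client from a package that imports chorus/pkg/mcp, assuming a LightRAG server is listening on the default port; error handling is trimmed to the essentials.

	client := mcp.NewLightRAGClient(mcp.LightRAGConfig{
		BaseURL: "http://127.0.0.1:9621",
		Timeout: 15 * time.Second,
	})

	ctx := context.Background()
	if !client.IsHealthy(ctx) {
		log.Fatal("LightRAG server unavailable")
	}

	resp, err := client.Query(ctx, "What is CHORUS?", mcp.QueryModeHybrid)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Response)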
243
pkg/mcp/lightrag_client_test.go
Normal file
243
pkg/mcp/lightrag_client_test.go
Normal file
@@ -0,0 +1,243 @@
|
|||||||
|
package mcp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestLightRAGClient_NewClient tests client creation
|
||||||
|
func TestLightRAGClient_NewClient(t *testing.T) {
|
||||||
|
config := LightRAGConfig{
|
||||||
|
BaseURL: "http://127.0.0.1:9621",
|
||||||
|
Timeout: 10 * time.Second,
|
||||||
|
APIKey: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
client := NewLightRAGClient(config)
|
||||||
|
if client == nil {
|
||||||
|
t.Fatal("expected non-nil client")
|
||||||
|
}
|
||||||
|
|
||||||
|
if client.baseURL != config.BaseURL {
|
||||||
|
t.Errorf("expected baseURL %s, got %s", config.BaseURL, client.baseURL)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestLightRAGClient_Health tests health check
|
||||||
|
// NOTE: This test requires a running LightRAG server at 127.0.0.1:9621
|
||||||
|
func TestLightRAGClient_Health(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping integration test in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := LightRAGConfig{
|
||||||
|
BaseURL: "http://127.0.0.1:9621",
|
||||||
|
Timeout: 5 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := NewLightRAGClient(config)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
health, err := client.Health(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("Health check failed (server may not be running): %v", err)
|
||||||
|
t.Skip("skipping test - lightrag server not available")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if health.Status != "healthy" {
|
||||||
|
t.Errorf("expected status 'healthy', got '%s'", health.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("LightRAG Health: %s", health.Status)
|
||||||
|
t.Logf("Core Version: %s", health.CoreVersion)
|
||||||
|
t.Logf("API Version: %s", health.APIVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestLightRAGClient_IsHealthy tests the convenience health check
|
||||||
|
func TestLightRAGClient_IsHealthy(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping integration test in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := LightRAGConfig{
|
||||||
|
BaseURL: "http://127.0.0.1:9621",
|
||||||
|
Timeout: 5 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := NewLightRAGClient(config)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
healthy := client.IsHealthy(ctx)
|
||||||
|
if !healthy {
|
||||||
|
t.Log("Server not healthy (may not be running)")
|
||||||
|
t.Skip("skipping test - lightrag server not available")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestLightRAGClient_Query tests querying with different modes
|
||||||
|
func TestLightRAGClient_Query(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping integration test in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := LightRAGConfig{
|
||||||
|
BaseURL: "http://127.0.0.1:9621",
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := NewLightRAGClient(config)
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// First check if server is available
|
||||||
|
if !client.IsHealthy(ctx) {
|
||||||
|
t.Skip("skipping test - lightrag server not available")
|
||||||
|
}
|
||||||
|
|
||||||
|
	testCases := []struct {
		name  string
		query string
		mode  QueryMode
	}{
		{
			name:  "naive mode",
			query: "What is CHORUS?",
			mode:  QueryModeNaive,
		},
		{
			name:  "local mode",
			query: "How does P2P networking work?",
			mode:  QueryModeLocal,
		},
		{
			name:  "global mode",
			query: "What are the main components?",
			mode:  QueryModeGlobal,
		},
		{
			name:  "hybrid mode",
			query: "Explain the architecture",
			mode:  QueryModeHybrid,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			response, err := client.Query(ctx, tc.query, tc.mode)
			if err != nil {
				t.Logf("Query failed: %v", err)
				return // Non-fatal - may just have empty knowledge base
			}

			if response == nil {
				t.Error("expected non-nil response")
				return
			}

			t.Logf("Query: %s", tc.query)
			t.Logf("Mode: %s", tc.mode)
			t.Logf("Response length: %d chars", len(response.Response))
		})
	}
}

// TestLightRAGClient_GetContext tests context retrieval
func TestLightRAGClient_GetContext(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	config := LightRAGConfig{
		BaseURL: "http://127.0.0.1:9621",
		Timeout: 30 * time.Second,
	}

	client := NewLightRAGClient(config)
	ctx := context.Background()

	if !client.IsHealthy(ctx) {
		t.Skip("skipping test - lightrag server not available")
	}

	context, err := client.GetContext(ctx, "distributed systems", QueryModeHybrid)
	if err != nil {
		t.Logf("GetContext failed: %v", err)
		return // Non-fatal
	}

	t.Logf("Context length: %d chars", len(context))
}

// TestLightRAGClient_Insert tests document insertion
func TestLightRAGClient_Insert(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	config := LightRAGConfig{
		BaseURL: "http://127.0.0.1:9621",
		Timeout: 30 * time.Second,
	}

	client := NewLightRAGClient(config)
	ctx := context.Background()

	if !client.IsHealthy(ctx) {
		t.Skip("skipping test - lightrag server not available")
	}

	text := `CHORUS is a distributed task coordination system built on P2P networking.
It uses libp2p for peer-to-peer communication and implements democratic leader election.
Tasks are executed in Docker sandboxes for security and isolation.`

	description := "CHORUS system overview"

	err := client.Insert(ctx, text, description)
	if err != nil {
		t.Errorf("Insert failed: %v", err)
		return
	}

	t.Log("Document inserted successfully")

	// Give time for indexing
	time.Sleep(2 * time.Second)

	// Try to query the inserted document
	response, err := client.Query(ctx, "What is CHORUS?", QueryModeHybrid)
	if err != nil {
		t.Logf("Query after insert failed: %v", err)
		return
	}

	t.Logf("Query response after insert: %s", response.Response)
}

// TestLightRAGClient_QueryWithContext tests retrieving both response and context
func TestLightRAGClient_QueryWithContext(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}

	config := LightRAGConfig{
		BaseURL: "http://127.0.0.1:9621",
		Timeout: 30 * time.Second,
	}

	client := NewLightRAGClient(config)
	ctx := context.Background()

	if !client.IsHealthy(ctx) {
		t.Skip("skipping test - lightrag server not available")
	}

	response, err := client.QueryWithContext(ctx, "distributed coordination", QueryModeHybrid)
	if err != nil {
		t.Logf("QueryWithContext failed: %v", err)
		return
	}

	t.Logf("Response: %s", response.Response)
	t.Logf("Context: %s", response.Context)
}
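All four integration tests follow the same gating pattern: skip under -short, then skip again unless IsHealthy reports a live server at 127.0.0.1:9621, so CI without a LightRAG instance skips rather than fails. A minimal sketch of such a probe, assuming a plain GET /health endpoint (the route and package name are assumptions for illustration, not taken from this diff):

package lightrag // illustrative only; the real package name may differ

import (
	"context"
	"net/http"
)

// healthProbe returns true only if baseURL answers GET /health with 200 OK.
// The "/health" route is an assumption for illustration.
func healthProbe(ctx context.Context, baseURL string) bool {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"/health", nil)
	if err != nil {
		return false
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}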
@@ -102,6 +102,7 @@ const (
 	StatusCollaborating AgentStatus = "collaborating"
 	StatusEscalating    AgentStatus = "escalating"
 	StatusTerminating   AgentStatus = "terminating"
+	StatusOffline       AgentStatus = "offline"
 )

 // AgentTask represents a task being worked on by an agent
@@ -427,7 +428,7 @@ func (s *McpServer) processMCPMessage(message map[string]interface{}) (map[string]interface{}, error) {
 	case "tools/call":
 		return s.callTool(params)
 	case "resources/list":
-		return s.listResources(), nil
+		return s.listResources()
 	case "resources/read":
 		return s.readResource(params)
 	default:
@@ -625,4 +626,347 @@ type Relation struct {
 	Type     string
 	Strength float64
 	Evidence []string
 }
+
+// REST API handlers
+
+func (s *McpServer) handleAgentsAPI(w http.ResponseWriter, r *http.Request) {
+	s.agentsMutex.RLock()
+	defer s.agentsMutex.RUnlock()
+
+	agents := make([]map[string]interface{}, 0, len(s.agents))
+	for _, agent := range s.agents {
+		agent.mutex.RLock()
+		agents = append(agents, map[string]interface{}{
+			"id":             agent.ID,
+			"role":           agent.Role,
+			"status":         agent.Status,
+			"specialization": agent.Specialization,
+			"capabilities":   agent.Capabilities,
+			"current_tasks":  len(agent.CurrentTasks),
+			"max_tasks":      agent.MaxTasks,
+		})
+		agent.mutex.RUnlock()
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	json.NewEncoder(w).Encode(map[string]interface{}{
+		"agents": agents,
+		"total":  len(agents),
+	})
+}
+
+func (s *McpServer) handleConversationsAPI(w http.ResponseWriter, r *http.Request) {
+	// Collect all active conversation threads from agents
+	conversations := make([]map[string]interface{}, 0)
+
+	s.agentsMutex.RLock()
+	for _, agent := range s.agents {
+		agent.mutex.RLock()
+		for threadID, thread := range agent.ActiveThreads {
+			conversations = append(conversations, map[string]interface{}{
+				"id":           threadID,
+				"topic":        thread.Topic,
+				"state":        thread.State,
+				"participants": len(thread.Participants),
+				"created_at":   thread.CreatedAt,
+			})
+		}
+		agent.mutex.RUnlock()
+	}
+	s.agentsMutex.RUnlock()
+
+	w.Header().Set("Content-Type", "application/json")
+	json.NewEncoder(w).Encode(map[string]interface{}{
+		"conversations": conversations,
+		"total":         len(conversations),
+	})
+}
+
+func (s *McpServer) handleStatsAPI(w http.ResponseWriter, r *http.Request) {
+	s.stats.mutex.RLock()
+	defer s.stats.mutex.RUnlock()
+
+	w.Header().Set("Content-Type", "application/json")
+	json.NewEncoder(w).Encode(map[string]interface{}{
+		"start_time":            s.stats.StartTime,
+		"uptime_seconds":        time.Since(s.stats.StartTime).Seconds(),
+		"total_requests":        s.stats.TotalRequests,
+		"active_agents":         s.stats.ActiveAgents,
+		"messages_processed":    s.stats.MessagesProcessed,
+		"tokens_consumed":       s.stats.TokensConsumed,
+		"average_cost_per_task": s.stats.AverageCostPerTask,
+		"error_rate":            s.stats.ErrorRate,
+	})
+}
+
+func (s *McpServer) handleHealthCheck(w http.ResponseWriter, r *http.Request) {
+	s.agentsMutex.RLock()
+	agentCount := len(s.agents)
+	s.agentsMutex.RUnlock()
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	json.NewEncoder(w).Encode(map[string]interface{}{
+		"status":        "healthy",
+		"active_agents": agentCount,
+		"uptime":        time.Since(s.stats.StartTime).String(),
+	})
+}
+
+// Message handlers
+
+func (s *McpServer) handleBzzzMessages() {
+	// Subscribe to BZZZ messages via pubsub
+	if s.pubsub == nil {
+		return
+	}
+
+	// Listen for BZZZ coordination messages
+	for {
+		select {
+		case <-s.ctx.Done():
+			return
+		default:
+			// Process BZZZ messages from pubsub
+			time.Sleep(1 * time.Second)
+		}
+	}
+}
+
+func (s *McpServer) handleHmmmMessages() {
+	// Subscribe to HMMM messages via pubsub
+	if s.pubsub == nil {
+		return
+	}
+
+	// Listen for HMMM discussion messages
+	for {
+		select {
+		case <-s.ctx.Done():
+			return
+		default:
+			// Process HMMM messages from pubsub
+			time.Sleep(1 * time.Second)
+		}
+	}
+}
+
+func (s *McpServer) periodicTasks() {
+	ticker := time.NewTicker(30 * time.Second)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-s.ctx.Done():
+			return
+		case <-ticker.C:
+			// Update agent statistics
+			s.agentsMutex.RLock()
+			s.stats.mutex.Lock()
+			s.stats.ActiveAgents = len(s.agents)
+			s.stats.mutex.Unlock()
+			s.agentsMutex.RUnlock()
+
+			// Re-announce agents periodically
+			s.agentsMutex.RLock()
+			for _, agent := range s.agents {
+				if time.Since(agent.LastAnnouncement) > 5*time.Minute {
+					s.announceAgent(agent)
+				}
+			}
+			s.agentsMutex.RUnlock()
+		}
+	}
+}
+
+// Agent management
+
+func (s *McpServer) stopAgent(agent *GPTAgent) {
+	agent.mutex.Lock()
+	defer agent.mutex.Unlock()
+
+	// Update status
+	agent.Status = StatusOffline
+
+	// Clean up active tasks
+	for taskID := range agent.CurrentTasks {
+		delete(agent.CurrentTasks, taskID)
+	}
+
+	// Clean up active threads
+	for threadID := range agent.ActiveThreads {
+		delete(agent.ActiveThreads, threadID)
+	}
+
+	s.hlog.Append(logging.PeerLeft, map[string]interface{}{
+		"agent_id": agent.ID,
+		"role":     string(agent.Role),
+	})
+}
+
+func (s *McpServer) initiateCollaboration(thread *ConversationThread) error {
+	// Send collaboration invitation to all participants
+	for _, participant := range thread.Participants {
+		s.agentsMutex.RLock()
+		agent, exists := s.agents[participant.AgentID]
+		s.agentsMutex.RUnlock()
+
+		if !exists {
+			continue
+		}
+
+		// Update participant status
+		agent.mutex.Lock()
+		participant.Status = ParticipantStatusActive
+		agent.mutex.Unlock()
+
+		// Log collaboration start
+		s.hlog.Append(logging.Collaboration, map[string]interface{}{
+			"event":     "collaboration_started",
+			"thread_id": thread.ID,
+			"agent_id":  agent.ID,
+			"role":      string(agent.Role),
+		})
+	}
+
+	return nil
+}
+
+// MCP tool listing
+
+func (s *McpServer) listTools() map[string]interface{} {
+	return map[string]interface{}{
+		"tools": []map[string]interface{}{
+			{
+				"name":        "chorus_announce",
+				"description": "Announce agent availability to CHORUS network",
+				"parameters": map[string]interface{}{
+					"agent_id":       "string",
+					"capabilities":   "array",
+					"specialization": "string",
+				},
+			},
+			{
+				"name":        "chorus_lookup",
+				"description": "Look up available agents by capability or role",
+				"parameters": map[string]interface{}{
+					"capability": "string",
+					"role":       "string",
+				},
+			},
+			{
+				"name":        "chorus_get",
+				"description": "Retrieve context or data from CHORUS DHT",
+				"parameters": map[string]interface{}{
+					"key": "string",
+				},
+			},
+			{
+				"name":        "chorus_store",
+				"description": "Store data in CHORUS DHT",
+				"parameters": map[string]interface{}{
+					"key":   "string",
+					"value": "string",
+				},
+			},
+			{
+				"name":        "chorus_collaborate",
+				"description": "Request multi-agent collaboration on a task",
+				"parameters": map[string]interface{}{
+					"task":           "object",
+					"required_roles": "array",
+				},
+			},
+		},
+	}
+}
+
+// MCP resource handling
+
+func (s *McpServer) listResources() (map[string]interface{}, error) {
+	return map[string]interface{}{
+		"resources": []map[string]interface{}{
+			{
+				"uri":         "chorus://agents",
+				"name":        "Available Agents",
+				"description": "List of all available CHORUS agents",
+				"mimeType":    "application/json",
+			},
+			{
+				"uri":         "chorus://dht",
+				"name":        "DHT Storage",
+				"description": "Access to distributed hash table storage",
+				"mimeType":    "application/json",
+			},
+		},
+	}, nil
+}
+
+func (s *McpServer) readResource(params map[string]interface{}) (map[string]interface{}, error) {
+	uri, ok := params["uri"].(string)
+	if !ok {
+		return nil, fmt.Errorf("missing uri parameter")
+	}
+
+	switch uri {
+	case "chorus://agents":
+		s.agentsMutex.RLock()
+		defer s.agentsMutex.RUnlock()
+
+		agents := make([]map[string]interface{}, 0, len(s.agents))
+		for _, agent := range s.agents {
+			agents = append(agents, map[string]interface{}{
+				"id":     agent.ID,
+				"role":   agent.Role,
+				"status": agent.Status,
+			})
+		}
+		return map[string]interface{}{"agents": agents}, nil
+
+	case "chorus://dht":
+		return map[string]interface{}{"message": "DHT access not implemented"}, nil
+
+	default:
+		return nil, fmt.Errorf("unknown resource: %s", uri)
+	}
+}
+
+// BZZZ tool handlers
+
+func (s *McpServer) handleBzzzLookup(params map[string]interface{}) (map[string]interface{}, error) {
+	// Stub: Lookup agents or resources via BZZZ
+	return map[string]interface{}{
+		"results": []interface{}{},
+	}, nil
+}
+
+func (s *McpServer) handleBzzzGet(params map[string]interface{}) (map[string]interface{}, error) {
+	// Stub: Get data from BZZZ system
+	return map[string]interface{}{
+		"data": nil,
+	}, nil
+}
+
+func (s *McpServer) handleBzzzPost(params map[string]interface{}) (map[string]interface{}, error) {
+	// Stub: Post data to BZZZ system
+	return map[string]interface{}{
+		"success": false,
+		"message": "not implemented",
+	}, nil
+}
+
+func (s *McpServer) handleBzzzThread(params map[string]interface{}) (map[string]interface{}, error) {
+	// Stub: Handle BZZZ thread operations
+	return map[string]interface{}{
+		"thread": nil,
+	}, nil
+}
+
+func (s *McpServer) handleBzzzSubscribe(params map[string]interface{}) (map[string]interface{}, error) {
+	// Stub: Subscribe to BZZZ events
+	return map[string]interface{}{
+		"subscribed": false,
+		"message":    "not implemented",
+	}, nil
+}
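For orientation, the four REST handlers added above all use the standard net/http signature, so wiring them up is one line per route. A hedged sketch (the route paths are assumptions, not taken from this diff; the server's real registration may differ):

// Illustrative wiring only; paths are assumptions.
func (s *McpServer) registerRESTRoutes(mux *http.ServeMux) {
	mux.HandleFunc("/api/agents", s.handleAgentsAPI)
	mux.HandleFunc("/api/conversations", s.handleConversationsAPI)
	mux.HandleFunc("/api/stats", s.handleStatsAPI)
	mux.HandleFunc("/health", s.handleHealthCheck)
}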
126
pkg/seqthink/ageio/crypto.go
Normal file
@@ -0,0 +1,126 @@
package ageio

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"filippo.io/age"
)

// Encryptor handles age encryption operations
type Encryptor struct {
	recipients []age.Recipient
}

// Decryptor handles age decryption operations
type Decryptor struct {
	identities []age.Identity
}

// NewEncryptor creates an encryptor from a recipients file
func NewEncryptor(recipientsPath string) (*Encryptor, error) {
	if recipientsPath == "" {
		return nil, fmt.Errorf("recipients path is empty")
	}

	data, err := os.ReadFile(recipientsPath)
	if err != nil {
		return nil, fmt.Errorf("read recipients file: %w", err)
	}

	recipients, err := age.ParseRecipients(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("parse recipients: %w", err)
	}

	if len(recipients) == 0 {
		return nil, fmt.Errorf("no recipients found in file")
	}

	return &Encryptor{recipients: recipients}, nil
}

// NewDecryptor creates a decryptor from an identity file
func NewDecryptor(identityPath string) (*Decryptor, error) {
	if identityPath == "" {
		return nil, fmt.Errorf("identity path is empty")
	}

	data, err := os.ReadFile(identityPath)
	if err != nil {
		return nil, fmt.Errorf("read identity file: %w", err)
	}

	identities, err := age.ParseIdentities(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("parse identities: %w", err)
	}

	if len(identities) == 0 {
		return nil, fmt.Errorf("no identities found in file")
	}

	return &Decryptor{identities: identities}, nil
}

// Encrypt encrypts plaintext data with age
func (e *Encryptor) Encrypt(plaintext []byte) ([]byte, error) {
	if len(plaintext) == 0 {
		return nil, fmt.Errorf("plaintext is empty")
	}

	var buf bytes.Buffer
	w, err := age.Encrypt(&buf, e.recipients...)
	if err != nil {
		return nil, fmt.Errorf("create encryptor: %w", err)
	}

	if _, err := w.Write(plaintext); err != nil {
		return nil, fmt.Errorf("write plaintext: %w", err)
	}

	if err := w.Close(); err != nil {
		return nil, fmt.Errorf("close encryptor: %w", err)
	}

	return buf.Bytes(), nil
}

// Decrypt decrypts age-encrypted data
func (d *Decryptor) Decrypt(ciphertext []byte) ([]byte, error) {
	if len(ciphertext) == 0 {
		return nil, fmt.Errorf("ciphertext is empty")
	}

	r, err := age.Decrypt(bytes.NewReader(ciphertext), d.identities...)
	if err != nil {
		return nil, fmt.Errorf("create decryptor: %w", err)
	}

	plaintext, err := io.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("read plaintext: %w", err)
	}

	return plaintext, nil
}

// EncryptStream creates an encrypted writer for streaming
func (e *Encryptor) EncryptStream(w io.Writer) (io.WriteCloser, error) {
	ew, err := age.Encrypt(w, e.recipients...)
	if err != nil {
		return nil, fmt.Errorf("create stream encryptor: %w", err)
	}
	return ew, nil
}

// DecryptStream creates a decrypted reader for streaming
func (d *Decryptor) DecryptStream(r io.Reader) (io.Reader, error) {
	dr, err := age.Decrypt(r, d.identities...)
	if err != nil {
		return nil, fmt.Errorf("create stream decryptor: %w", err)
	}
	return dr, nil
}
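Taken together, the API above gives a simple encrypt/decrypt round trip over key files as produced by age-keygen. A minimal usage sketch (the import path is assumed from the repository layout, and the key file paths are hypothetical):

package main

import (
	"fmt"
	"log"

	"chorus/pkg/seqthink/ageio" // import path assumed from repo layout
)

func main() {
	enc, err := ageio.NewEncryptor("/etc/seqthink/age.pub") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	dec, err := ageio.NewDecryptor("/etc/seqthink/age.key") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	ciphertext, err := enc.Encrypt([]byte(`{"tool":"sequentialthinking"}`))
	if err != nil {
		log.Fatal(err)
	}

	plaintext, err := dec.Decrypt(ciphertext)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plaintext))
}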
291
pkg/seqthink/ageio/crypto_test.go
Normal file
@@ -0,0 +1,291 @@
package ageio

import (
	"bytes"
	"os"
	"path/filepath"
	"testing"

	"filippo.io/age"
)

func TestEncryptDecryptRoundTrip(t *testing.T) {
	// Generate test key pair
	tmpDir := t.TempDir()
	identityPath, recipientPath, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		t.Fatalf("generate test key pair: %v", err)
	}

	// Create encryptor and decryptor
	enc, err := NewEncryptor(recipientPath)
	if err != nil {
		t.Fatalf("create encryptor: %v", err)
	}

	dec, err := NewDecryptor(identityPath)
	if err != nil {
		t.Fatalf("create decryptor: %v", err)
	}

	// Test data
	testCases := []struct {
		name      string
		plaintext []byte
	}{
		{
			name:      "simple text",
			plaintext: []byte("hello world"),
		},
		{
			name:      "json data",
			plaintext: []byte(`{"tool":"sequentialthinking","payload":{"thought":"test"}}`),
		},
		{
			name:      "large data",
			plaintext: bytes.Repeat([]byte("ABCDEFGHIJ"), 1000), // 10KB
		},
		{
			name:      "unicode",
			plaintext: []byte("Hello 世界 🌍"),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Encrypt
			ciphertext, err := enc.Encrypt(tc.plaintext)
			if err != nil {
				t.Fatalf("encrypt: %v", err)
			}

			// Verify ciphertext is not empty and different from plaintext
			if len(ciphertext) == 0 {
				t.Fatal("ciphertext is empty")
			}

			if bytes.Equal(ciphertext, tc.plaintext) {
				t.Fatal("ciphertext equals plaintext (not encrypted)")
			}

			// Decrypt
			decrypted, err := dec.Decrypt(ciphertext)
			if err != nil {
				t.Fatalf("decrypt: %v", err)
			}

			// Verify decrypted matches original
			if !bytes.Equal(decrypted, tc.plaintext) {
				t.Fatalf("decrypted data doesn't match original\ngot:  %q\nwant: %q", decrypted, tc.plaintext)
			}
		})
	}
}

func TestEncryptEmptyData(t *testing.T) {
	tmpDir := t.TempDir()
	_, recipientPath, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		t.Fatalf("generate test key pair: %v", err)
	}

	enc, err := NewEncryptor(recipientPath)
	if err != nil {
		t.Fatalf("create encryptor: %v", err)
	}

	_, err = enc.Encrypt([]byte{})
	if err == nil {
		t.Fatal("expected error encrypting empty data")
	}
}

func TestDecryptEmptyData(t *testing.T) {
	tmpDir := t.TempDir()
	identityPath, _, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		t.Fatalf("generate test key pair: %v", err)
	}

	dec, err := NewDecryptor(identityPath)
	if err != nil {
		t.Fatalf("create decryptor: %v", err)
	}

	_, err = dec.Decrypt([]byte{})
	if err == nil {
		t.Fatal("expected error decrypting empty data")
	}
}

func TestDecryptInvalidCiphertext(t *testing.T) {
	tmpDir := t.TempDir()
	identityPath, _, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		t.Fatalf("generate test key pair: %v", err)
	}

	dec, err := NewDecryptor(identityPath)
	if err != nil {
		t.Fatalf("create decryptor: %v", err)
	}

	// Try to decrypt garbage data
	_, err = dec.Decrypt([]byte("not a valid age ciphertext"))
	if err == nil {
		t.Fatal("expected error decrypting invalid ciphertext")
	}
}

func TestDecryptWrongKey(t *testing.T) {
	tmpDir := t.TempDir()

	// Generate two separate key pairs
	identity1Path := filepath.Join(tmpDir, "key1.age")
	recipient1Path := filepath.Join(tmpDir, "key1.pub")
	identity2Path := filepath.Join(tmpDir, "key2.age")

	// Create first key pair
	id1, err := age.GenerateX25519Identity()
	if err != nil {
		t.Fatalf("generate key 1: %v", err)
	}
	os.WriteFile(identity1Path, []byte(id1.String()+"\n"), 0600)
	os.WriteFile(recipient1Path, []byte(id1.Recipient().String()+"\n"), 0644)

	// Create second key pair
	id2, err := age.GenerateX25519Identity()
	if err != nil {
		t.Fatalf("generate key 2: %v", err)
	}
	os.WriteFile(identity2Path, []byte(id2.String()+"\n"), 0600)

	// Encrypt with key 1
	enc, err := NewEncryptor(recipient1Path)
	if err != nil {
		t.Fatalf("create encryptor: %v", err)
	}

	ciphertext, err := enc.Encrypt([]byte("secret message"))
	if err != nil {
		t.Fatalf("encrypt: %v", err)
	}

	// Try to decrypt with key 2 (should fail)
	dec, err := NewDecryptor(identity2Path)
	if err != nil {
		t.Fatalf("create decryptor: %v", err)
	}

	_, err = dec.Decrypt(ciphertext)
	if err == nil {
		t.Fatal("expected error decrypting with wrong key")
	}
}

func TestNewEncryptorInvalidPath(t *testing.T) {
	_, err := NewEncryptor("/nonexistent/path/to/recipients")
	if err == nil {
		t.Fatal("expected error with nonexistent recipients file")
	}
}

func TestNewDecryptorInvalidPath(t *testing.T) {
	_, err := NewDecryptor("/nonexistent/path/to/identity")
	if err == nil {
		t.Fatal("expected error with nonexistent identity file")
	}
}

func TestNewEncryptorEmptyPath(t *testing.T) {
	_, err := NewEncryptor("")
	if err == nil {
		t.Fatal("expected error with empty recipients path")
	}
}

func TestNewDecryptorEmptyPath(t *testing.T) {
	_, err := NewDecryptor("")
	if err == nil {
		t.Fatal("expected error with empty identity path")
	}
}

func TestStreamingEncryptDecrypt(t *testing.T) {
	// Generate test key pair
	tmpDir := t.TempDir()
	identityPath, recipientPath, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		t.Fatalf("generate test key pair: %v", err)
	}

	// Create encryptor and decryptor
	enc, err := NewEncryptor(recipientPath)
	if err != nil {
		t.Fatalf("create encryptor: %v", err)
	}

	dec, err := NewDecryptor(identityPath)
	if err != nil {
		t.Fatalf("create decryptor: %v", err)
	}

	// Test streaming encryption
	plaintext := []byte("streaming test data")
	var ciphertextBuf bytes.Buffer

	encWriter, err := enc.EncryptStream(&ciphertextBuf)
	if err != nil {
		t.Fatalf("create encrypt stream: %v", err)
	}

	if _, err := encWriter.Write(plaintext); err != nil {
		t.Fatalf("write to encrypt stream: %v", err)
	}

	if err := encWriter.Close(); err != nil {
		t.Fatalf("close encrypt stream: %v", err)
	}

	// Test streaming decryption
	decReader, err := dec.DecryptStream(&ciphertextBuf)
	if err != nil {
		t.Fatalf("create decrypt stream: %v", err)
	}

	decrypted := make([]byte, len(plaintext))
	n, err := decReader.Read(decrypted)
	if err != nil {
		t.Fatalf("read from decrypt stream: %v", err)
	}

	if !bytes.Equal(decrypted[:n], plaintext) {
		t.Fatalf("decrypted data doesn't match original\ngot:  %q\nwant: %q", decrypted[:n], plaintext)
	}
}

func TestConvenienceFunctions(t *testing.T) {
	// Generate test keys in memory
	identity, recipient, err := GenerateTestKeys()
	if err != nil {
		t.Fatalf("generate test keys: %v", err)
	}

	plaintext := []byte("test message")

	// Encrypt with convenience function
	ciphertext, err := EncryptBytes(plaintext, recipient)
	if err != nil {
		t.Fatalf("encrypt bytes: %v", err)
	}

	// Decrypt with convenience function
	decrypted, err := DecryptBytes(ciphertext, identity)
	if err != nil {
		t.Fatalf("decrypt bytes: %v", err)
	}

	if !bytes.Equal(decrypted, plaintext) {
		t.Fatalf("decrypted data doesn't match original\ngot:  %q\nwant: %q", decrypted, plaintext)
	}
}
354
pkg/seqthink/ageio/golden_test.go
Normal file
@@ -0,0 +1,354 @@
package ageio

import (
	"bytes"
	"os"
	"path/filepath"
	"testing"
)

// TestGoldenEncryptionRoundTrip validates encryption/decryption with golden test data
func TestGoldenEncryptionRoundTrip(t *testing.T) {
	// Generate test key pair once
	tmpDir := t.TempDir()
	identityPath, recipientPath, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		t.Fatalf("generate test key pair: %v", err)
	}

	// Create encryptor and decryptor
	enc, err := NewEncryptor(recipientPath)
	if err != nil {
		t.Fatalf("create encryptor: %v", err)
	}

	dec, err := NewDecryptor(identityPath)
	if err != nil {
		t.Fatalf("create decryptor: %v", err)
	}

	// Golden test cases representing real MCP payloads
	goldenTests := []struct {
		name        string
		payload     []byte
		description string
	}{
		{
			name: "sequential_thinking_request",
			payload: []byte(`{
				"tool": "mcp__sequential-thinking__sequentialthinking",
				"payload": {
					"thought": "First, I need to analyze the problem by breaking it down into smaller components.",
					"thoughtNumber": 1,
					"totalThoughts": 5,
					"nextThoughtNeeded": true,
					"isRevision": false
				}
			}`),
			description: "Initial sequential thinking request",
		},
		{
			name: "sequential_thinking_revision",
			payload: []byte(`{
				"tool": "mcp__sequential-thinking__sequentialthinking",
				"payload": {
					"thought": "Wait, I need to revise my previous thought - I missed considering edge cases.",
					"thoughtNumber": 3,
					"totalThoughts": 6,
					"nextThoughtNeeded": true,
					"isRevision": true,
					"revisesThought": 2
				}
			}`),
			description: "Revision of previous thought",
		},
		{
			name: "sequential_thinking_branching",
			payload: []byte(`{
				"tool": "mcp__sequential-thinking__sequentialthinking",
				"payload": {
					"thought": "Let me explore an alternative approach using event sourcing instead.",
					"thoughtNumber": 4,
					"totalThoughts": 8,
					"nextThoughtNeeded": true,
					"branchFromThought": 2,
					"branchId": "alternative-approach-1"
				}
			}`),
			description: "Branching to explore alternative",
		},
		{
			name: "sequential_thinking_final",
			payload: []byte(`{
				"tool": "mcp__sequential-thinking__sequentialthinking",
				"payload": {
					"thought": "Based on all previous analysis, I recommend implementing the event sourcing pattern with CQRS for optimal scalability.",
					"thoughtNumber": 8,
					"totalThoughts": 8,
					"nextThoughtNeeded": false,
					"confidence": 0.85
				}
			}`),
			description: "Final thought with conclusion",
		},
		{
			name:        "large_context_payload",
			payload:     bytes.Repeat([]byte(`{"key": "value", "data": "ABCDEFGHIJ"}`), 100),
			description: "Large payload testing encryption of substantial data",
		},
		{
			name: "unicode_payload",
			payload: []byte(`{
				"tool": "mcp__sequential-thinking__sequentialthinking",
				"payload": {
					"thought": "分析日本語でのデータ処理 🌸🎌 and mixed language content: 你好世界",
					"thoughtNumber": 1,
					"totalThoughts": 1,
					"nextThoughtNeeded": false
				}
			}`),
			description: "Unicode and emoji content",
		},
		{
			name: "special_characters",
			payload: []byte(`{
				"tool": "test",
				"payload": {
					"special": "Testing: \n\t\r\b\"'\\\/\u0000\u001f",
					"symbols": "!@#$%^&*()_+-=[]{}|;:,.<>?~"
				}
			}`),
			description: "Special characters and escape sequences",
		},
	}

	for _, gt := range goldenTests {
		t.Run(gt.name, func(t *testing.T) {
			t.Logf("Testing: %s", gt.description)
			t.Logf("Original size: %d bytes", len(gt.payload))

			// Encrypt
			ciphertext, err := enc.Encrypt(gt.payload)
			if err != nil {
				t.Fatalf("encrypt failed: %v", err)
			}

			t.Logf("Encrypted size: %d bytes (%.1f%% overhead)",
				len(ciphertext),
				float64(len(ciphertext)-len(gt.payload))/float64(len(gt.payload))*100)

			// Verify ciphertext is different from plaintext
			if bytes.Equal(ciphertext, gt.payload) {
				t.Fatal("ciphertext equals plaintext - encryption failed")
			}

			// Verify ciphertext doesn't contain plaintext patterns
			// (basic sanity check - not cryptographically rigorous)
			if bytes.Contains(ciphertext, []byte("mcp__sequential-thinking")) {
				t.Error("ciphertext contains plaintext patterns - weak encryption")
			}

			// Decrypt
			decrypted, err := dec.Decrypt(ciphertext)
			if err != nil {
				t.Fatalf("decrypt failed: %v", err)
			}

			// Verify perfect round-trip
			if !bytes.Equal(decrypted, gt.payload) {
				t.Errorf("decrypted data doesn't match original\nOriginal:  %s\nDecrypted: %s",
					string(gt.payload), string(decrypted))
			}

			// Optional: Save golden files for inspection
			if os.Getenv("SAVE_GOLDEN") == "1" {
				goldenDir := filepath.Join(tmpDir, "golden")
				os.MkdirAll(goldenDir, 0755)

				plainPath := filepath.Join(goldenDir, gt.name+".plain.json")
				encPath := filepath.Join(goldenDir, gt.name+".encrypted.age")

				os.WriteFile(plainPath, gt.payload, 0644)
				os.WriteFile(encPath, ciphertext, 0644)

				t.Logf("Golden files saved to: %s", goldenDir)
			}
		})
	}
}

// TestGoldenDecryptionFailures validates proper error handling
func TestGoldenDecryptionFailures(t *testing.T) {
	tmpDir := t.TempDir()
	identityPath, recipientPath, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		t.Fatalf("generate test key pair: %v", err)
	}

	dec, err := NewDecryptor(identityPath)
	if err != nil {
		t.Fatalf("create decryptor: %v", err)
	}

	enc, err := NewEncryptor(recipientPath)
	if err != nil {
		t.Fatalf("create encryptor: %v", err)
	}

	failureTests := []struct {
		name        string
		ciphertext  []byte
		expectError string
	}{
		{
			name:        "empty_ciphertext",
			ciphertext:  []byte{},
			expectError: "ciphertext is empty",
		},
		{
			name:        "invalid_age_format",
			ciphertext:  []byte("not a valid age ciphertext"),
			expectError: "create decryptor",
		},
		{
			name:        "corrupted_header",
			ciphertext:  []byte("-----BEGIN AGE ENCRYPTED FILE-----\ngarbage\n-----END AGE ENCRYPTED FILE-----"),
			expectError: "create decryptor",
		},
	}

	for _, ft := range failureTests {
		t.Run(ft.name, func(t *testing.T) {
			_, err := dec.Decrypt(ft.ciphertext)
			if err == nil {
				t.Fatal("expected error but got none")
			}

			// Just verify we got an error - specific error messages may vary
			t.Logf("Got expected error: %v", err)
		})
	}

	// Test truncated ciphertext
	t.Run("truncated_ciphertext", func(t *testing.T) {
		// Create valid ciphertext
		validPlaintext := []byte("test message")
		validCiphertext, err := enc.Encrypt(validPlaintext)
		if err != nil {
			t.Fatalf("encrypt: %v", err)
		}

		// Truncate it
		truncated := validCiphertext[:len(validCiphertext)/2]

		// Try to decrypt
		_, err = dec.Decrypt(truncated)
		if err == nil {
			t.Fatal("expected error decrypting truncated ciphertext")
		}

		t.Logf("Got expected error for truncated ciphertext: %v", err)
	})

	// Test modified ciphertext
	t.Run("modified_ciphertext", func(t *testing.T) {
		// Create valid ciphertext
		validPlaintext := []byte("test message")
		validCiphertext, err := enc.Encrypt(validPlaintext)
		if err != nil {
			t.Fatalf("encrypt: %v", err)
		}

		// Flip a bit in the middle
		modified := make([]byte, len(validCiphertext))
		copy(modified, validCiphertext)
		modified[len(modified)/2] ^= 0x01

		// Try to decrypt
		_, err = dec.Decrypt(modified)
		if err == nil {
			t.Fatal("expected error decrypting modified ciphertext")
		}

		t.Logf("Got expected error for modified ciphertext: %v", err)
	})
}

// BenchmarkEncryption benchmarks encryption performance
func BenchmarkEncryption(b *testing.B) {
	tmpDir := b.TempDir()
	_, recipientPath, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		b.Fatalf("generate test key pair: %v", err)
	}

	enc, err := NewEncryptor(recipientPath)
	if err != nil {
		b.Fatalf("create encryptor: %v", err)
	}

	payloads := map[string][]byte{
		"small_1KB":   bytes.Repeat([]byte("A"), 1024),
		"medium_10KB": bytes.Repeat([]byte("A"), 10*1024),
		"large_100KB": bytes.Repeat([]byte("A"), 100*1024),
	}

	for name, payload := range payloads {
		b.Run(name, func(b *testing.B) {
			b.SetBytes(int64(len(payload)))
			b.ResetTimer()

			for i := 0; i < b.N; i++ {
				_, err := enc.Encrypt(payload)
				if err != nil {
					b.Fatalf("encrypt: %v", err)
				}
			}
		})
	}
}

// BenchmarkDecryption benchmarks decryption performance
func BenchmarkDecryption(b *testing.B) {
	tmpDir := b.TempDir()
	identityPath, recipientPath, err := GenerateTestKeyPair(tmpDir)
	if err != nil {
		b.Fatalf("generate test key pair: %v", err)
	}

	enc, err := NewEncryptor(recipientPath)
	if err != nil {
		b.Fatalf("create encryptor: %v", err)
	}

	dec, err := NewDecryptor(identityPath)
	if err != nil {
		b.Fatalf("create decryptor: %v", err)
	}

	payloads := map[string][]byte{
		"small_1KB":   bytes.Repeat([]byte("A"), 1024),
		"medium_10KB": bytes.Repeat([]byte("A"), 10*1024),
		"large_100KB": bytes.Repeat([]byte("A"), 100*1024),
	}

	for name, payload := range payloads {
		// Pre-encrypt
		ciphertext, err := enc.Encrypt(payload)
		if err != nil {
			b.Fatalf("encrypt: %v", err)
		}

		b.Run(name, func(b *testing.B) {
			b.SetBytes(int64(len(payload)))
			b.ResetTimer()

			for i := 0; i < b.N; i++ {
				_, err := dec.Decrypt(ciphertext)
				if err != nil {
					b.Fatalf("decrypt: %v", err)
				}
			}
		})
	}
}
88
pkg/seqthink/ageio/testkeys.go
Normal file
@@ -0,0 +1,88 @@
package ageio

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"filippo.io/age"
)

// GenerateTestKeyPair generates a test age key pair and returns paths
func GenerateTestKeyPair(dir string) (identityPath, recipientPath string, err error) {
	// Generate identity
	identity, err := age.GenerateX25519Identity()
	if err != nil {
		return "", "", fmt.Errorf("generate identity: %w", err)
	}

	// Create identity file
	identityPath = filepath.Join(dir, "age.key")
	if err := os.WriteFile(identityPath, []byte(identity.String()+"\n"), 0600); err != nil {
		return "", "", fmt.Errorf("write identity file: %w", err)
	}

	// Create recipient file
	recipientPath = filepath.Join(dir, "age.pub")
	recipient := identity.Recipient().String()
	if err := os.WriteFile(recipientPath, []byte(recipient+"\n"), 0644); err != nil {
		return "", "", fmt.Errorf("write recipient file: %w", err)
	}

	return identityPath, recipientPath, nil
}

// GenerateTestKeys generates test keys in memory
func GenerateTestKeys() (identity age.Identity, recipient age.Recipient, err error) {
	id, err := age.GenerateX25519Identity()
	if err != nil {
		return nil, nil, fmt.Errorf("generate identity: %w", err)
	}

	return id, id.Recipient(), nil
}

// MustGenerateTestKeyPair generates a test key pair or panics
func MustGenerateTestKeyPair(dir string) (identityPath, recipientPath string) {
	identityPath, recipientPath, err := GenerateTestKeyPair(dir)
	if err != nil {
		panic(fmt.Sprintf("failed to generate test key pair: %v", err))
	}
	return identityPath, recipientPath
}

// EncryptBytes is a convenience function for one-shot encryption
func EncryptBytes(plaintext []byte, recipients ...age.Recipient) ([]byte, error) {
	var buf bytes.Buffer
	w, err := age.Encrypt(&buf, recipients...)
	if err != nil {
		return nil, fmt.Errorf("create encryptor: %w", err)
	}

	if _, err := w.Write(plaintext); err != nil {
		return nil, fmt.Errorf("write plaintext: %w", err)
	}

	if err := w.Close(); err != nil {
		return nil, fmt.Errorf("close encryptor: %w", err)
	}

	return buf.Bytes(), nil
}

// DecryptBytes is a convenience function for one-shot decryption
func DecryptBytes(ciphertext []byte, identities ...age.Identity) ([]byte, error) {
	r, err := age.Decrypt(bytes.NewReader(ciphertext), identities...)
	if err != nil {
		return nil, fmt.Errorf("create decryptor: %w", err)
	}

	plaintext, err := io.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("read plaintext: %w", err)
	}

	return plaintext, nil
}
100
pkg/seqthink/mcpclient/client.go
Normal file
@@ -0,0 +1,100 @@
package mcpclient

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

// Client is a client for the Sequential Thinking MCP server
type Client struct {
	baseURL    string
	httpClient *http.Client
}

// ToolRequest represents a request to call an MCP tool
type ToolRequest struct {
	Tool    string                 `json:"tool"`
	Payload map[string]interface{} `json:"payload"`
}

// ToolResponse represents the response from an MCP tool call
type ToolResponse struct {
	Result interface{} `json:"result,omitempty"`
	Error  string      `json:"error,omitempty"`
}

// New creates a new MCP client
func New(baseURL string) *Client {
	return &Client{
		baseURL: baseURL,
		httpClient: &http.Client{
			Timeout: 120 * time.Second, // Longer timeout for thinking operations
		},
	}
}

// Health checks if the MCP server is healthy
func (c *Client) Health(ctx context.Context) error {
	req, err := http.NewRequestWithContext(ctx, "GET", c.baseURL+"/health", nil)
	if err != nil {
		return fmt.Errorf("create request: %w", err)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("http request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("health check failed: status %d", resp.StatusCode)
	}

	return nil
}

// CallTool calls an MCP tool
func (c *Client) CallTool(ctx context.Context, req *ToolRequest) (*ToolResponse, error) {
	jsonData, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("marshal request: %w", err)
	}

	httpReq, err := http.NewRequestWithContext(ctx, "POST", c.baseURL+"/mcp/tool", bytes.NewReader(jsonData))
	if err != nil {
		return nil, fmt.Errorf("create request: %w", err)
	}

	httpReq.Header.Set("Content-Type", "application/json")

	resp, err := c.httpClient.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("http request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read response: %w", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("tool call failed: status %d, body: %s", resp.StatusCode, string(body))
	}

	var toolResp ToolResponse
	if err := json.Unmarshal(body, &toolResp); err != nil {
		return nil, fmt.Errorf("unmarshal response: %w", err)
	}

	if toolResp.Error != "" {
		return nil, fmt.Errorf("tool error: %s", toolResp.Error)
	}

	return &toolResp, nil
}
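A minimal caller for this client, with the payload fields borrowed from the golden tests above (the import path and the port are assumptions for illustration):

package main

import (
	"context"
	"fmt"
	"log"

	"chorus/pkg/seqthink/mcpclient" // import path assumed from repo layout
)

func main() {
	client := mcpclient.New("http://127.0.0.1:8000") // port is a placeholder assumption

	ctx := context.Background()
	if err := client.Health(ctx); err != nil {
		log.Fatalf("MCP server not healthy: %v", err)
	}

	resp, err := client.CallTool(ctx, &mcpclient.ToolRequest{
		Tool: "mcp__sequential-thinking__sequentialthinking",
		Payload: map[string]interface{}{
			"thought":           "Break the problem into components.",
			"thoughtNumber":     1,
			"totalThoughts":     3,
			"nextThoughtNeeded": true,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("result: %v\n", resp.Result)
}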
39
pkg/seqthink/observability/logger.go
Normal file
@@ -0,0 +1,39 @@
package observability

import (
	"os"
	"strings"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

// InitLogger initializes the global logger
func InitLogger(level string) {
	// Set up zerolog with human-friendly console output
	output := zerolog.ConsoleWriter{
		Out:        os.Stdout,
		TimeFormat: time.RFC3339,
	}

	log.Logger = zerolog.New(output).
		With().
		Timestamp().
		Caller().
		Logger()

	// Set log level
	switch strings.ToLower(level) {
	case "debug":
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
	case "info":
		zerolog.SetGlobalLevel(zerolog.InfoLevel)
	case "warn":
		zerolog.SetGlobalLevel(zerolog.WarnLevel)
	case "error":
		zerolog.SetGlobalLevel(zerolog.ErrorLevel)
	default:
		zerolog.SetGlobalLevel(zerolog.InfoLevel)
	}
}
85
pkg/seqthink/observability/metrics.go
Normal file
@@ -0,0 +1,85 @@
package observability

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Metrics holds Prometheus metrics for the wrapper
type Metrics struct {
	requestsTotal   prometheus.Counter
	errorsTotal     prometheus.Counter
	decryptFails    prometheus.Counter
	encryptFails    prometheus.Counter
	policyDenials   prometheus.Counter
	requestDuration prometheus.Histogram
}

// InitMetrics initializes Prometheus metrics
func InitMetrics() *Metrics {
	return &Metrics{
		requestsTotal: promauto.NewCounter(prometheus.CounterOpts{
			Name: "seqthink_requests_total",
			Help: "Total number of requests received",
		}),
		errorsTotal: promauto.NewCounter(prometheus.CounterOpts{
			Name: "seqthink_errors_total",
			Help: "Total number of errors",
		}),
		decryptFails: promauto.NewCounter(prometheus.CounterOpts{
			Name: "seqthink_decrypt_failures_total",
			Help: "Total number of decryption failures",
		}),
		encryptFails: promauto.NewCounter(prometheus.CounterOpts{
			Name: "seqthink_encrypt_failures_total",
			Help: "Total number of encryption failures",
		}),
		policyDenials: promauto.NewCounter(prometheus.CounterOpts{
			Name: "seqthink_policy_denials_total",
			Help: "Total number of policy denials",
		}),
		requestDuration: promauto.NewHistogram(prometheus.HistogramOpts{
			Name:    "seqthink_request_duration_seconds",
			Help:    "Request duration in seconds",
			Buckets: prometheus.DefBuckets,
		}),
	}
}

// IncrementRequests increments the request counter
func (m *Metrics) IncrementRequests() {
	m.requestsTotal.Inc()
}

// IncrementErrors increments the error counter
func (m *Metrics) IncrementErrors() {
	m.errorsTotal.Inc()
}

// IncrementDecryptFails increments the decrypt failure counter
func (m *Metrics) IncrementDecryptFails() {
	m.decryptFails.Inc()
}

// IncrementEncryptFails increments the encrypt failure counter
func (m *Metrics) IncrementEncryptFails() {
	m.encryptFails.Inc()
}

// IncrementPolicyDenials increments the policy denial counter
func (m *Metrics) IncrementPolicyDenials() {
	m.policyDenials.Inc()
}

// ObserveRequestDuration records request duration
func (m *Metrics) ObserveRequestDuration(seconds float64) {
	m.requestDuration.Observe(seconds)
}

// Handler returns the Prometheus metrics HTTP handler
func (m *Metrics) Handler() http.Handler {
	return promhttp.Handler()
}
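One plausible way the logger and metrics fit together in the wrapper's entry point; the handler path, port, and timing middleware are assumptions for illustration, not taken from this diff:

package main

import (
	"log"
	"net/http"
	"time"

	"chorus/pkg/seqthink/observability" // import path assumed from repo layout
)

func main() {
	observability.InitLogger("info")
	metrics := observability.InitMetrics()

	mux := http.NewServeMux()
	mux.Handle("/metrics", metrics.Handler())
	mux.HandleFunc("/mcp/tool", func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		metrics.IncrementRequests()
		// ... decrypt request, call the MCP server, encrypt response ...
		metrics.ObserveRequestDuration(time.Since(start).Seconds())
	})

	log.Fatal(http.ListenAndServe(":8443", mux)) // port is a placeholder assumption
}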
354
pkg/seqthink/policy/jwt.go
Normal file
@@ -0,0 +1,354 @@
package policy

import (
	"context"
	"crypto/ed25519"
	"crypto/rsa"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"math/big"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/golang-jwt/jwt/v5"
	"github.com/rs/zerolog/log"
)

// Claims represents the JWT claims structure
type Claims struct {
	Subject string   `json:"sub"`
	Scopes  []string `json:"scopes,omitempty"`
	Scope   string   `json:"scope,omitempty"` // Space-separated scopes
	jwt.RegisteredClaims
}

// JWKS represents a JSON Web Key Set
type JWKS struct {
	Keys []JWK `json:"keys"`
}

// JWK represents a JSON Web Key
type JWK struct {
	Kid string `json:"kid"`
	Kty string `json:"kty"`
	Alg string `json:"alg"`
	Use string `json:"use"`
	N   string `json:"n"`
	E   string `json:"e"`
	X   string `json:"x"`
	Crv string `json:"crv"`
}

// Validator validates JWT tokens
type Validator struct {
	jwksURL       string
	requiredScope string
	httpClient    *http.Client
	keys          map[string]interface{}
	keysMutex     sync.RWMutex
	lastFetch     time.Time
	cacheDuration time.Duration
}

// NewValidator creates a new JWT validator
func NewValidator(jwksURL, requiredScope string) *Validator {
	return &Validator{
		jwksURL:       jwksURL,
		requiredScope: requiredScope,
		httpClient: &http.Client{
			Timeout: 10 * time.Second,
		},
		keys:          make(map[string]interface{}),
		cacheDuration: 1 * time.Hour, // Cache JWKS for 1 hour
	}
}

// ValidateToken validates a JWT token and checks required scopes
func (v *Validator) ValidateToken(tokenString string) (*Claims, error) {
	// Parse token
	token, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (interface{}, error) {
		// Get key ID from header
		kid, ok := token.Header["kid"].(string)
		if !ok {
			return nil, fmt.Errorf("no kid in token header")
		}

		// Get public key for this kid
		publicKey, err := v.getPublicKey(kid)
		if err != nil {
			return nil, fmt.Errorf("get public key: %w", err)
		}

		switch token.Method.(type) {
		case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
			rsaKey, ok := publicKey.(*rsa.PublicKey)
			if !ok {
				return nil, fmt.Errorf("expected RSA public key for kid %s", kid)
			}
			return rsaKey, nil
		case *jwt.SigningMethodEd25519:
			edKey, ok := publicKey.(ed25519.PublicKey)
			if !ok {
				return nil, fmt.Errorf("expected Ed25519 public key for kid %s", kid)
			}
			return edKey, nil
		default:
			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
		}
	})

	if err != nil {
		return nil, fmt.Errorf("parse token: %w", err)
	}

	// Extract claims
	claims, ok := token.Claims.(*Claims)
	if !ok || !token.Valid {
		return nil, fmt.Errorf("invalid token claims")
	}

	// Validate expiration
	if claims.ExpiresAt != nil && claims.ExpiresAt.Before(time.Now()) {
		return nil, fmt.Errorf("token expired")
	}

	// Validate not before
	if claims.NotBefore != nil && claims.NotBefore.After(time.Now()) {
		return nil, fmt.Errorf("token not yet valid")
	}

	// Check required scope
	if v.requiredScope != "" {
		if !v.hasRequiredScope(claims) {
			return nil, fmt.Errorf("missing required scope: %s", v.requiredScope)
		}
	}

	return claims, nil
}

// hasRequiredScope checks if claims contain the required scope
func (v *Validator) hasRequiredScope(claims *Claims) bool {
	// Check scopes array
	for _, scope := range claims.Scopes {
		if scope == v.requiredScope {
			return true
		}
	}

	// Check space-separated scope string (OAuth2 style)
	if claims.Scope != "" {
		for _, scope := range parseScopes(claims.Scope) {
			if scope == v.requiredScope {
				return true
			}
		}
	}

	return false
}

// getPublicKey retrieves a public key by kid, fetching JWKS if needed
func (v *Validator) getPublicKey(kid string) (interface{}, error) {
	// Check if cache is expired
	v.keysMutex.RLock()
	cacheExpired := time.Since(v.lastFetch) > v.cacheDuration
	key, keyExists := v.keys[kid]
	v.keysMutex.RUnlock()

	// If key exists and cache is not expired, return it
	if keyExists && !cacheExpired {
		return key, nil
	}

	// Need to fetch JWKS (either key not found or cache expired)
	if err := v.fetchJWKS(); err != nil {
		return nil, fmt.Errorf("fetch JWKS: %w", err)
	}

	// Try again after fetch
	v.keysMutex.RLock()
	defer v.keysMutex.RUnlock()

	if key, ok := v.keys[kid]; ok {
		return key, nil
	}

	return nil, fmt.Errorf("key not found: %s", kid)
}

// fetchJWKS fetches and caches the JWKS from the server
func (v *Validator) fetchJWKS() error {
	log.Info().Str("url", v.jwksURL).Msg("Fetching JWKS")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, "GET", v.jwksURL, nil)
	if err != nil {
		return fmt.Errorf("create request: %w", err)
	}

	resp, err := v.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("http request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("JWKS fetch failed: status %d", resp.StatusCode)
	}

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("read response: %w", err)
	}

	var jwks JWKS
	if err := json.Unmarshal(body, &jwks); err != nil {
		return fmt.Errorf("unmarshal JWKS: %w", err)
	}

	// Parse and cache all keys
	newKeys := make(map[string]interface{})
	for _, jwk := range jwks.Keys {
		switch jwk.Kty {
		case "RSA":
			publicKey, err := jwk.toRSAPublicKey()
			if err != nil {
				log.Error().Err(err).Str("kid", jwk.Kid).Msg("Failed to parse RSA JWK")
				continue
			}
			newKeys[jwk.Kid] = publicKey
|
||||||
|
case "OKP":
|
||||||
|
if strings.EqualFold(jwk.Crv, "Ed25519") {
|
||||||
|
publicKey, err := jwk.toEd25519PublicKey()
|
||||||
|
if err != nil {
|
||||||
|
log.Error().Err(err).Str("kid", jwk.Kid).Msg("Failed to parse Ed25519 JWK")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
newKeys[jwk.Kid] = publicKey
|
||||||
|
} else {
|
||||||
|
log.Warn().Str("kid", jwk.Kid).Str("crv", jwk.Crv).Msg("Skipping unsupported OKP curve")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Warn().Str("kid", jwk.Kid).Str("kty", jwk.Kty).Msg("Skipping unsupported key type")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(newKeys) == 0 {
|
||||||
|
return fmt.Errorf("no valid keys found in JWKS")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update cache
|
||||||
|
v.keysMutex.Lock()
|
||||||
|
v.keys = newKeys
|
||||||
|
v.lastFetch = time.Now()
|
||||||
|
v.keysMutex.Unlock()
|
||||||
|
|
||||||
|
log.Info().Int("key_count", len(newKeys)).Msg("JWKS cached successfully")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// toRSAPublicKey converts a JWK to an RSA public key
|
||||||
|
func (jwk *JWK) toRSAPublicKey() (*rsa.PublicKey, error) {
|
||||||
|
// Decode N (modulus) - use base64 URL encoding without padding
|
||||||
|
nBytes, err := base64URLDecode(jwk.N)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("decode N: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode E (exponent)
|
||||||
|
eBytes, err := base64URLDecode(jwk.E)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("decode E: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert E bytes to int
|
||||||
|
var e int
|
||||||
|
for _, b := range eBytes {
|
||||||
|
e = e<<8 | int(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create RSA public key
|
||||||
|
publicKey := &rsa.PublicKey{
|
||||||
|
N: new(big.Int).SetBytes(nBytes),
|
||||||
|
E: e,
|
||||||
|
}
|
||||||
|
|
||||||
|
return publicKey, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// toEd25519PublicKey converts a JWK to an Ed25519 public key
|
||||||
|
func (jwk *JWK) toEd25519PublicKey() (ed25519.PublicKey, error) {
|
||||||
|
if jwk.X == "" {
|
||||||
|
return nil, fmt.Errorf("missing x coordinate for Ed25519 key")
|
||||||
|
}
|
||||||
|
|
||||||
|
xBytes, err := base64URLDecode(jwk.X)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("decode x: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(xBytes) != ed25519.PublicKeySize {
|
||||||
|
return nil, fmt.Errorf("invalid Ed25519 public key length: expected %d, got %d", ed25519.PublicKeySize, len(xBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
return ed25519.PublicKey(xBytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseScopes splits a space-separated scope string
|
||||||
|
func parseScopes(scopeString string) []string {
|
||||||
|
if scopeString == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var scopes []string
|
||||||
|
current := ""
|
||||||
|
for _, ch := range scopeString {
|
||||||
|
if ch == ' ' {
|
||||||
|
if current != "" {
|
||||||
|
scopes = append(scopes, current)
|
||||||
|
current = ""
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
current += string(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if current != "" {
|
||||||
|
scopes = append(scopes, current)
|
||||||
|
}
|
||||||
|
|
||||||
|
return scopes
|
||||||
|
}
|
||||||
|
|
||||||
|
// RefreshJWKS forces a refresh of the JWKS cache
|
||||||
|
func (v *Validator) RefreshJWKS() error {
|
||||||
|
return v.fetchJWKS()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCachedKeyCount returns the number of cached keys
|
||||||
|
func (v *Validator) GetCachedKeyCount() int {
|
||||||
|
v.keysMutex.RLock()
|
||||||
|
defer v.keysMutex.RUnlock()
|
||||||
|
return len(v.keys)
|
||||||
|
}
|
||||||
|
|
||||||
|
// base64URLDecode decodes a base64 URL-encoded string (with or without padding)
|
||||||
|
func base64URLDecode(s string) ([]byte, error) {
|
||||||
|
// Add padding if needed
|
||||||
|
if l := len(s) % 4; l > 0 {
|
||||||
|
s += strings.Repeat("=", 4-l)
|
||||||
|
}
|
||||||
|
return base64.URLEncoding.DecodeString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// base64URLEncode encodes bytes to base64 URL encoding without padding
|
||||||
|
func base64URLEncode(data []byte) string {
|
||||||
|
return strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=")
|
||||||
|
}
|
||||||
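Note: a minimal caller sketch for the validator above. The import path matches the one used by the proxy later in this changeset; the JWKS URL, scope, and token string are placeholders, not values from the diff.

```go
package main

import (
	"fmt"

	"chorus/pkg/seqthink/policy"
)

func main() {
	// Placeholder endpoint and scope; in this changeset the real values come
	// from ServerConfig.KachingJWKSURL and ServerConfig.RequiredScope.
	v := policy.NewValidator("https://kaching.example/.well-known/jwks.json", "sequentialthinking.run")

	// The token would normally come from an Authorization: Bearer header.
	claims, err := v.ValidateToken("<jwt>")
	if err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("authorized subject:", claims.Subject)
}
```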
354
pkg/seqthink/policy/jwt_test.go
Normal file
@@ -0,0 +1,354 @@
package policy

import (
	"crypto/rand"
	"crypto/rsa"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// generateTestKeyPair generates an RSA key pair for testing
func generateTestKeyPair() (*rsa.PrivateKey, error) {
	return rsa.GenerateKey(rand.Reader, 2048)
}

// createTestJWKS creates a test JWKS server
func createTestJWKS(t *testing.T, privateKey *rsa.PrivateKey) *httptest.Server {
	publicKey := &privateKey.PublicKey

	// Create JWK from public key
	jwk := JWK{
		Kid: "test-key-1",
		Kty: "RSA",
		Alg: "RS256",
		Use: "sig",
		N:   base64URLEncode(publicKey.N.Bytes()),
		E:   base64URLEncode([]byte{1, 0, 1}), // 65537
	}

	jwks := JWKS{
		Keys: []JWK{jwk},
	}

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(jwks)
	}))

	return server
}

// createTestToken creates a test JWT token
func createTestToken(privateKey *rsa.PrivateKey, claims *Claims) (string, error) {
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	token.Header["kid"] = "test-key-1"
	return token.SignedString(privateKey)
}

func TestValidateToken(t *testing.T) {
	// Generate test key pair
	privateKey, err := generateTestKeyPair()
	if err != nil {
		t.Fatalf("generate key pair: %v", err)
	}

	// Create test JWKS server
	jwksServer := createTestJWKS(t, privateKey)
	defer jwksServer.Close()

	// Create validator
	validator := NewValidator(jwksServer.URL, "sequentialthinking.run")

	// Test valid token
	t.Run("valid_token", func(t *testing.T) {
		claims := &Claims{
			Subject: "test-user",
			Scopes:  []string{"sequentialthinking.run"},
			RegisteredClaims: jwt.RegisteredClaims{
				ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
				IssuedAt:  jwt.NewNumericDate(time.Now()),
			},
		}

		tokenString, err := createTestToken(privateKey, claims)
		if err != nil {
			t.Fatalf("create token: %v", err)
		}

		validatedClaims, err := validator.ValidateToken(tokenString)
		if err != nil {
			t.Fatalf("validate token: %v", err)
		}

		if validatedClaims.Subject != "test-user" {
			t.Errorf("wrong subject: got %s, want test-user", validatedClaims.Subject)
		}
	})

	// Test expired token
	t.Run("expired_token", func(t *testing.T) {
		claims := &Claims{
			Subject: "test-user",
			Scopes:  []string{"sequentialthinking.run"},
			RegisteredClaims: jwt.RegisteredClaims{
				ExpiresAt: jwt.NewNumericDate(time.Now().Add(-1 * time.Hour)),
				IssuedAt:  jwt.NewNumericDate(time.Now().Add(-2 * time.Hour)),
			},
		}

		tokenString, err := createTestToken(privateKey, claims)
		if err != nil {
			t.Fatalf("create token: %v", err)
		}

		_, err = validator.ValidateToken(tokenString)
		if err == nil {
			t.Fatal("expected error for expired token")
		}
	})

	// Test missing scope
	t.Run("missing_scope", func(t *testing.T) {
		claims := &Claims{
			Subject: "test-user",
			Scopes:  []string{"other.scope"},
			RegisteredClaims: jwt.RegisteredClaims{
				ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
				IssuedAt:  jwt.NewNumericDate(time.Now()),
			},
		}

		tokenString, err := createTestToken(privateKey, claims)
		if err != nil {
			t.Fatalf("create token: %v", err)
		}

		_, err = validator.ValidateToken(tokenString)
		if err == nil {
			t.Fatal("expected error for missing scope")
		}
	})

	// Test space-separated scopes
	t.Run("space_separated_scopes", func(t *testing.T) {
		claims := &Claims{
			Subject: "test-user",
			Scope:   "read write sequentialthinking.run admin",
			RegisteredClaims: jwt.RegisteredClaims{
				ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
				IssuedAt:  jwt.NewNumericDate(time.Now()),
			},
		}

		tokenString, err := createTestToken(privateKey, claims)
		if err != nil {
			t.Fatalf("create token: %v", err)
		}

		validatedClaims, err := validator.ValidateToken(tokenString)
		if err != nil {
			t.Fatalf("validate token: %v", err)
		}

		if validatedClaims.Subject != "test-user" {
			t.Errorf("wrong subject: got %s, want test-user", validatedClaims.Subject)
		}
	})

	// Test not before
	t.Run("not_yet_valid", func(t *testing.T) {
		claims := &Claims{
			Subject: "test-user",
			Scopes:  []string{"sequentialthinking.run"},
			RegisteredClaims: jwt.RegisteredClaims{
				ExpiresAt: jwt.NewNumericDate(time.Now().Add(2 * time.Hour)),
				NotBefore: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
				IssuedAt:  jwt.NewNumericDate(time.Now()),
			},
		}

		tokenString, err := createTestToken(privateKey, claims)
		if err != nil {
			t.Fatalf("create token: %v", err)
		}

		_, err = validator.ValidateToken(tokenString)
		if err == nil {
			t.Fatal("expected error for not-yet-valid token")
		}
	})
}

func TestJWKSCaching(t *testing.T) {
	privateKey, err := generateTestKeyPair()
	if err != nil {
		t.Fatalf("generate key pair: %v", err)
	}

	fetchCount := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fetchCount++
		publicKey := &privateKey.PublicKey

		jwk := JWK{
			Kid: "test-key-1",
			Kty: "RSA",
			Alg: "RS256",
			Use: "sig",
			N:   base64URLEncode(publicKey.N.Bytes()),
			E:   base64URLEncode([]byte{1, 0, 1}),
		}

		jwks := JWKS{Keys: []JWK{jwk}}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(jwks)
	}))
	defer server.Close()

	validator := NewValidator(server.URL, "sequentialthinking.run")
	validator.cacheDuration = 100 * time.Millisecond // Short cache for testing

	claims := &Claims{
		Subject: "test-user",
		Scopes:  []string{"sequentialthinking.run"},
		RegisteredClaims: jwt.RegisteredClaims{
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(1 * time.Hour)),
			IssuedAt:  jwt.NewNumericDate(time.Now()),
		},
	}

	tokenString, err := createTestToken(privateKey, claims)
	if err != nil {
		t.Fatalf("create token: %v", err)
	}

	// First validation - should fetch JWKS
	_, err = validator.ValidateToken(tokenString)
	if err != nil {
		t.Fatalf("validate token: %v", err)
	}

	if fetchCount != 1 {
		t.Errorf("expected 1 fetch, got %d", fetchCount)
	}

	// Second validation - should use cache
	_, err = validator.ValidateToken(tokenString)
	if err != nil {
		t.Fatalf("validate token: %v", err)
	}

	if fetchCount != 1 {
		t.Errorf("expected 1 fetch (cached), got %d", fetchCount)
	}

	// Wait for cache to expire
	time.Sleep(150 * time.Millisecond)

	// Third validation - should fetch again
	_, err = validator.ValidateToken(tokenString)
	if err != nil {
		t.Fatalf("validate token: %v", err)
	}

	if fetchCount != 2 {
		t.Errorf("expected 2 fetches (cache expired), got %d", fetchCount)
	}
}

func TestParseScopes(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected []string
	}{
		{
			name:     "single_scope",
			input:    "read",
			expected: []string{"read"},
		},
		{
			name:     "multiple_scopes",
			input:    "read write admin",
			expected: []string{"read", "write", "admin"},
		},
		{
			name:     "extra_spaces",
			input:    "read  write   admin",
			expected: []string{"read", "write", "admin"},
		},
		{
			name:     "empty_string",
			input:    "",
			expected: nil,
		},
		{
			name:     "spaces_only",
			input:    "   ",
			expected: nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := parseScopes(tt.input)

			if len(result) != len(tt.expected) {
				t.Errorf("wrong length: got %d, want %d", len(result), len(tt.expected))
				return
			}

			for i, expected := range tt.expected {
				if result[i] != expected {
					t.Errorf("scope %d: got %s, want %s", i, result[i], expected)
				}
			}
		})
	}
}

func TestInvalidJWKS(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}))
	defer server.Close()

	validator := NewValidator(server.URL, "sequentialthinking.run")

	err := validator.RefreshJWKS()
	if err == nil {
		t.Fatal("expected error for invalid JWKS server")
	}
}

func TestGetCachedKeyCount(t *testing.T) {
	privateKey, err := generateTestKeyPair()
	if err != nil {
		t.Fatalf("generate key pair: %v", err)
	}

	jwksServer := createTestJWKS(t, privateKey)
	defer jwksServer.Close()

	validator := NewValidator(jwksServer.URL, "sequentialthinking.run")

	// Initially no keys
	if count := validator.GetCachedKeyCount(); count != 0 {
		t.Errorf("expected 0 cached keys initially, got %d", count)
	}

	// Refresh JWKS
	if err := validator.RefreshJWKS(); err != nil {
		t.Fatalf("refresh JWKS: %v", err)
	}

	// Should have 1 key
	if count := validator.GetCachedKeyCount(); count != 1 {
		t.Errorf("expected 1 cached key after refresh, got %d", count)
	}
}
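Note: these tests only exercise the RSA signing path. A sketch of an Ed25519 round-trip test for the OKP branch, assuming `crypto/ed25519` is added to the import block; the kid value is a placeholder.

```go
func TestEd25519JWKRoundTrip(t *testing.T) {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("generate ed25519 key: %v", err)
	}

	// Build an OKP JWK in the shape the validator's toEd25519PublicKey helper accepts.
	jwk := JWK{
		Kid: "test-key-ed25519",
		Kty: "OKP",
		Alg: "EdDSA",
		Use: "sig",
		Crv: "Ed25519",
		X:   base64URLEncode(pub), // 32-byte raw public key, base64url without padding
	}

	parsed, err := jwk.toEd25519PublicKey()
	if err != nil {
		t.Fatalf("parse Ed25519 JWK: %v", err)
	}
	if !parsed.Equal(pub) {
		t.Error("round-tripped key does not match original")
	}
}
```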
80
pkg/seqthink/policy/middleware.go
Normal file
@@ -0,0 +1,80 @@
package policy

import (
	"net/http"
	"strings"

	"github.com/rs/zerolog/log"
)

// AuthMiddleware creates HTTP middleware for JWT authentication
type AuthMiddleware struct {
	validator          *Validator
	policyDenials      func() // Metrics callback for policy denials
	enforcementEnabled bool
}

// NewAuthMiddleware creates a new authentication middleware
func NewAuthMiddleware(validator *Validator, policyDenials func()) *AuthMiddleware {
	return &AuthMiddleware{
		validator:          validator,
		policyDenials:      policyDenials,
		enforcementEnabled: validator != nil,
	}
}

// Wrap wraps an HTTP handler with JWT authentication
func (m *AuthMiddleware) Wrap(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// If enforcement is disabled, pass through
		if !m.enforcementEnabled {
			log.Warn().Msg("Policy enforcement disabled - allowing request")
			next.ServeHTTP(w, r)
			return
		}

		// Extract token from Authorization header
		authHeader := r.Header.Get("Authorization")
		if authHeader == "" {
			log.Error().Msg("Missing Authorization header")
			m.policyDenials()
			http.Error(w, "Unauthorized: missing authorization header", http.StatusUnauthorized)
			return
		}

		// Check Bearer scheme
		parts := strings.SplitN(authHeader, " ", 2)
		if len(parts) != 2 || parts[0] != "Bearer" {
			log.Error().Str("auth_header", authHeader).Msg("Invalid Authorization header format")
			m.policyDenials()
			http.Error(w, "Unauthorized: invalid authorization format", http.StatusUnauthorized)
			return
		}

		tokenString := parts[1]

		// Validate token
		claims, err := m.validator.ValidateToken(tokenString)
		if err != nil {
			log.Error().Err(err).Msg("Token validation failed")
			m.policyDenials()
			http.Error(w, "Unauthorized: "+err.Error(), http.StatusUnauthorized)
			return
		}

		log.Info().
			Str("subject", claims.Subject).
			Strs("scopes", claims.Scopes).
			Msg("Request authorized")

		// Token is valid, pass to next handler
		next.ServeHTTP(w, r)
	})
}

// WrapFunc wraps an HTTP handler function with JWT authentication
func (m *AuthMiddleware) WrapFunc(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		m.Wrap(next).ServeHTTP(w, r)
	}
}
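Note: a sketch of wiring this middleware with the standard library alone, outside the mux-based server below. The URL and scope are placeholders, and the empty denial callback stands in for the metrics counter the proxy passes in.

```go
package main

import (
	"net/http"

	"chorus/pkg/seqthink/policy"
)

func main() {
	validator := policy.NewValidator("https://kaching.example/jwks.json", "sequentialthinking.run")
	auth := policy.NewAuthMiddleware(validator, func() {}) // denial counter elided

	mux := http.NewServeMux()
	mux.Handle("/mcp/tool", auth.Wrap(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("authorized"))
	})))

	if err := http.ListenAndServe(":8080", mux); err != nil {
		panic(err)
	}
}
```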
185
pkg/seqthink/proxy/server.go
Normal file
@@ -0,0 +1,185 @@
package proxy

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"chorus/pkg/seqthink/mcpclient"
	"chorus/pkg/seqthink/observability"
	"chorus/pkg/seqthink/policy"
	"github.com/gorilla/mux"
	"github.com/rs/zerolog/log"
)

// ServerConfig holds the proxy server configuration
type ServerConfig struct {
	MCPClient      *mcpclient.Client
	Metrics        *observability.Metrics
	MaxBodyMB      int
	AgeIdentPath   string
	AgeRecipsPath  string
	KachingJWKSURL string
	RequiredScope  string
}

// Server is the proxy server handling requests
type Server struct {
	config         ServerConfig
	router         *mux.Router
	authMiddleware *policy.AuthMiddleware
}

// NewServer creates a new proxy server
func NewServer(cfg ServerConfig) (*Server, error) {
	s := &Server{
		config: cfg,
		router: mux.NewRouter(),
	}

	// Setup policy enforcement if configured
	if cfg.KachingJWKSURL != "" && cfg.RequiredScope != "" {
		log.Info().
			Str("jwks_url", cfg.KachingJWKSURL).
			Str("required_scope", cfg.RequiredScope).
			Msg("Policy enforcement enabled")

		validator := policy.NewValidator(cfg.KachingJWKSURL, cfg.RequiredScope)

		// Pre-fetch JWKS
		if err := validator.RefreshJWKS(); err != nil {
			log.Warn().Err(err).Msg("Failed to pre-fetch JWKS, will retry on first request")
		}

		s.authMiddleware = policy.NewAuthMiddleware(validator, cfg.Metrics.IncrementPolicyDenials)
	} else {
		log.Warn().Msg("Policy enforcement disabled - no JWKS URL or required scope configured")
		s.authMiddleware = policy.NewAuthMiddleware(nil, cfg.Metrics.IncrementPolicyDenials)
	}

	// Setup routes
	s.setupRoutes()

	return s, nil
}

// Handler returns the HTTP handler
func (s *Server) Handler() http.Handler {
	return s.router
}

// setupRoutes configures the HTTP routes
func (s *Server) setupRoutes() {
	// Health checks (no auth required)
	s.router.HandleFunc("/health", s.handleHealth).Methods("GET")
	s.router.HandleFunc("/ready", s.handleReady).Methods("GET")

	// MCP tool endpoint - route based on encryption config, with auth
	if s.isEncryptionEnabled() {
		log.Info().Msg("Encryption enabled - using encrypted endpoint")
		s.router.Handle("/mcp/tool",
			s.authMiddleware.Wrap(http.HandlerFunc(s.handleToolCallEncrypted))).Methods("POST")
	} else {
		log.Warn().Msg("Encryption disabled - using plaintext endpoint")
		s.router.Handle("/mcp/tool",
			s.authMiddleware.Wrap(http.HandlerFunc(s.handleToolCall))).Methods("POST")
	}

	// SSE endpoint - route based on encryption config, with auth
	if s.isEncryptionEnabled() {
		s.router.Handle("/mcp/sse",
			s.authMiddleware.Wrap(http.HandlerFunc(s.handleSSEEncrypted))).Methods("GET")
	} else {
		s.router.Handle("/mcp/sse",
			s.authMiddleware.Wrap(http.HandlerFunc(s.handleSSEPlaintext))).Methods("GET")
	}

	// Metrics endpoint (no auth required for internal monitoring)
	s.router.Handle("/metrics", s.config.Metrics.Handler())
}

// isEncryptionEnabled checks if encryption is configured
func (s *Server) isEncryptionEnabled() bool {
	return s.config.AgeIdentPath != "" && s.config.AgeRecipsPath != ""
}

// handleHealth returns 200 OK if wrapper is running
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("OK"))
}

// handleReady checks if MCP server is ready
func (s *Server) handleReady(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
	defer cancel()

	if err := s.config.MCPClient.Health(ctx); err != nil {
		log.Error().Err(err).Msg("MCP server not ready")
		http.Error(w, "MCP server not ready", http.StatusServiceUnavailable)
		return
	}

	w.WriteHeader(http.StatusOK)
	w.Write([]byte("READY"))
}

// handleToolCall proxies tool calls to MCP server (plaintext for Beat 1)
func (s *Server) handleToolCall(w http.ResponseWriter, r *http.Request) {
	s.config.Metrics.IncrementRequests()
	startTime := time.Now()

	// Limit request body size
	r.Body = http.MaxBytesReader(w, r.Body, int64(s.config.MaxBodyMB)*1024*1024)

	// Read request body
	body, err := io.ReadAll(r.Body)
	if err != nil {
		log.Error().Err(err).Msg("Failed to read request body")
		s.config.Metrics.IncrementErrors()
		http.Error(w, "Failed to read request", http.StatusBadRequest)
		return
	}

	// Parse tool request
	var toolReq mcpclient.ToolRequest
	if err := json.Unmarshal(body, &toolReq); err != nil {
		log.Error().Err(err).Msg("Failed to parse tool request")
		s.config.Metrics.IncrementErrors()
		http.Error(w, "Invalid request format", http.StatusBadRequest)
		return
	}

	log.Info().
		Str("tool", toolReq.Tool).
		Msg("Proxying tool call to MCP server")

	// Call MCP server
	ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
	defer cancel()

	toolResp, err := s.config.MCPClient.CallTool(ctx, &toolReq)
	if err != nil {
		log.Error().Err(err).Msg("MCP tool call failed")
		s.config.Metrics.IncrementErrors()
		http.Error(w, fmt.Sprintf("Tool call failed: %v", err), http.StatusInternalServerError)
		return
	}

	// Return response
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(toolResp); err != nil {
		log.Error().Err(err).Msg("Failed to encode response")
		s.config.Metrics.IncrementErrors()
		return
	}

	duration := time.Since(startTime)
	log.Info().
		Str("tool", toolReq.Tool).
		Dur("duration", duration).
		Msg("Tool call completed")
}
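Note: a sketch of standing the proxy up. `mcpclient.New` and `observability.NewMetrics` are hypothetical constructor names, not shown in this diff and possibly different in the real packages; the port, endpoint, and JWKS URL are placeholders.

```go
package main

import (
	"net/http"

	"chorus/pkg/seqthink/mcpclient"
	"chorus/pkg/seqthink/observability"
	"chorus/pkg/seqthink/proxy"
)

func main() {
	cfg := proxy.ServerConfig{
		MCPClient:      mcpclient.New("http://127.0.0.1:8000"), // hypothetical constructor
		Metrics:        observability.NewMetrics(),             // hypothetical constructor
		MaxBodyMB:      4,
		KachingJWKSURL: "https://kaching.example/jwks.json",
		RequiredScope:  "sequentialthinking.run",
		// Leaving AgeIdentPath/AgeRecipsPath empty selects the plaintext endpoints.
	}

	srv, err := proxy.NewServer(cfg)
	if err != nil {
		panic(err)
	}
	if err := http.ListenAndServe(":8443", srv.Handler()); err != nil {
		panic(err)
	}
}
```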
140
pkg/seqthink/proxy/server_encrypted.go
Normal file
@@ -0,0 +1,140 @@
package proxy

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"chorus/pkg/seqthink/ageio"
	"chorus/pkg/seqthink/mcpclient"
	"github.com/rs/zerolog/log"
)

// handleToolCallEncrypted proxies encrypted tool calls to MCP server (Beat 2)
func (s *Server) handleToolCallEncrypted(w http.ResponseWriter, r *http.Request) {
	s.config.Metrics.IncrementRequests()
	startTime := time.Now()

	// Check Content-Type header
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/age" {
		log.Error().
			Str("content_type", contentType).
			Msg("Invalid Content-Type, expected application/age")
		s.config.Metrics.IncrementErrors()
		http.Error(w, "Content-Type must be application/age", http.StatusUnsupportedMediaType)
		return
	}

	// Limit request body size
	r.Body = http.MaxBytesReader(w, r.Body, int64(s.config.MaxBodyMB)*1024*1024)

	// Read encrypted request body
	encryptedBody, err := io.ReadAll(r.Body)
	if err != nil {
		log.Error().Err(err).Msg("Failed to read encrypted request body")
		s.config.Metrics.IncrementErrors()
		http.Error(w, "Failed to read request", http.StatusBadRequest)
		return
	}

	// Create decryptor
	decryptor, err := ageio.NewDecryptor(s.config.AgeIdentPath)
	if err != nil {
		log.Error().Err(err).Msg("Failed to create decryptor")
		s.config.Metrics.IncrementErrors()
		http.Error(w, "Decryption initialization failed", http.StatusInternalServerError)
		return
	}

	// Decrypt request
	plaintext, err := decryptor.Decrypt(encryptedBody)
	if err != nil {
		log.Error().Err(err).Msg("Failed to decrypt request")
		s.config.Metrics.IncrementDecryptFails()
		http.Error(w, "Decryption failed", http.StatusBadRequest)
		return
	}

	log.Debug().
		Int("encrypted_size", len(encryptedBody)).
		Int("plaintext_size", len(plaintext)).
		Msg("Request decrypted successfully")

	// Parse tool request
	var toolReq mcpclient.ToolRequest
	if err := json.Unmarshal(plaintext, &toolReq); err != nil {
		log.Error().Err(err).Msg("Failed to parse decrypted tool request")
		s.config.Metrics.IncrementErrors()
		http.Error(w, "Invalid request format", http.StatusBadRequest)
		return
	}

	log.Info().
		Str("tool", toolReq.Tool).
		Msg("Proxying encrypted tool call to MCP server")

	// Call MCP server (plaintext internally)
	ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
	defer cancel()

	toolResp, err := s.config.MCPClient.CallTool(ctx, &toolReq)
	if err != nil {
		log.Error().Err(err).Msg("MCP tool call failed")
		s.config.Metrics.IncrementErrors()
		http.Error(w, fmt.Sprintf("Tool call failed: %v", err), http.StatusInternalServerError)
		return
	}

	// Serialize response
	responseJSON, err := json.Marshal(toolResp)
	if err != nil {
		log.Error().Err(err).Msg("Failed to marshal response")
		s.config.Metrics.IncrementErrors()
		http.Error(w, "Response serialization failed", http.StatusInternalServerError)
		return
	}

	// Create encryptor
	encryptor, err := ageio.NewEncryptor(s.config.AgeRecipsPath)
	if err != nil {
		log.Error().Err(err).Msg("Failed to create encryptor")
		s.config.Metrics.IncrementErrors()
		http.Error(w, "Encryption initialization failed", http.StatusInternalServerError)
		return
	}

	// Encrypt response
	encryptedResponse, err := encryptor.Encrypt(responseJSON)
	if err != nil {
		log.Error().Err(err).Msg("Failed to encrypt response")
		s.config.Metrics.IncrementEncryptFails()
		http.Error(w, "Encryption failed", http.StatusInternalServerError)
		return
	}

	log.Debug().
		Int("plaintext_size", len(responseJSON)).
		Int("encrypted_size", len(encryptedResponse)).
		Msg("Response encrypted successfully")

	// Return encrypted response
	w.Header().Set("Content-Type", "application/age")
	w.WriteHeader(http.StatusOK)
	if _, err := w.Write(encryptedResponse); err != nil {
		log.Error().Err(err).Msg("Failed to write encrypted response")
		s.config.Metrics.IncrementErrors()
		return
	}

	duration := time.Since(startTime)
	s.config.Metrics.ObserveRequestDuration(duration.Seconds())
	log.Info().
		Str("tool", toolReq.Tool).
		Dur("duration", duration).
		Bool("encrypted", true).
		Msg("Tool call completed")
}
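Note: the client side of this encrypted round trip, sketched with the same ageio helpers the wrapper imports. Paths, URL, and token are placeholders, the request JSON shape is assumed from the ToolRequest usage above, and key distribution between client and wrapper is out of scope here.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"

	"chorus/pkg/seqthink/ageio"
)

func main() {
	// Encrypt the tool request to the wrapper's age recipients.
	enc, err := ageio.NewEncryptor("/etc/seqthink/wrapper.recips")
	if err != nil {
		panic(err)
	}
	ciphertext, err := enc.Encrypt([]byte(`{"tool":"sequentialthinking"}`)) // assumed shape
	if err != nil {
		panic(err)
	}

	req, _ := http.NewRequest("POST", "https://wrapper.example/mcp/tool", bytes.NewReader(ciphertext))
	req.Header.Set("Content-Type", "application/age")
	req.Header.Set("Authorization", "Bearer <jwt>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decrypt the response with the identity matching the wrapper's recipients.
	encryptedBody, _ := io.ReadAll(resp.Body)
	dec, err := ageio.NewDecryptor("/etc/seqthink/client.agekey")
	if err != nil {
		panic(err)
	}
	plaintext, err := dec.Decrypt(encryptedBody)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext))
}
```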
242
pkg/seqthink/proxy/sse.go
Normal file
@@ -0,0 +1,242 @@
package proxy

import (
	"bufio"
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"io"
	"net/http"
	"time"

	"chorus/pkg/seqthink/ageio"
	"github.com/rs/zerolog/log"
)

// SSEFrame represents a single Server-Sent Event frame
type SSEFrame struct {
	Event string `json:"event,omitempty"`
	Data  string `json:"data"`
	ID    string `json:"id,omitempty"`
}

// handleSSEEncrypted handles encrypted Server-Sent Events streaming
func (s *Server) handleSSEEncrypted(w http.ResponseWriter, r *http.Request) {
	s.config.Metrics.IncrementRequests()
	startTime := time.Now()

	// Set SSE headers
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("X-Accel-Buffering", "no") // Disable nginx buffering

	// Create flusher for streaming
	flusher, ok := w.(http.Flusher)
	if !ok {
		log.Error().Msg("Streaming not supported")
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}

	// Create encryptor for streaming
	encryptor, err := ageio.NewEncryptor(s.config.AgeRecipsPath)
	if err != nil {
		log.Error().Err(err).Msg("Failed to create encryptor")
		http.Error(w, "Encryption initialization failed", http.StatusInternalServerError)
		return
	}

	// Create context with timeout
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Minute)
	defer cancel()

	log.Info().Msg("Starting encrypted SSE stream")

	// Simulate streaming encrypted frames
	// In production, this would stream from MCP server
	frameCount := 0
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			log.Info().
				Int("frames_sent", frameCount).
				Dur("duration", time.Since(startTime)).
				Msg("SSE stream closed")
			return

		case <-ticker.C:
			frameCount++

			// Create frame data
			frameData := fmt.Sprintf(`{"thought_number":%d,"thought":"Processing...","next_thought_needed":true}`, frameCount)

			// Encrypt frame
			encryptedFrame, err := encryptor.Encrypt([]byte(frameData))
			if err != nil {
				log.Error().Err(err).Msg("Failed to encrypt SSE frame")
				continue
			}

			// Base64 encode for SSE transmission
			encodedFrame := base64.StdEncoding.EncodeToString(encryptedFrame)

			// Send SSE frame
			fmt.Fprintf(w, "event: thought\n")
			fmt.Fprintf(w, "data: %s\n", encodedFrame)
			fmt.Fprintf(w, "id: %d\n\n", frameCount)
			flusher.Flush()

			log.Debug().
				Int("frame", frameCount).
				Int("encrypted_size", len(encryptedFrame)).
				Msg("Sent encrypted SSE frame")

			// Stop after 10 frames for demo
			if frameCount >= 10 {
				fmt.Fprintf(w, "event: done\n")
				fmt.Fprintf(w, "data: complete\n\n")
				flusher.Flush()

				log.Info().
					Int("frames_sent", frameCount).
					Dur("duration", time.Since(startTime)).
					Msg("SSE stream completed")
				return
			}
		}
	}
}

// handleSSEPlaintext handles plaintext Server-Sent Events streaming
func (s *Server) handleSSEPlaintext(w http.ResponseWriter, r *http.Request) {
	s.config.Metrics.IncrementRequests()
	startTime := time.Now()

	// Set SSE headers
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("X-Accel-Buffering", "no")

	// Create flusher for streaming
	flusher, ok := w.(http.Flusher)
	if !ok {
		log.Error().Msg("Streaming not supported")
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}

	// Create context with timeout
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Minute)
	defer cancel()

	log.Info().Msg("Starting plaintext SSE stream")

	// Simulate streaming frames
	frameCount := 0
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			log.Info().
				Int("frames_sent", frameCount).
				Dur("duration", time.Since(startTime)).
				Msg("SSE stream closed")
			return

		case <-ticker.C:
			frameCount++

			// Create frame data
			frameData := fmt.Sprintf(`{"thought_number":%d,"thought":"Processing...","next_thought_needed":true}`, frameCount)

			// Send SSE frame
			fmt.Fprintf(w, "event: thought\n")
			fmt.Fprintf(w, "data: %s\n", frameData)
			fmt.Fprintf(w, "id: %d\n\n", frameCount)
			flusher.Flush()

			log.Debug().
				Int("frame", frameCount).
				Msg("Sent plaintext SSE frame")

			// Stop after 10 frames for demo
			if frameCount >= 10 {
				fmt.Fprintf(w, "event: done\n")
				fmt.Fprintf(w, "data: complete\n\n")
				flusher.Flush()

				log.Info().
					Int("frames_sent", frameCount).
					Dur("duration", time.Since(startTime)).
					Msg("SSE stream completed")
				return
			}
		}
	}
}

// DecryptSSEFrame decrypts a base64-encoded encrypted SSE frame
func DecryptSSEFrame(encodedFrame string, identityPath string) ([]byte, error) {
	// Base64 decode
	encryptedFrame, err := base64.StdEncoding.DecodeString(encodedFrame)
	if err != nil {
		return nil, fmt.Errorf("base64 decode: %w", err)
	}

	// Create decryptor
	decryptor, err := ageio.NewDecryptor(identityPath)
	if err != nil {
		return nil, fmt.Errorf("create decryptor: %w", err)
	}

	// Decrypt
	plaintext, err := decryptor.Decrypt(encryptedFrame)
	if err != nil {
		return nil, fmt.Errorf("decrypt: %w", err)
	}

	return plaintext, nil
}

// ReadSSEStream reads an SSE stream and returns frames
func ReadSSEStream(r io.Reader) ([]SSEFrame, error) {
	var frames []SSEFrame
	scanner := bufio.NewScanner(r)

	var currentFrame SSEFrame
	for scanner.Scan() {
		line := scanner.Text()

		if line == "" {
			// Empty line signals end of frame
			if currentFrame.Data != "" {
				frames = append(frames, currentFrame)
				currentFrame = SSEFrame{}
			}
			continue
		}

		// Parse SSE field
		if bytes.HasPrefix([]byte(line), []byte("event: ")) {
			currentFrame.Event = line[7:]
		} else if bytes.HasPrefix([]byte(line), []byte("data: ")) {
			currentFrame.Data = line[6:]
		} else if bytes.HasPrefix([]byte(line), []byte("id: ")) {
			currentFrame.ID = line[4:]
		}
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("scan stream: %w", err)
	}

	return frames, nil
}
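Note: a consumer sketch using the two helpers above. ReadSSEStream reads until the stream ends, so against the ten-frame demo stream it returns all thought frames plus the final done frame at once; URL, token, and identity path are placeholders.

```go
package main

import (
	"fmt"
	"net/http"

	"chorus/pkg/seqthink/proxy"
)

func main() {
	req, _ := http.NewRequest("GET", "https://wrapper.example/mcp/sse", nil)
	req.Header.Set("Authorization", "Bearer <jwt>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	frames, err := proxy.ReadSSEStream(resp.Body)
	if err != nil {
		panic(err)
	}
	for _, f := range frames {
		if f.Event != "thought" {
			continue // skip the final "done" frame
		}
		plaintext, err := proxy.DecryptSSEFrame(f.Data, "/etc/seqthink/client.agekey")
		if err != nil {
			panic(err)
		}
		fmt.Println(string(plaintext))
	}
}
```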
218
pkg/slurp/context/lightrag.go
Normal file
@@ -0,0 +1,218 @@
package context

import (
	"context"
	"fmt"
	"strings"

	"chorus/pkg/mcp"
	"chorus/pkg/ucxl"
)

// LightRAGEnricher enriches context nodes with RAG-retrieved information
type LightRAGEnricher struct {
	client      *mcp.LightRAGClient
	defaultMode mcp.QueryMode
	enabled     bool
}

// NewLightRAGEnricher creates a new LightRAG context enricher
func NewLightRAGEnricher(client *mcp.LightRAGClient, defaultMode string) *LightRAGEnricher {
	if client == nil {
		return &LightRAGEnricher{enabled: false}
	}

	mode := mcp.QueryModeHybrid // Default to hybrid
	switch defaultMode {
	case "naive":
		mode = mcp.QueryModeNaive
	case "local":
		mode = mcp.QueryModeLocal
	case "global":
		mode = mcp.QueryModeGlobal
	case "hybrid":
		mode = mcp.QueryModeHybrid
	}

	return &LightRAGEnricher{
		client:      client,
		defaultMode: mode,
		enabled:     true,
	}
}

// EnrichContextNode enriches a ContextNode with LightRAG data
// This queries LightRAG for relevant information and adds it to the node's insights
func (e *LightRAGEnricher) EnrichContextNode(ctx context.Context, node *ContextNode) error {
	if !e.enabled || e.client == nil {
		return nil // No-op if not enabled
	}

	// Build query from node information
	query := e.buildQuery(node)
	if query == "" {
		return nil // Nothing to query
	}

	// Query LightRAG for context
	ragContext, err := e.client.GetContext(ctx, query, e.defaultMode)
	if err != nil {
		// Non-fatal - just log and continue
		return fmt.Errorf("lightrag query failed (non-fatal): %w", err)
	}

	// Add RAG context to insights if we got meaningful data
	if strings.TrimSpace(ragContext) != "" {
		insight := fmt.Sprintf("RAG Context: %s", strings.TrimSpace(ragContext))
		node.Insights = append(node.Insights, insight)

		// Update RAG confidence based on response quality
		// This is a simple heuristic - could be more sophisticated
		if len(ragContext) > 100 {
			node.RAGConfidence = 0.8
		} else if len(ragContext) > 50 {
			node.RAGConfidence = 0.6
		} else {
			node.RAGConfidence = 0.4
		}
	}

	return nil
}

// EnrichResolvedContext enriches a ResolvedContext with LightRAG data
// This is called after context resolution to add additional RAG-retrieved insights
func (e *LightRAGEnricher) EnrichResolvedContext(ctx context.Context, resolved *ResolvedContext) error {
	if !e.enabled || e.client == nil {
		return nil // No-op if not enabled
	}

	// Build query from resolved context
	query := fmt.Sprintf("Purpose: %s\nSummary: %s\nTechnologies: %s",
		resolved.Purpose,
		resolved.Summary,
		strings.Join(resolved.Technologies, ", "))

	// Query LightRAG
	ragContext, err := e.client.GetContext(ctx, query, e.defaultMode)
	if err != nil {
		return fmt.Errorf("lightrag query failed (non-fatal): %w", err)
	}

	// Add to insights if meaningful
	if strings.TrimSpace(ragContext) != "" {
		insight := fmt.Sprintf("RAG Enhancement: %s", strings.TrimSpace(ragContext))
		resolved.Insights = append(resolved.Insights, insight)

		// Boost confidence slightly if RAG provided good context
		if len(ragContext) > 100 {
			resolved.ResolutionConfidence = min(1.0, resolved.ResolutionConfidence*1.1)
		}
	}

	return nil
}

// EnrichBatchResolution enriches a batch resolution with LightRAG data
// Efficiently processes multiple addresses by batching queries where possible
func (e *LightRAGEnricher) EnrichBatchResolution(ctx context.Context, batch *BatchResolutionResult) error {
	if !e.enabled || e.client == nil {
		return nil // No-op if not enabled
	}

	// Enrich each resolved context
	for _, resolved := range batch.Results {
		if err := e.EnrichResolvedContext(ctx, resolved); err != nil {
			// Log error but continue with other contexts
			// Errors are non-fatal for enrichment
			continue
		}
	}

	return nil
}

// InsertContextNode inserts a context node into LightRAG for future retrieval
// This builds the knowledge base over time as contexts are created
func (e *LightRAGEnricher) InsertContextNode(ctx context.Context, node *ContextNode) error {
	if !e.enabled || e.client == nil {
		return nil // No-op if not enabled
	}

	// Build text representation of the context node
	text := e.buildTextRepresentation(node)
	description := fmt.Sprintf("Context for %s: %s", node.Path, node.Summary)

	// Insert into LightRAG
	if err := e.client.Insert(ctx, text, description); err != nil {
		return fmt.Errorf("failed to insert context into lightrag: %w", err)
	}

	return nil
}

// IsEnabled returns whether LightRAG enrichment is enabled
func (e *LightRAGEnricher) IsEnabled() bool {
	return e.enabled
}

// buildQuery constructs a search query from a ContextNode
func (e *LightRAGEnricher) buildQuery(node *ContextNode) string {
	var parts []string

	if node.Purpose != "" {
		parts = append(parts, node.Purpose)
	}

	if node.Summary != "" {
		parts = append(parts, node.Summary)
	}

	if len(node.Technologies) > 0 {
		parts = append(parts, strings.Join(node.Technologies, " "))
	}

	if len(node.Tags) > 0 {
		parts = append(parts, strings.Join(node.Tags, " "))
	}

	return strings.Join(parts, " ")
}

// buildTextRepresentation builds a text representation for storage in LightRAG
func (e *LightRAGEnricher) buildTextRepresentation(node *ContextNode) string {
	var builder strings.Builder

	builder.WriteString(fmt.Sprintf("Path: %s\n", node.Path))
	builder.WriteString(fmt.Sprintf("UCXL Address: %s\n", node.UCXLAddress.String()))
	builder.WriteString(fmt.Sprintf("Summary: %s\n", node.Summary))
	builder.WriteString(fmt.Sprintf("Purpose: %s\n", node.Purpose))

	if len(node.Technologies) > 0 {
		builder.WriteString(fmt.Sprintf("Technologies: %s\n", strings.Join(node.Technologies, ", ")))
	}

	if len(node.Tags) > 0 {
		builder.WriteString(fmt.Sprintf("Tags: %s\n", strings.Join(node.Tags, ", ")))
	}

	if len(node.Insights) > 0 {
		builder.WriteString("Insights:\n")
		for _, insight := range node.Insights {
			builder.WriteString(fmt.Sprintf("  - %s\n", insight))
		}
	}

	if node.Language != nil {
		builder.WriteString(fmt.Sprintf("Language: %s\n", *node.Language))
	}

	return builder.String()
}

func min(a, b float64) float64 {
	if a < b {
		return a
	}
	return b
}
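Note: a wiring sketch for the enricher. `mcp.NewLightRAGClient` is a hypothetical constructor name not shown in this diff; the endpoint and node values are placeholders. The package itself is named context, hence the import alias.

```go
package main

import (
	"context"
	"log"

	"chorus/pkg/mcp"
	slurpctx "chorus/pkg/slurp/context"
)

func main() {
	client := mcp.NewLightRAGClient("http://lightrag:9621") // hypothetical constructor
	enricher := slurpctx.NewLightRAGEnricher(client, "hybrid")

	node := &slurpctx.ContextNode{
		Path:    "pkg/seqthink/proxy",
		Summary: "Encrypted proxy between clients and the sequential-thinking MCP server",
		Purpose: "Policy-enforced, age-encrypted tool transport",
	}

	// Enrichment failures are treated as non-fatal, matching the methods above.
	if err := enricher.EnrichContextNode(context.Background(), node); err != nil {
		log.Printf("enrichment skipped: %v", err)
	}
	log.Printf("insights: %v (RAG confidence %.1f)", node.Insights, node.RAGConfidence)
}
```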
@@ -1,103 +1,523 @@
|
|||||||
Default Instructions (D)
|
Rule 0: Ground rule (precedence)
|
||||||
|
Precedence: Internal Project Context (UCXL/DRs) → Native training → Web.
|
||||||
|
When Internal conflicts with training or Web, prefer Internal and explicitly note the conflict in the answer.
|
||||||
|
Privacy: Do not echo UCXL content that is marked restricted by SHHH.
|
||||||
|
|
||||||
Operating Policy
|
---
|
||||||
- Be precise, verifiable, and do not fabricate. Surface uncertainties.
|
Rule T: Traceability and BACKBEAT cadence (Suite 2.0.0)
|
||||||
- Prefer minimal, auditable changes; record decisions in UCXL.
|
|
||||||
- Preserve API compatibility, data safety, and security constraints. Escalate when blocked.
|
|
||||||
- Include UCXL citations for any external facts or prior decisions.
|
|
||||||
|
|
||||||
When To Use Subsystems
|
Agents must operate under the unified requirement ID scheme and tempo semantics:
|
||||||
- HMMM (collaborative reasoning): Cross-agent clarification, planning critique, consensus seeking, or targeted questions to unblock progress. Publish on `hmmm/meta-discussion/v1`.
|
- IDs: Use canonical `PROJ-CAT-###` (e.g., CHORUS-INT-004). Cite IDs in code blocks, proposals, and when emitting commit/PR subjects.
|
||||||
- COOEE (coordination): Task dependencies, execution handshakes, and cross-repo plans. Publish on `CHORUS/coordination/v1`.
|
- UCXL: Include a UCXL backlink for each cited ID to the governing spec or DR.
|
||||||
- UCXL (context): Read decisions/specs/plans by UCXL address. Write new decisions and evidence using the decision bundle envelope. Never invent UCXL paths.
|
- Reference immutable UCXL revisions (content-addressed or versioned). Do not cite floating “latest” refs in Completion Proposals.
|
||||||
- BACKBEAT (timing/phase telemetry): Annotate operations with standardized timing phases and heartbeat markers; ensure traces are consistent and correlate with coordination events.
|
- Cadence: Treat BACKBEAT as authoritative. Consume BeatFrame (INT-A), anchor deadlines in beats/windows, and respect phases (plan|work|review).
|
||||||
|
- Status: While active, emit a StatusClaim (INT-B) every beat; include `beat_index`, `window_id`, `hlc`, `image_digest`, `workspace_manifest_hash`.
|
||||||
|
- Evidence: Attach logs/metrics keyed by `goal.ids`, `window_id`, `beat_index`, `hlc` in proposals and reviews.
|
||||||
|
|
||||||
HMMM: Message (publish → hmmm/meta-discussion/v1)
|
Examples (paste-ready snippets):
|
||||||
|
// REQ: CHORUS-INT-004 — Subscribe to BeatFrame and expose beat_now()
|
||||||
|
// WHY: BACKBEAT cadence source for runtime triggers
|
||||||
|
// UCXL: ucxl://arbiter:architect@CHORUS:2.0.0/#/planning/2.0.0-020-cross-project-contracts.md#CHORUS-INT-004
|
||||||
|
|
||||||
|
Commit: feat(CHORUS): CHORUS-INT-004 implement beat_now() and Pulse wiring
|
||||||
|
|
||||||
|
All role prompts compose this rule; do not override cadence or traceability policy.
|
||||||
|
|
||||||
|
---
|
||||||
|
Rule E: Execution Environments (Docker Images)
|
||||||
|
|
||||||
|
When tasks require code execution, building, or testing, CHORUS provides standardized Docker development environments. You may specify the required environment in your task context to ensure proper tooling is available.
|
||||||
|
|
||||||
|
Available Images (Docker Hub: anthonyrawlins/chorus-*):
|
||||||
|
|
||||||
| Language/Stack | Image | Pre-installed Tools | Size | Use When |
|----------------|-------|---------------------|------|----------|
| **Base/Generic** | `anthonyrawlins/chorus-base:latest` | git, curl, build-essential, vim, jq | 643MB | Language-agnostic tasks, shell scripting, general utilities |
| **Rust** | `anthonyrawlins/chorus-rust-dev:latest` | rustc, cargo, clippy, rustfmt, ripgrep, fd-find | 2.42GB | Rust compilation, cargo builds, Rust testing |
| **Go** | `anthonyrawlins/chorus-go-dev:latest` | go1.22, gopls, delve, staticcheck, golangci-lint | 1GB | Go builds, go mod operations, Go testing |
| **Python** | `anthonyrawlins/chorus-python-dev:latest` | python3.11, uv, ruff, black, pytest, mypy | 1.07GB | Python execution, pip/uv installs, pytest |
| **Node.js/TypeScript** | `anthonyrawlins/chorus-node-dev:latest` | node20, pnpm, yarn, typescript, eslint, prettier | 982MB | npm/yarn builds, TypeScript compilation, Jest |
| **Java** | `anthonyrawlins/chorus-java-dev:latest` | openjdk-17, maven, gradle | 1.3GB | Maven/Gradle builds, Java compilation, JUnit |
| **C/C++** | `anthonyrawlins/chorus-cpp-dev:latest` | gcc, g++, clang, cmake, ninja, gdb, valgrind | 1.63GB | CMake builds, C/C++ compilation, native debugging |
Workspace Structure (all images):

```
/workspace/
├── input/  - Read-only: source code, task inputs, repository checkouts
├── data/   - Working directory: builds, temporary files, scratch space
└── output/ - Deliverables: compiled binaries, test reports, patches, artifacts
```
Specifying Execution Environment:

Include the language in your task context or description to auto-select the appropriate image:

**Explicit (recommended for clarity)**:

```json
{
  "task_id": "PROJ-001",
  "description": "Fix compilation error",
  "context": {
    "language": "rust",
    "repository_url": "https://github.com/user/my-app"
  }
}
```
**Implicit (auto-detected from description keywords)**:

- Keywords trigger selection: "cargo build" → rust-dev, "npm install" → node-dev, "pytest" → python-dev
- Repository patterns: URLs with `-rs`, `-go`, `-py` suffixes hint at the language
- Fallback: If the language is unclear, the base image is used

Auto-detection priority (a sketch of this order follows the list):

1. Explicit `context.language` field (highest)
2. AI model name hints (e.g., a "rust-coder" model)
3. Repository URL patterns
4. Description keyword analysis (lowest)
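For illustration only, a minimal Go sketch of that priority order; the function name, image table, and keyword list are assumptions for the example, not the actual CHORUS selector:

```go
package selector

import "strings"

// Ordered so earlier entries win deterministically.
var devImages = []struct{ lang, image string }{
	{"rust", "anthonyrawlins/chorus-rust-dev:latest"},
	{"go", "anthonyrawlins/chorus-go-dev:latest"},
	{"python", "anthonyrawlins/chorus-python-dev:latest"},
	{"node", "anthonyrawlins/chorus-node-dev:latest"},
}

func imageFor(lang string) (string, bool) {
	for _, d := range devImages {
		if d.lang == lang {
			return d.image, true
		}
	}
	return "", false
}

// detectImage applies the priority order: explicit language, model-name hint,
// repository URL suffix, then description keywords; the base image is the fallback.
func detectImage(language, model, repoURL, description string) string {
	if img, ok := imageFor(strings.ToLower(language)); ok {
		return img // 1. explicit context.language (highest)
	}
	for _, d := range devImages {
		if strings.Contains(strings.ToLower(model), d.lang) {
			return d.image // 2. model name hint, e.g. a "rust-coder" model
		}
	}
	for suffix, lang := range map[string]string{"-rs": "rust", "-go": "go", "-py": "python"} {
		if strings.HasSuffix(repoURL, suffix) {
			if img, ok := imageFor(lang); ok {
				return img // 3. repository URL pattern
			}
		}
	}
	for _, k := range []struct{ kw, lang string }{{"cargo", "rust"}, {"go mod", "go"}, {"pytest", "python"}, {"npm", "node"}} {
		if strings.Contains(description, k.kw) {
			if img, ok := imageFor(k.lang); ok {
				return img // 4. description keywords (lowest)
			}
		}
	}
	return "anthonyrawlins/chorus-base:latest" // fallback
}
```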
When proposing task execution plans, you may recommend the appropriate environment:

```markdown
## Execution Plan

**Environment**: `anthonyrawlins/chorus-rust-dev@sha256:<digest>` (tags are allowed only in human-facing copy; the agent must pull by digest).

Note: The agent must refuse to run if the requested image is not pinned by digest.

**Rationale**: Task requires cargo build and clippy linting for a Rust codebase

**Steps**:
1. Mount repository to `/workspace/input` (read-only)
2. Run `cargo build --release` in `/workspace/data`
3. Execute `cargo clippy` for lint checks
4. Copy binary to `/workspace/output/` for delivery
```
Notes:

- All images run as non-root user `chorus` (UID 1000)
- Images are publicly available on Docker Hub (no authentication required)
- Environment variables set: `WORKSPACE_ROOT`, `WORKSPACE_INPUT`, `WORKSPACE_DATA`, `WORKSPACE_OUTPUT`
- Docker Hub links: https://hub.docker.com/r/anthonyrawlins/chorus-{base,rust-dev,go-dev,python-dev,node-dev,java-dev,cpp-dev}

---
Rule O: Output Formats for Artifact Extraction

When your task involves creating or modifying files, you MUST format your response so that CHORUS can extract and process the artifacts. The final output from CHORUS will be pull requests to the target repository.

**File Creation Format:**

Always use markdown code blocks with filenames in backticks immediately before the code block:
```markdown
Create file `src/main.rs`:

```rust
fn main() {
    println!("Hello, world!");
}
```
```
**Alternative patterns (all supported):**

```markdown
The file `config.yaml` should have the following content:

```yaml
version: "1.0"
services: []
```

File named `script.sh` with this code:

```bash
#!/bin/bash
echo "Task complete"
```
```
**File Modifications:**

When modifying existing files, provide the complete new content in the same format:

```markdown
Update file `package.json`:

```json
{
  "name": "my-app",
  "version": "2.0.0",
  "scripts": {
    "test": "jest",
    "build": "webpack"
  }
}
```
```
**Multi-File Artifacts:**

For tasks requiring multiple files, provide each file separately:

```markdown
Create file `src/lib.rs`:

```rust
pub fn add(a: i32, b: i32) -> i32 {
    a + b
}
```

Create file `tests/test_lib.rs`:

```rust
use mylib::add;

#[test]
fn test_add() {
    assert_eq!(add(2, 2), 4);
}
```
```
**What NOT to do:**

- ❌ Don't provide only instructions without file content
- ❌ Don't use code blocks without filename context
- ❌ Don't split file content across multiple code blocks
- ❌ Don't use relative descriptions like "add this to the file"
**What happens next:**

1. CHORUS extracts your file content from the markdown code blocks
2. Files are written to the local workspace
3. Tests and validation are run
4. A pull request is created with all changes
5. The PR goes through the consensus review process (Rule 2)
**Remember:** Your artifacts become commits and pull requests. Structure your response so the extraction is unambiguous.

---
Rule F: Network, Uploads, and Credential Boundaries

F.1 Zero-Egress by Default

Task containers MUST start with NetworkMode: "none".

No packets leave the container. All data exchange occurs via the mounted workspace:

/workspace/input (RO), /workspace/data (RW scratch), /workspace/output (deliverables).

Enforced runtime flags (agent must apply):

--network none
--read-only (except bind mounts)
--cap-drop ALL
--security-opt no-new-privileges
--pids-limit 512 (or tighter)
--memory, --cpus (resource limits)
--mount type=bind,src=<host>/input,dst=/workspace/input,ro
--mount type=bind,src=<host>/data,dst=/workspace/data,rw
--mount type=bind,src=<host>/output,dst=/workspace/output,rw

Agent validation (must pass before start):

Network mode is none.
No secret volumes or files are mounted (e.g., no ~/.aws, ~/.ssh, gcloud, kubeconfig, netrc, OAuth tokens).
Image user is non-root (chorus, UID 1000).
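As a rough sketch of how the agent might apply these flags programmatically, assuming a recent Docker Go SDK (and, per Rule N, the local Unix socket); the mount sources, limits, and container name are illustrative assumptions:

```go
package sandbox

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/client"
)

// createTaskContainer creates a zero-egress task container; it is not started
// until the preflight validation above has passed.
func createTaskContainer(ctx context.Context, cli *client.Client, imageDigest string) (string, error) {
	pids := int64(512)
	resp, err := cli.ContainerCreate(ctx,
		&container.Config{
			Image: imageDigest, // pinned repo@sha256:<digest>, per Rule I
			User:  "1000:1000", // non-root chorus user
		},
		&container.HostConfig{
			NetworkMode:    "none", // zero egress by default
			ReadonlyRootfs: true,   // --read-only
			CapDrop:        strslice.StrSlice{"ALL"},
			SecurityOpt:    []string{"no-new-privileges"},
			Resources: container.Resources{
				Memory:    2 << 30,       // --memory 2g (illustrative)
				NanoCPUs:  2_000_000_000, // --cpus 2 (illustrative)
				PidsLimit: &pids,         // --pids-limit 512
			},
			Mounts: []mount.Mount{
				{Type: mount.TypeBind, Source: "/srv/task/input", Target: "/workspace/input", ReadOnly: true},
				{Type: mount.TypeBind, Source: "/srv/task/data", Target: "/workspace/data"},
				{Type: mount.TypeBind, Source: "/srv/task/output", Target: "/workspace/output"},
			},
		},
		nil, nil, "chorus-task")
	if err != nil {
		return "", err
	}
	return resp.ID, nil
}
```

The same preflight would then re-check the validation items above (network mode, no secret mounts, non-root user) via container inspection before starting the task.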
F.2 Controlled Temporary Egress (Pull-Only)

If a task must download dependencies:

The agent attaches the container ephemerally to pull-net (a locked-down Docker network that egresses only via an HTTP(S) proxy).
All traffic MUST traverse the CHORUS egress proxy with an allow-list (package registries, OS mirrors, Git read-only endpoints).
POST/PUT/UPLOAD endpoints are blocked at the proxy.
Block CONNECT tunneling, block WebSocket upgrades, block Git smart-HTTP push endpoints.
No credentials are injected into the task container. Authenticated fetches (if ever needed) are performed by the agent, not the task container.

Procedural steps:

1. Start the container with --network none.
2. If a pull is needed: docker network connect pull-net <cid> → run the pull step → docker network disconnect pull-net <cid>.
3. Continue execution with --network none.
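A sketch of that connect/pull/disconnect sequence with the Docker Go SDK, assuming the network is named pull-net and the pull step is supplied by the caller:

```go
package sandbox

import (
	"context"

	"github.com/docker/docker/client"
)

// runPullPhase attaches the container to the proxy-only network just long
// enough to fetch dependencies, then detaches it again.
func runPullPhase(ctx context.Context, cli *client.Client, containerID string, pull func() error) error {
	if err := cli.NetworkConnect(ctx, "pull-net", containerID, nil); err != nil {
		return err
	}
	// Detach even if the pull step fails; execution continues with no network.
	defer cli.NetworkDisconnect(ctx, "pull-net", containerID, true)
	return pull()
}
```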
F.3 Uploads Are Agent-Mediated Only

Task containers MUST NOT attempt any upload, publish, or push operations.
Deliverables are written to /workspace/output.
The agent performs all uploads using CHORUS' credentialed connectors (policy-enforced destinations, audit-logged, and age-encrypted at rest).
Upload destinations are controlled by an allow-list (see Rule U). Any destination not on the list is a hard fail.

Examples of commands forbidden in the container:

git push, scp, rsync --rsh=ssh, curl -T/--upload-file, aws s3 cp/sync, gsutil cp, az storage blob upload, rclone copy (or equivalents).

Agent-side "allow" examples:

Agent → artifact store (age-encrypted in HCFS/UCXL)
Agent → VCS release (signed, via service account)
Agent → package registry (via CI token)
F.4 Tooling Presence vs Capability

Images may contain tools like curl, git, compilers, etc., but capability is blocked by:

NetworkMode: none (no egress), and
Proxy policy (when egress is briefly enabled) that permits GET from the allow-list only and blocks all write methods and binary uploads.

Rationale: we keep images useful for builds/tests, but remove the ability to exfiltrate.
F.5 Auditing & Provenance

The agent computes and records the SHA-256 of each file in /workspace/output before upload; include sizes, timestamps, and the image digest used.
Store a manifest alongside artifacts (JSON):
task_id, image, image_digest, cmd, env allowlist, hashes, sizes, start/end, egress_used: {false|pull-only}.
Commit the manifest's summary to the BUBBLE Decision Record and UCXL timeline; keep the full manifest in HCFS (age-encrypted).
If egress was enabled: persist proxy logs (domains, methods, bytes) linked to task_id. No body content; headers redacted.
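A minimal sketch of the hashing step, assuming a simple walk over /workspace/output; the Artifact field names mirror the manifest fields above but are illustrative:

```go
package provenance

import (
	"crypto/sha256"
	"encoding/hex"
	"io"
	"io/fs"
	"os"
	"path/filepath"
)

// Artifact is one manifest entry for a file in /workspace/output.
type Artifact struct {
	Path   string `json:"path"`
	SHA256 string `json:"sha256"`
	Size   int64  `json:"size"`
}

// hashOutputs walks the output tree and records a SHA-256 and size per file.
func hashOutputs(root string) ([]Artifact, error) {
	var artifacts []Artifact
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil || d.IsDir() {
			return walkErr
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		h := sha256.New()
		n, err := io.Copy(h, f)
		if err != nil {
			return err
		}
		artifacts = append(artifacts, Artifact{Path: path, SHA256: hex.EncodeToString(h.Sum(nil)), Size: n})
		return nil
	})
	return artifacts, err
}
```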
F.6 Hard Fail Conditions (Agent must stop the task)

Container starts with any network other than none.
Attempted connection to the proxy using write methods (POST, PUT, PATCH, MKCOL, REPORT, BATCH, UPLOAD, WebDAV methods) or to non-allow-listed domains.
Detection of mounted secret paths (.ssh, .aws, .config/gcloud, .netrc, credentials.json, etc.).
Attempt to execute known upload CLIs (e.g., aws s3 cp, gsutil cp, rclone, az storage) when egress_used != pull-only.
F.7 Prompt-Visible Guidance (what the agent tells the model)

Uploads are prohibited from the execution container.
Write deliverables to /workspace/output. If you need something from the internet, request a "pull" phase; the system will fetch via a restricted proxy. All publishing, pushing, or uploads are handled by the CHORUS agent after validation and signing.

---
Rule N: Node Locality & Scheduling Guarantees

Same-node execution is mandatory. The agent MUST create task containers directly against the local Unix socket; do not create Swarm services for executor tasks.
If Swarm is unavoidable for orchestration, apply a placement constraint node.hostname == <agent_node> and refuse if unmet.
Volumes must be node-local bind mounts. Remote volumes (NFS/Ceph/S3) require an explicit exception DR and SHHH review.

---
Rule I: Image Integrity, SBOM & Supply Chain

Pin by digest: repo@sha256:<digest>. The agent fails if a tag-only reference is provided.
Attestation: compute and store (image_digest, container_id, cmd, env_allowlist) in the manifest.
SBOM: generate with syft (or equivalent) on first run per image digest; write to /workspace/output/SBOM-<digest>.spdx.json.
Vuln gate (optional switch): if CRITICAL vulns in SBOM+VEX match the allow-list, fail start (unless overridden by a Sec DR).
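The tag-only refusal can be as simple as the following sketch; a production check would parse the reference with a registry library rather than substring-match:

```go
package supply

import (
	"fmt"
	"strings"
)

// requireDigestPin fails start-up when an image reference is not pinned
// as repo@sha256:<digest>.
func requireDigestPin(imageRef string) error {
	if !strings.Contains(imageRef, "@sha256:") {
		return fmt.Errorf("image %q is not pinned by digest; refusing to start", imageRef)
	}
	return nil
}
```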
---
Rule X: Execution Limits & Determinism

Resource caps (minimums): --memory, --cpus, --pids-limit, --ulimit nofile, --read-only. Refuse to start if any is unset.
Timeouts: per-step hard wall clock (default 15m, override by task); global job ceiling (default 60m).
Determinism: set SOURCE_DATE_EPOCH, a fixed locale/timezone, and a seed env (CHORUS_SEED). Record these in the manifest.
Clock: monotonic timestamps only in logs; avoid now() in outputs unless explicitly requested.
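A sketch of the determinism environment the agent could inject into the container; the concrete values are illustrative and, as required above, must be recorded in the manifest:

```go
package limits

import "fmt"

// deterministicEnv returns the environment that pins build timestamps,
// timezone, locale, and the run seed for reproducibility.
func deterministicEnv(epoch, seed int64) []string {
	return []string{
		fmt.Sprintf("SOURCE_DATE_EPOCH=%d", epoch), // freeze embedded build timestamps
		"TZ=UTC",                                   // fixed timezone
		"LC_ALL=C.UTF-8",                           // fixed locale
		fmt.Sprintf("CHORUS_SEED=%d", seed),        // recorded in the manifest
	}
}
```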
---
Rule S: Secrets, Classification & SHHH Interlocks

No secrets in containers: forbid mounts of ~/.aws, ~/.ssh, gcloud, kubeconfig, netrc, and browser profiles. This is a preflight hard-fail.
Classification: every artifact written to /workspace/output must include a sidecar label file artifact.meta.json with { classification: "public|internal|restricted", pii: bool }.
Redaction: if the SHHH scanner flags restricted/PII content, the agent blocks upload until a Reviewer greenlights or redacts it.

---
Rule P: Proxy & Egress Policy (Pull-Only Net)

Allow-list domains (example set):
crates.io, static.crates.io, index.crates.io, pypi.org, files.pythonhosted.org, registry.npmjs.org, github.com (GET only), distro mirrors, etc.
Method block: deny POST, PUT, PATCH, DELETE, CONNECT, OPTIONS (if it carries a body), WebDAV, and WebSocket upgrades.
SNI/ALPN required; block IP-literal hosts and .onion.
TLS: enforce minimum TLS 1.2, verify the CA; block invalid SANs.
Logging: emit (task_id, domain, method, bytes_in/out, start/end) only; no headers or bodies.

---
Rule U: Upload & Publish Allow-List (Agent-Only)

Allowed sinks (examples; customize): HCFS/UCXL, internal artifact registry, GitHub Releases (service account), internal package repos.
Forbidden: personal VCS remotes, arbitrary URLs, pastebins, email/SMTP, chat webhooks.
Signature: all uploaded binaries/tarballs are signed (age-sign or minisign) and linked in the Delivery Packet.

---
Rule W: Workspace Hygiene & Ephemeral State

Ephemeral: containers start with a clean /workspace/data. No reuse across tasks without a DR.
Zeroization: on success or failure, the agent deletes the container, removes anonymous volumes, and scrubs tmp dirs.
Leak checks: refuse to complete if /workspace/output contains .env files, private keys, or tokens (SHHH gate).
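A sketch of that leak gate as a filename and content scan over the output tree; the patterns are illustrative, and the real SHHH scanner is assumed to be more thorough:

```go
package hygiene

import (
	"bytes"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

// hasLeak refuses completion when the output tree contains an obvious secret:
// a .env file, a PEM key file, or a "PRIVATE KEY" marker inside any file.
func hasLeak(root string) (bool, error) {
	leak := false
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil || d.IsDir() {
			return walkErr
		}
		if d.Name() == ".env" || strings.HasSuffix(d.Name(), ".pem") {
			leak = true
			return fs.SkipAll // one hit is enough to block completion
		}
		data, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		if bytes.Contains(data, []byte("PRIVATE KEY")) {
			leak = true
			return fs.SkipAll
		}
		return nil
	})
	return leak, err
}
```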
---
Rule A: Admission Controls as Policy-as-Code

Encode Rules F/N/I/X/S/P/U/W as OPA/Rego (or equivalent).
Preflight: the agent evaluates policies before docker create; deny-by-default on any missing control.
Postrun attestation: the policy engine signs the manifest hash; include the signature in the Delivery Packet.

---
Rule 1: Definition of "Done"

Agents must consider work "complete" only if ALL of the following apply:

- Spec alignment: The artifact satisfies the current spec and acceptance criteria (pull via context.get({ selectors:["summary","decisions"] })).
- Tests & checks: Unit/integration tests for touched areas pass; new behavior is covered at the minimum coverage threshold (default 80% or the project value).
- Reproducibility proof: Re-run the build with the same image digest and seed; hashes must match, or explain the variance.
- Diff summary: A concise diff summary exists and is linked to intent (why), risks, and a rollback plan. Use a semantic diff if available; otherwise summarize functional changes.
- Performance/SLO: Any declared perf/SLO budgets are met (latency/memory/cost). Include quick bench evidence if relevant.
- Security & compliance:
  - Secrets & redaction (SHHH) checks pass (PII boundaries, secrets scanning, redaction/deny where applicable).
  - License/budget checks (KACHING) are clear or have approved mitigations.
- Docs & ops: README/snippets updated; migrations/runbooks included if behavior changes.
- Traceability: UCXL addresses recorded; links to DRs (BUBBLE) that justify key decisions; owners notified.
- No blocking critiques: All red items resolved; yellow items have a mitigation or an explicit deferral DR.

If any item is unknown → fetch via context.get. If any item fails → not done.

---
Rule 2: Consensus protocol — artifact-centric, lightweight

States: DRAFT → REVIEW → CONSENSUS → SUBMITTED → MERGED/REJECTED

Participants:
- Proposer(s): the agent(s) who built the artifact.
- Reviewers: 2+ agents with relevant roles (e.g., Architect, QA, Security).
- Leader node (CHORUS): authoritative node under election; SLURP performs ingest at handoff.

Voting signal: green (approve), yellow (accept with mitigations), red (block).
Each vote must attach: scope touched, rationale, evidence anchor(s).

Quorum (defaults; override per WHOOSH config; a sketch of the pass/block predicate follows the list):
- Quorum: 3 total reviewers, including at least 1 domain reviewer (e.g., module owner) and 1 quality reviewer (QA/Sec/Perf).
- Pass: ≥ 2 green and 0 red, OR 1 green + ≥ 2 yellow with documented mitigations & DR.
- Block: Any red → stays in REVIEW until resolved or escalated.
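The pass/block rule reduces to a small predicate; this sketch counts votes only and assumes mitigations and the accompanying DR are tracked elsewhere:

```go
package consensus

// Verdict is the state an artifact moves to after tallying votes.
type Verdict string

const (
	Pass    Verdict = "CONSENSUS"
	Blocked Verdict = "REVIEW" // stays in review until resolved or escalated
)

// evaluateQuorum applies the default pass/block rule above.
func evaluateQuorum(green, yellow, red int, mitigationsDocumented bool) Verdict {
	if red > 0 {
		return Blocked // any red blocks
	}
	if green >= 2 {
		return Pass // ≥2 green and 0 red
	}
	if green >= 1 && yellow >= 2 && mitigationsDocumented {
		return Pass // 1 green + ≥2 yellow with documented mitigations & DR
	}
	return Blocked
}
```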
Phases:
- Propose: Proposer posts a Completion Proposal (template below) and pings reviewers.
- Critique: Reviewers attach a vote + rationale tied to specific artifact sections or diff chunks.
- Converge: Proposer integrates changes; repeat once if needed. If stuck, open a mini-DR ("disagreement & resolution").
- Seal: When the pass condition is met, mark CONSENSUS and hand off to the Leader node (SLURP ingest).
- Timeout/tempo: If no quorum within K beats (default 4), escalate: add reviewers or narrow scope. 1 beat = project-configured duration (default 15 minutes).
3) Completion Proposal — agent output template

### Completion Proposal
**Artifact:** <human path or label>
**Scope:** <what changed, why>
**Snapshot:** <RFC3339 UTC timestamp, build hash/short-id>
**Spec & DR Links:** <bullet list with titles>
**Diff Summary:** <plain-language; key functions/files; risk class>
**Tests:** <pass/fail, coverage %, notable cases>
**Perf/SLO:** <numbers vs targets or N/A>
**Security:** <SHHH checks passed or issues + mitigation>
**Compliance:** <KACHING/license/budget checks>
**Docs/Runbooks:** <updated files>
**Rollback Plan:** <how to revert safely>
**Open Yellows:** <mitigations + DR reference>

### Provenance
- UCXL addresses: <list of ucxl://... for artifacts/diffs/contexts>
- Lineage note: <brief ancestry or DR linkage>

### Evidence
- [ ] context.get snapshots (summary/decisions/diff)
- [ ] Bench/test artifacts (paths)
- [ ] Any generated diagrams or data

### Request for Review
**Needed roles:** <e.g., Module Owner, QA, Security>
**Due by:** <N beats or absolute time>

All tool outputs should be pasted/linked as Markdown; do not paraphrase factual blocks.
4) Reviewer rubric — how to vote

green: Meets the DoD; residual risk negligible.
yellow: Meets the DoD if listed mitigations are accepted; create a follow-up DR/task.
red: Violates the spec, introduces a regression/security risk, or is missing tests/owners.

Each vote must include:

### Review Vote (green|yellow|red)
**Focus Areas:** <files/functions/spec sections>
**Rationale:** <short, concrete>
**Evidence Anchors:** <links to diff lines, test outputs, DR ids, UCXL addresses>
**Mitigations (if yellow):** <actions, owners, deadlines>
5) Handoff to Leader (CHORUS) — when & how

When: Immediately after CONSENSUS is reached per the quorum rules.
What: Submit a Decision Bundle Delivery Packet:

### Delivery Packet
**Artifact:** <name/path>
**Version:** <semantic or short hash>
**Consensus Receipt:**
- Reviewers: <names/roles>
- Votes: <N green / M yellow / 0 red>
- DR: <id for "Approval & Residual Risk">
**Provenance:**
- UCXL lineage ids: <visible list; adapter may add hidden annotations>
- Snapshot time: <RFC3339 UTC>
**Contents:**
- Completion Proposal (final)
- Final Diff (semantic if available + patch excerpt)
- Test & Bench Summaries
- Updated Docs/Runbooks
- Rollback Plan
**Compliance:**
- SHHH checks: <pass/notes>
- KACHING checks: <pass/notes>

How: call a single tool (the adapter maps it to SLURP "/decision/bundle" and BUBBLE):

deliver.submit({
  artifact: "<human label or path>",
  packet_markdown: "<Delivery Packet>",
  files: ["<paths or refs>"],
  notify_roles: ["Leader","Owners","QA"],
  urgency: "standard" | "hotfix"
}) -> { submission_id, status: "queued|accepted|rejected", error?: string }

If the adapter is unavailable, submit directly to SLURP "/decision/bundle" with the same fields. Always include UCXL addresses in the packet.
6) System-prompt inserts (drop-in)

A) Agent behavior
- Before claiming "done," verify the DoD via context.get and tests.
- Produce a Completion Proposal and request reviewer votes.
- Do not self-approve. Wait for quorum per the rules.
- If any red, resolve it or open a mini-DR; proceed only when the pass condition is met.
- On consensus, call deliver.submit(...) with the Delivery Packet.
- Paste tool Markdown verbatim; do not paraphrase factual blocks.

B) Reviewer behavior
- Vote green/yellow/red with evidence.
- Tie critiques to exact lines/sections; avoid vague feedback.
- If yellow, specify the mitigation, owner, and deadline.
- If red, cite the spec/DR conflict or concrete risk; propose a fix path.

C) Safety & tempo
- Respect SHHH redactions; do not infer hidden content.
- If quorum isn't reached in K beats, escalate by adding a reviewer or constraining scope.
- If the policy preflight denies admission, report the failing rule and stop; do not attempt alternate execution paths.
7) Minimal tool contract stubs (front-of-house)

review.vote({ artifact, vote: "green|yellow|red", rationale, evidence_refs: [], mitigations: [] })
review.status({ artifact }) -> { phase, votes: {...}, blockers: [...] }
deliver.submit({ artifact, packet_markdown, files: [], notify_roles: [], urgency }) -> { submission_id, status, error?: string }
context.get({ selectors: ["summary","decisions"], scope?: "brief"|"full" }) -> Markdown

(Your adapter maps these to UCXL/SLURP/BUBBLE; agents still see UCXL addresses for provenance.)
8) Failure modes & how agents proceed

- Spec drift mid-review: The proposer must refresh context.get({ selectors: ["summary","decisions"] }), rebase, and re-request votes.
- Perma-yellow: Convert mitigations into a DR + task with deadlines; the Leader node may accept if the risk is bounded and logged.
- Blocked by owner absence: After the timeout, any Architect + QA duo can temporarily fill quorum with an Escalation DR.
9) Example micro-flow (concise)

1. Builder posts a Completion Proposal.
2. QA votes yellow (needs 2 flaky tests stabilized). Security votes green. Owner votes green.
3. Proposer adds flake guards and links evidence; QA flips to green.
4. Proposer compiles the Delivery Packet and calls deliver.submit(...).
5. Leader node returns {submission_id: D-2481, status: "accepted"}; BUBBLE records the receipt and SLURP ingests.

Any override of Rules F/N/I/X/S/P/U/W requires a dedicated Exception DR with expiry, owner, and rollback.

---
Rule K: Knowledge Disclosure

State your knowledge cutoff once in your first reply.
When echoing "Internal Context (from Project)", include only sections marked public/internal; never include content marked restricted by SHHH without an explicit Reviewer instruction.

Provenance & Order of Operations

1. Declare your knowledge cutoff once at the top of your first reply.
2. If the question concerns this project's files/specs/DRs, call context.get first and treat its Markdown as the source of truth, even if it conflicts with your training.
3. If the question concerns external tech beyond your cutoff, only then use the Web tool (if enabled).

Output structure (keep sections distinct):

- Knowledge Cutoff: one line.
- Internal Context (from Project): verbatim Markdown returned by context.get (do not paraphrase).
- Latest Context (from Web): bullets + dates + links (only if used).
- Reasoned Answer: your synthesis, citing which sections you relied on.
- Next Steps: concrete actions.

Cost control: Start context.get with scope:"brief" and only add the selectors you need. One call per reasoning phase.

Contextual Reasoning:

- Precedence: Internal Project Context > Native training > Web.
- Be explicit about which parts of your answer come from your training.

Optional Web Augmentation:

- If the user request involves technology or events beyond your training cutoff, you may use the web tool (if enabled) to look up authoritative, up-to-date information.
- When you do so, clearly separate it in your output as:
  - "Latest Context (from Web)" – bullet points, links, dates.
  - "Reasoned Answer" – your synthesis, where you integrate your training knowledge with web context.
- Always distinguish what is native knowledge vs. web-retrieved context.
@@ -8,10 +8,12 @@ import (
 	"sync"
 	"time"
 
+	"chorus/internal/logging"
 	"chorus/pkg/shhh"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/rs/zerolog"
 )
 
 // PubSub handles publish/subscribe messaging for Bzzz coordination and HMMM meta-discussion
@@ -56,6 +58,9 @@ type PubSub struct {
 	// SHHH sentinel
 	redactor    *shhh.Sentinel
 	redactorMux sync.RWMutex
+
+	// Structured logger
+	logger zerolog.Logger
 }
 
 // HypercoreLogger interface for dependency injection
@@ -168,6 +173,7 @@ func NewPubSubWithLogger(ctx context.Context, h host.Host, chorusTopic, hmmmTopi
 		dynamicSubs:     make(map[string]*pubsub.Subscription),
 		dynamicHandlers: make(map[string]func([]byte, peer.ID)),
 		hypercoreLog:    logger,
+		logger:          logging.ForComponent(logging.ComponentP2P),
 	}
 
 	// Join static topics
@@ -181,7 +187,11 @@ func NewPubSubWithLogger(ctx context.Context, h host.Host, chorusTopic, hmmmTopi
 	go p.handleHmmmMessages()
 	go p.handleContextFeedbackMessages()
 
-	fmt.Printf("📡 PubSub initialized - Bzzz: %s, HMMM: %s, Context: %s\n", chorusTopic, hmmmTopic, contextTopic)
+	p.logger.Info().
+		Str("bzzz_topic", chorusTopic).
+		Str("hmmm_topic", hmmmTopic).
+		Str("context_topic", contextTopic).
+		Msg("PubSub initialized")
 	return p, nil
 }
@@ -297,7 +307,7 @@ func (p *PubSub) subscribeDynamicTopic(topicName string, handler func([]byte, pe
 
 	go p.handleDynamicMessages(topicName, sub)
 
-	fmt.Printf("✅ Joined dynamic topic: %s\n", topicName)
+	p.logger.Info().Str("topic_name", topicName).Msg("Joined dynamic topic")
 	return nil
 }
@@ -339,12 +349,12 @@ func (p *PubSub) JoinRoleBasedTopics(role string, expertise []string, reportsTo
 	// Join all identified topics
 	for _, topicName := range topicsToJoin {
 		if err := p.JoinDynamicTopic(topicName); err != nil {
-			fmt.Printf("⚠️ Failed to join role-based topic %s: %v\n", topicName, err)
+			p.logger.Warn().Err(err).Str("topic_name", topicName).Msg("Failed to join role-based topic")
 			continue
 		}
 	}
 
-	fmt.Printf("🎯 Joined %d role-based topics for role: %s\n", len(topicsToJoin), role)
+	p.logger.Info().Int("topic_count", len(topicsToJoin)).Str("role", role).Msg("Joined role-based topics")
 	return nil
 }
@@ -379,7 +389,7 @@ func (p *PubSub) LeaveDynamicTopic(topicName string) {
 	delete(p.dynamicHandlers, topicName)
 	p.dynamicHandlersMux.Unlock()
 
-	fmt.Printf("🗑️ Left dynamic topic: %s\n", topicName)
+	p.logger.Info().Str("topic_name", topicName).Msg("Left dynamic topic")
 }
 
 // PublishToDynamicTopic publishes a message to a specific dynamic topic
@@ -588,7 +598,7 @@ func (p *PubSub) handleBzzzMessages() {
 			if p.ctx.Err() != nil {
 				return // Context cancelled
 			}
-			fmt.Printf("❌ Error receiving Bzzz message: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Error receiving Bzzz message")
 			continue
 		}
@@ -598,7 +608,7 @@ func (p *PubSub) handleBzzzMessages() {
 
 		var chorusMsg Message
 		if err := json.Unmarshal(msg.Data, &chorusMsg); err != nil {
-			fmt.Printf("❌ Failed to unmarshal Bzzz message: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Failed to unmarshal Bzzz message")
 			continue
 		}
@@ -614,7 +624,7 @@ func (p *PubSub) handleHmmmMessages() {
 			if p.ctx.Err() != nil {
 				return // Context cancelled
 			}
-			fmt.Printf("❌ Error receiving HMMM message: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Error receiving HMMM message")
 			continue
 		}
@@ -624,7 +634,7 @@ func (p *PubSub) handleHmmmMessages() {
 
 		var hmmmMsg Message
 		if err := json.Unmarshal(msg.Data, &hmmmMsg); err != nil {
-			fmt.Printf("❌ Failed to unmarshal HMMM message: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Failed to unmarshal HMMM message")
 			continue
 		}
@@ -644,7 +654,7 @@ func (p *PubSub) handleContextFeedbackMessages() {
 			if p.ctx.Err() != nil {
 				return // Context cancelled
 			}
-			fmt.Printf("❌ Error receiving Context Feedback message: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Error receiving Context Feedback message")
 			continue
 		}
@@ -654,7 +664,7 @@ func (p *PubSub) handleContextFeedbackMessages() {
 
 		var contextMsg Message
 		if err := json.Unmarshal(msg.Data, &contextMsg); err != nil {
-			fmt.Printf("❌ Failed to unmarshal Context Feedback message: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Failed to unmarshal Context Feedback message")
 			continue
 		}
@@ -682,7 +692,7 @@ func (p *PubSub) handleDynamicMessages(topicName string, sub *pubsub.Subscriptio
 			if p.ctx.Err() != nil || err.Error() == "subscription cancelled" {
 				return // Subscription was cancelled, exit handler
 			}
-			fmt.Printf("❌ Error receiving dynamic message on %s: %v\n", topicName, err)
+			p.logger.Warn().Err(err).Str("topic_name", topicName).Msg("Error receiving dynamic message")
 			continue
 		}
@@ -697,7 +707,7 @@ func (p *PubSub) handleDynamicMessages(topicName string, sub *pubsub.Subscriptio
 
 		var dynamicMsg Message
 		if err := json.Unmarshal(msg.Data, &dynamicMsg); err != nil {
-			fmt.Printf("❌ Failed to unmarshal dynamic message on %s: %v\n", topicName, err)
+			p.logger.Warn().Err(err).Str("topic_name", topicName).Msg("Failed to unmarshal dynamic message")
 			continue
 		}
@@ -710,7 +720,11 @@ func (p *PubSub) handleDynamicMessages(topicName string, sub *pubsub.Subscriptio
 
 // processBzzzMessage handles different types of Bzzz coordination messages
 func (p *PubSub) processBzzzMessage(msg Message, from peer.ID) {
-	fmt.Printf("🐝 Bzzz [%s] from %s: %v\n", msg.Type, from.ShortString(), msg.Data)
+	p.logger.Debug().
+		Str("message_type", string(msg.Type)).
+		Str("from_peer", from.ShortString()).
+		Interface("data", msg.Data).
+		Msg("Bzzz message received")
 
 	// Log to hypercore if logger is available
 	if p.hypercoreLog != nil {
@@ -743,15 +757,18 @@ func (p *PubSub) processBzzzMessage(msg Message, from peer.ID) {
 		}
 
 		if err := p.hypercoreLog.AppendString(logType, logData); err != nil {
-			fmt.Printf("❌ Failed to log Bzzz message to hypercore: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Failed to log Bzzz message to hypercore")
 		}
 	}
 }
 
 // processHmmmMessage provides default handling for HMMM messages if no external handler is set
 func (p *PubSub) processHmmmMessage(msg Message, from peer.ID) {
-	fmt.Printf("🎯 Default HMMM Handler [%s] from %s: %v\n",
-		msg.Type, from.ShortString(), msg.Data)
+	p.logger.Debug().
+		Str("message_type", string(msg.Type)).
+		Str("from_peer", from.ShortString()).
+		Interface("data", msg.Data).
+		Msg("Default HMMM Handler")
 
 	// Log to hypercore if logger is available
 	if p.hypercoreLog != nil {
@@ -794,15 +811,18 @@ func (p *PubSub) processHmmmMessage(msg Message, from peer.ID) {
 		}
 
 		if err := p.hypercoreLog.AppendString(logType, logData); err != nil {
-			fmt.Printf("❌ Failed to log HMMM message to hypercore: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Failed to log HMMM message to hypercore")
 		}
 	}
 }
 
 // processContextFeedbackMessage provides default handling for context feedback messages if no external handler is set
 func (p *PubSub) processContextFeedbackMessage(msg Message, from peer.ID) {
-	fmt.Printf("🧠 Context Feedback [%s] from %s: %v\n",
-		msg.Type, from.ShortString(), msg.Data)
+	p.logger.Debug().
+		Str("message_type", string(msg.Type)).
+		Str("from_peer", from.ShortString()).
+		Interface("data", msg.Data).
+		Msg("Context Feedback")
 
 	// Log to hypercore if logger is available
 	if p.hypercoreLog != nil {
@@ -834,7 +854,7 @@ func (p *PubSub) processContextFeedbackMessage(msg Message, from peer.ID) {
 		}
 
 		if err := p.hypercoreLog.AppendString(logType, logData); err != nil {
-			fmt.Printf("❌ Failed to log Context Feedback message to hypercore: %v\n", err)
+			p.logger.Warn().Err(err).Msg("Failed to log Context Feedback message to hypercore")
 		}
 	}
 }
@@ -9,6 +9,8 @@ import (
 	"net/http"
 	"strings"
 	"time"
+
+	"chorus/pkg/mcp"
 )
 
 const (
@@ -23,6 +25,7 @@ var (
 	aiProvider          string = "resetdata" // Default provider
 	resetdataConfig     ResetDataConfig
 	defaultSystemPrompt string
+	lightragClient      *mcp.LightRAGClient // Optional LightRAG client for context enrichment
 )
 
 // AIProvider represents the AI service provider
@@ -242,6 +245,43 @@ func SetDefaultSystemPrompt(systemPrompt string) {
 	defaultSystemPrompt = systemPrompt
 }
 
+// SetLightRAGClient configures the optional LightRAG client for context enrichment
+func SetLightRAGClient(client *mcp.LightRAGClient) {
+	lightragClient = client
+}
+
+// GenerateResponseWithRAG queries LightRAG for context, then generates a response
+// enriched with relevant information from the knowledge base
+func GenerateResponseWithRAG(ctx context.Context, model, prompt string, queryMode mcp.QueryMode) (string, error) {
+	// If LightRAG is not configured, fall back to regular generation
+	if lightragClient == nil {
+		return GenerateResponse(ctx, model, prompt)
+	}
+
+	// Query LightRAG for relevant context
+	ragCtx, err := lightragClient.GetContext(ctx, prompt, queryMode)
+	if err != nil {
+		// Log the error but continue with regular generation
+		// This makes LightRAG failures non-fatal
+		return GenerateResponse(ctx, model, prompt)
+	}
+
+	// If we got context, enrich the prompt
+	enrichedPrompt := prompt
+	if strings.TrimSpace(ragCtx) != "" {
+		enrichedPrompt = fmt.Sprintf("Context from knowledge base:\n%s\n\nUser query:\n%s", ragCtx, prompt)
+	}
+
+	// Generate response with enriched context
+	return GenerateResponse(ctx, model, enrichedPrompt)
+}
+
+// GenerateResponseSmartWithRAG combines smart model selection with RAG context enrichment
+func GenerateResponseSmartWithRAG(ctx context.Context, prompt string, queryMode mcp.QueryMode) (string, error) {
+	selectedModel := selectBestModel(availableModels, prompt)
+	return GenerateResponseWithRAG(ctx, selectedModel, prompt, queryMode)
+}
+
 // selectBestModel calls the model selection webhook to choose the best model for a prompt
 func selectBestModel(availableModels []string, prompt string) string {
 	if modelWebhookURL == "" || len(availableModels) == 0 {