feat(prompts): load system prompts and defaults from Docker volume; set runtime system prompt; add BACKBEAT standards
This commit is contained in:
601
internal/runtime/shared.go
Normal file
601
internal/runtime/shared.go
Normal file
@@ -0,0 +1,601 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"chorus/api"
|
||||
"chorus/coordinator"
|
||||
"chorus/discovery"
|
||||
"chorus/internal/backbeat"
|
||||
"chorus/internal/licensing"
|
||||
"chorus/internal/logging"
|
||||
"chorus/p2p"
|
||||
"chorus/pkg/config"
|
||||
"chorus/pkg/dht"
|
||||
"chorus/pkg/election"
|
||||
"chorus/pkg/health"
|
||||
"chorus/pkg/shutdown"
|
||||
"chorus/pkg/prompt"
|
||||
"chorus/pkg/ucxi"
|
||||
"chorus/pkg/ucxl"
|
||||
"chorus/pubsub"
|
||||
"chorus/reasoning"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// Application identity reported in startup banners and logs.
const (
	AppName    = "CHORUS"
	AppVersion = "0.1.0-dev"
)
|
||||
|
||||
// SimpleLogger is a minimal leveled logger backed by the standard library's
// log package. Each level is rendered as a bracketed prefix on the line.
type SimpleLogger struct{}

// logf writes one formatted line tagged with the given level.
func (l *SimpleLogger) logf(level, msg string, args ...interface{}) {
	log.Printf("["+level+"] "+msg, args...)
}

// Info logs an informational message in Printf style.
func (l *SimpleLogger) Info(msg string, args ...interface{}) {
	l.logf("INFO", msg, args...)
}

// Warn logs a warning message in Printf style.
func (l *SimpleLogger) Warn(msg string, args ...interface{}) {
	l.logf("WARN", msg, args...)
}

// Error logs an error message in Printf style.
func (l *SimpleLogger) Error(msg string, args ...interface{}) {
	l.logf("ERROR", msg, args...)
}
|
||||
|
||||
// SimpleTaskTracker tracks active tasks for availability reporting
|
||||
type SimpleTaskTracker struct {
|
||||
maxTasks int
|
||||
activeTasks map[string]bool
|
||||
decisionPublisher *ucxl.DecisionPublisher
|
||||
}
|
||||
|
||||
// GetActiveTasks returns list of active task IDs
|
||||
func (t *SimpleTaskTracker) GetActiveTasks() []string {
|
||||
tasks := make([]string, 0, len(t.activeTasks))
|
||||
for taskID := range t.activeTasks {
|
||||
tasks = append(tasks, taskID)
|
||||
}
|
||||
return tasks
|
||||
}
|
||||
|
||||
// GetMaxTasks returns maximum number of concurrent tasks
|
||||
func (t *SimpleTaskTracker) GetMaxTasks() int {
|
||||
return t.maxTasks
|
||||
}
|
||||
|
||||
// AddTask marks a task as active
|
||||
func (t *SimpleTaskTracker) AddTask(taskID string) {
|
||||
t.activeTasks[taskID] = true
|
||||
}
|
||||
|
||||
// RemoveTask marks a task as completed and publishes decision if publisher available
|
||||
func (t *SimpleTaskTracker) RemoveTask(taskID string) {
|
||||
delete(t.activeTasks, taskID)
|
||||
|
||||
// Publish task completion decision if publisher is available
|
||||
if t.decisionPublisher != nil {
|
||||
t.publishTaskCompletion(taskID, true, "Task completed successfully", nil)
|
||||
}
|
||||
}
|
||||
|
||||
// publishTaskCompletion publishes a task completion decision to DHT
|
||||
func (t *SimpleTaskTracker) publishTaskCompletion(taskID string, success bool, summary string, filesModified []string) {
|
||||
if t.decisionPublisher == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err := t.decisionPublisher.PublishTaskCompletion(taskID, success, summary, filesModified); err != nil {
|
||||
fmt.Printf("⚠️ Failed to publish task completion for %s: %v\n", taskID, err)
|
||||
} else {
|
||||
fmt.Printf("📤 Published task completion decision for: %s\n", taskID)
|
||||
}
|
||||
}
|
||||
|
||||
// SharedRuntime contains all the shared P2P infrastructure components
// created by Initialize and torn down by Cleanup. Optional components
// (BACKBEAT, DHT, UCXI) are nil when disabled or when their setup failed.
type SharedRuntime struct {
	Config              *config.Config               // environment-derived configuration
	Logger              *SimpleLogger                // leveled stdlib-backed logger
	Context             context.Context              // root context shared by all components
	Cancel              context.CancelFunc           // cancels Context during shutdown
	Node                *p2p.Node                    // libp2p host
	PubSub              *pubsub.PubSub               // coordination / meta-discussion topics
	HypercoreLog        *logging.HypercoreLog        // append-only coordination log
	MDNSDiscovery       *discovery.MDNSDiscovery     // local peer discovery
	BackbeatIntegration *backbeat.Integration        // beat telemetry; nil when unavailable
	DHTNode             *dht.LibP2PDHT               // nil when DHT is disabled
	EncryptedStorage    *dht.EncryptedDHTStorage     // nil when DHT is disabled
	DecisionPublisher   *ucxl.DecisionPublisher      // nil when DHT is disabled
	ElectionManager     *election.ElectionManager    // admin election with heartbeats
	TaskCoordinator     *coordinator.TaskCoordinator // task coordination engine
	HTTPServer          *api.HTTPServer              // HTTP API server
	UCXIServer          *ucxi.Server                 // nil when UCXI is disabled
	HealthManager       *health.Manager              // NOTE(review): never assigned in this file — confirm external wiring
	ShutdownManager     *shutdown.Manager            // NOTE(review): never assigned in this file — confirm external wiring
	TaskTracker         *SimpleTaskTracker           // active-task tracker for availability reporting
}
|
||||
|
||||
// Initialize sets up all shared P2P infrastructure components
|
||||
func Initialize(appMode string) (*SharedRuntime, error) {
|
||||
runtime := &SharedRuntime{}
|
||||
runtime.Logger = &SimpleLogger{}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
runtime.Context = ctx
|
||||
runtime.Cancel = cancel
|
||||
|
||||
runtime.Logger.Info("🎭 Starting CHORUS v%s - Container-First P2P Task Coordination", AppVersion)
|
||||
runtime.Logger.Info("📦 Container deployment - Mode: %s", appMode)
|
||||
|
||||
// Load configuration from environment (no config files in containers)
|
||||
runtime.Logger.Info("📋 Loading configuration from environment variables...")
|
||||
cfg, err := config.LoadFromEnvironment()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("configuration error: %v", err)
|
||||
}
|
||||
runtime.Config = cfg
|
||||
|
||||
runtime.Logger.Info("✅ Configuration loaded successfully")
|
||||
runtime.Logger.Info("🤖 Agent ID: %s", cfg.Agent.ID)
|
||||
runtime.Logger.Info("🎯 Specialization: %s", cfg.Agent.Specialization)
|
||||
|
||||
// CRITICAL: Validate license before any P2P operations
|
||||
runtime.Logger.Info("🔐 Validating CHORUS license with KACHING...")
|
||||
licenseValidator := licensing.NewValidator(licensing.LicenseConfig{
|
||||
LicenseID: cfg.License.LicenseID,
|
||||
ClusterID: cfg.License.ClusterID,
|
||||
KachingURL: cfg.License.KachingURL,
|
||||
})
|
||||
if err := licenseValidator.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("license validation failed: %v", err)
|
||||
}
|
||||
runtime.Logger.Info("✅ License validation successful - CHORUS authorized to run")
|
||||
|
||||
// Initialize AI provider configuration
|
||||
runtime.Logger.Info("🧠 Configuring AI provider: %s", cfg.AI.Provider)
|
||||
if err := initializeAIProvider(cfg, runtime.Logger); err != nil {
|
||||
return nil, fmt.Errorf("AI provider initialization failed: %v", err)
|
||||
}
|
||||
runtime.Logger.Info("✅ AI provider configured successfully")
|
||||
|
||||
// Initialize BACKBEAT integration
|
||||
var backbeatIntegration *backbeat.Integration
|
||||
backbeatIntegration, err = backbeat.NewIntegration(cfg, cfg.Agent.ID, runtime.Logger)
|
||||
if err != nil {
|
||||
runtime.Logger.Warn("⚠️ BACKBEAT integration initialization failed: %v", err)
|
||||
runtime.Logger.Info("📍 P2P operations will run without beat synchronization")
|
||||
} else {
|
||||
if err := backbeatIntegration.Start(ctx); err != nil {
|
||||
runtime.Logger.Warn("⚠️ Failed to start BACKBEAT integration: %v", err)
|
||||
backbeatIntegration = nil
|
||||
} else {
|
||||
runtime.Logger.Info("🎵 BACKBEAT integration started successfully")
|
||||
}
|
||||
}
|
||||
runtime.BackbeatIntegration = backbeatIntegration
|
||||
|
||||
// Initialize P2P node
|
||||
node, err := p2p.NewNode(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create P2P node: %v", err)
|
||||
}
|
||||
runtime.Node = node
|
||||
|
||||
runtime.Logger.Info("🐝 CHORUS node started successfully")
|
||||
runtime.Logger.Info("📍 Node ID: %s", node.ID().ShortString())
|
||||
runtime.Logger.Info("🔗 Listening addresses:")
|
||||
for _, addr := range node.Addresses() {
|
||||
runtime.Logger.Info(" %s/p2p/%s", addr, node.ID())
|
||||
}
|
||||
|
||||
// Initialize Hypercore-style logger for P2P coordination
|
||||
hlog := logging.NewHypercoreLog(node.ID())
|
||||
hlog.Append(logging.PeerJoined, map[string]interface{}{"status": "started"})
|
||||
runtime.HypercoreLog = hlog
|
||||
runtime.Logger.Info("📝 Hypercore logger initialized")
|
||||
|
||||
// Initialize mDNS discovery
|
||||
mdnsDiscovery, err := discovery.NewMDNSDiscovery(ctx, node.Host(), "chorus-peer-discovery")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create mDNS discovery: %v", err)
|
||||
}
|
||||
runtime.MDNSDiscovery = mdnsDiscovery
|
||||
|
||||
// Initialize PubSub with hypercore logging
|
||||
ps, err := pubsub.NewPubSubWithLogger(ctx, node.Host(), "chorus/coordination/v1", "hmmm/meta-discussion/v1", hlog)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create PubSub: %v", err)
|
||||
}
|
||||
runtime.PubSub = ps
|
||||
|
||||
runtime.Logger.Info("📡 PubSub system initialized")
|
||||
|
||||
// Join role-based topics if role is configured
|
||||
if cfg.Agent.Role != "" {
|
||||
reportsTo := []string{}
|
||||
if cfg.Agent.ReportsTo != "" {
|
||||
reportsTo = []string{cfg.Agent.ReportsTo}
|
||||
}
|
||||
if err := ps.JoinRoleBasedTopics(cfg.Agent.Role, cfg.Agent.Expertise, reportsTo); err != nil {
|
||||
runtime.Logger.Warn("⚠️ Failed to join role-based topics: %v", err)
|
||||
} else {
|
||||
runtime.Logger.Info("🎯 Joined role-based collaboration topics")
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize remaining components
|
||||
if err := runtime.initializeElectionSystem(); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize election system: %v", err)
|
||||
}
|
||||
|
||||
if err := runtime.initializeDHTStorage(); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize DHT storage: %v", err)
|
||||
}
|
||||
|
||||
if err := runtime.initializeServices(); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize services: %v", err)
|
||||
}
|
||||
|
||||
return runtime, nil
|
||||
}
|
||||
|
||||
// Cleanup properly shuts down all runtime components
|
||||
func (r *SharedRuntime) Cleanup() {
|
||||
r.Logger.Info("🔄 Starting graceful shutdown...")
|
||||
|
||||
if r.BackbeatIntegration != nil {
|
||||
r.BackbeatIntegration.Stop()
|
||||
}
|
||||
|
||||
if r.MDNSDiscovery != nil {
|
||||
r.MDNSDiscovery.Close()
|
||||
}
|
||||
|
||||
if r.PubSub != nil {
|
||||
r.PubSub.Close()
|
||||
}
|
||||
|
||||
if r.DHTNode != nil {
|
||||
r.DHTNode.Close()
|
||||
}
|
||||
|
||||
if r.Node != nil {
|
||||
r.Node.Close()
|
||||
}
|
||||
|
||||
if r.HTTPServer != nil {
|
||||
r.HTTPServer.Stop()
|
||||
}
|
||||
|
||||
if r.UCXIServer != nil {
|
||||
r.UCXIServer.Stop()
|
||||
}
|
||||
|
||||
if r.ElectionManager != nil {
|
||||
r.ElectionManager.Stop()
|
||||
}
|
||||
|
||||
if r.Cancel != nil {
|
||||
r.Cancel()
|
||||
}
|
||||
|
||||
r.Logger.Info("✅ CHORUS shutdown completed")
|
||||
}
|
||||
|
||||
// Helper methods for initialization (extracted from main.go)

// initializeElectionSystem creates and starts the admin election manager and
// wires its two callbacks: one for admin hand-over, one for election
// completion. Both callbacks emit short-lived BACKBEAT operations when the
// integration is available.
func (r *SharedRuntime) initializeElectionSystem() error {
	// === Admin Election System ===
	electionManager := election.NewElectionManager(r.Context, r.Config, r.Node.Host(), r.PubSub, r.Node.ID().ShortString())

	// Set election callbacks with BACKBEAT integration
	electionManager.SetCallbacks(
		// Invoked whenever admin leadership changes hands.
		func(oldAdmin, newAdmin string) {
			r.Logger.Info("👑 Admin changed: %s -> %s", oldAdmin, newAdmin)

			// Track admin change with BACKBEAT if available
			if r.BackbeatIntegration != nil {
				// NOTE(review): ID has second resolution; two changes in the
				// same second would share an ID — confirm this is acceptable.
				operationID := fmt.Sprintf("admin-change-%d", time.Now().Unix())
				if err := r.BackbeatIntegration.StartP2POperation(operationID, "admin_change", 2, map[string]interface{}{
					"old_admin": oldAdmin,
					"new_admin": newAdmin,
				}); err == nil {
					// Complete immediately as this is a state change, not a long operation
					r.BackbeatIntegration.CompleteP2POperation(operationID, 1)
				}
			}

			// If this node becomes admin, enable SLURP functionality
			if newAdmin == r.Node.ID().ShortString() {
				r.Logger.Info("🎯 This node is now admin - enabling SLURP functionality")
				r.Config.Slurp.Enabled = true
				// Apply admin role configuration
				if err := r.Config.ApplyRoleDefinition("admin"); err != nil {
					r.Logger.Warn("⚠️ Failed to apply admin role: %v", err)
				}
			}
		},
		// Invoked when an election round completes with a winner.
		func(winner string) {
			r.Logger.Info("🏆 Election completed, winner: %s", winner)

			// Track election completion with BACKBEAT if available
			if r.BackbeatIntegration != nil {
				operationID := fmt.Sprintf("election-completed-%d", time.Now().Unix())
				if err := r.BackbeatIntegration.StartP2POperation(operationID, "election", 1, map[string]interface{}{
					"winner":  winner,
					"node_id": r.Node.ID().ShortString(),
				}); err == nil {
					r.BackbeatIntegration.CompleteP2POperation(operationID, 1)
				}
			}
		},
	)

	if err := electionManager.Start(); err != nil {
		return fmt.Errorf("failed to start election manager: %v", err)
	}
	r.ElectionManager = electionManager
	r.Logger.Info("✅ Election manager started with automated heartbeat management")

	return nil
}
|
||||
|
||||
// initializeDHTStorage sets up the optional DHT stack: the libp2p DHT node,
// its bootstrap (with BACKBEAT tracking when available), connections to
// configured bootstrap peers, the encrypted DHT storage layer, and the UCXL
// decision publisher. All failures are logged and tolerated — the function
// always returns nil and leaves the corresponding runtime fields nil when a
// component could not be created.
func (r *SharedRuntime) initializeDHTStorage() error {
	// === DHT Storage and Decision Publishing ===
	var dhtNode *dht.LibP2PDHT
	var encryptedStorage *dht.EncryptedDHTStorage
	var decisionPublisher *ucxl.DecisionPublisher

	if r.Config.V2.DHT.Enabled {
		// Create DHT
		var err error
		dhtNode, err = dht.NewLibP2PDHT(r.Context, r.Node.Host())
		if err != nil {
			// Non-fatal: the runtime continues without DHT features.
			r.Logger.Warn("⚠️ Failed to create DHT: %v", err)
		} else {
			r.Logger.Info("🕸️ DHT initialized")

			// Bootstrap DHT with BACKBEAT tracking
			if r.BackbeatIntegration != nil {
				// NOTE(review): second-resolution operation IDs can collide if
				// two operations start within the same second — confirm
				// StartP2POperation tolerates duplicates.
				operationID := fmt.Sprintf("dht-bootstrap-%d", time.Now().Unix())
				if err := r.BackbeatIntegration.StartP2POperation(operationID, "dht_bootstrap", 4, nil); err == nil {
					r.BackbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0)
				}

				if err := dhtNode.Bootstrap(); err != nil {
					r.Logger.Warn("⚠️ DHT bootstrap failed: %v", err)
					r.BackbeatIntegration.FailP2POperation(operationID, err.Error())
				} else {
					r.BackbeatIntegration.CompleteP2POperation(operationID, 1)
				}
			} else {
				if err := dhtNode.Bootstrap(); err != nil {
					r.Logger.Warn("⚠️ DHT bootstrap failed: %v", err)
				}
			}

			// Connect to bootstrap peers if configured
			for _, addrStr := range r.Config.V2.DHT.BootstrapPeers {
				addr, err := multiaddr.NewMultiaddr(addrStr)
				if err != nil {
					r.Logger.Warn("⚠️ Invalid bootstrap address %s: %v", addrStr, err)
					continue
				}

				// Extract peer info from multiaddr
				info, err := peer.AddrInfoFromP2pAddr(addr)
				if err != nil {
					r.Logger.Warn("⚠️ Failed to parse peer info from %s: %v", addrStr, err)
					continue
				}

				// Track peer discovery with BACKBEAT if available
				if r.BackbeatIntegration != nil {
					// NOTE(review): every peer dialed within the same second
					// shares this operationID — confirm whether BACKBEAT
					// requires unique IDs per operation.
					operationID := fmt.Sprintf("peer-discovery-%d", time.Now().Unix())
					if err := r.BackbeatIntegration.StartP2POperation(operationID, "peer_discovery", 2, map[string]interface{}{
						"peer_addr": addrStr,
					}); err == nil {
						r.BackbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0)

						if err := r.Node.Host().Connect(r.Context, *info); err != nil {
							r.Logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err)
							r.BackbeatIntegration.FailP2POperation(operationID, err.Error())
						} else {
							r.Logger.Info("🔗 Connected to DHT bootstrap peer: %s", addrStr)
							r.BackbeatIntegration.CompleteP2POperation(operationID, 1)
						}
					}
				} else {
					if err := r.Node.Host().Connect(r.Context, *info); err != nil {
						r.Logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err)
					} else {
						r.Logger.Info("🔗 Connected to DHT bootstrap peer: %s", addrStr)
					}
				}
			}

			// Initialize encrypted storage
			encryptedStorage = dht.NewEncryptedDHTStorage(
				r.Context,
				r.Node.Host(),
				dhtNode,
				r.Config,
				r.Node.ID().ShortString(),
			)

			// Start cache cleanup
			encryptedStorage.StartCacheCleanup(5 * time.Minute)
			r.Logger.Info("🔐 Encrypted DHT storage initialized")

			// Initialize decision publisher
			decisionPublisher = ucxl.NewDecisionPublisher(
				r.Context,
				r.Config,
				encryptedStorage,
				r.Node.ID().ShortString(),
				r.Config.Agent.ID,
			)
			r.Logger.Info("📤 Decision publisher initialized")
		}
	} else {
		r.Logger.Info("⚪ DHT disabled in configuration")
	}

	r.DHTNode = dhtNode
	r.EncryptedStorage = encryptedStorage
	r.DecisionPublisher = decisionPublisher

	return nil
}
|
||||
|
||||
// initializeServices starts the task coordinator, the HTTP API server, the
// optional UCXI server, and the in-memory task tracker, wiring the decision
// publisher into the tracker when DHT publishing is enabled. Server startup
// happens in goroutines; listen errors are logged rather than returned.
func (r *SharedRuntime) initializeServices() error {
	// === Task Coordination Integration ===
	taskCoordinator := coordinator.NewTaskCoordinator(
		r.Context,
		r.PubSub,
		r.HypercoreLog,
		r.Config,
		r.Node.ID().ShortString(),
		nil, // HMMM router placeholder
	)

	taskCoordinator.Start()
	r.TaskCoordinator = taskCoordinator
	r.Logger.Info("✅ Task coordination system active")

	// Start HTTP API server
	httpServer := api.NewHTTPServer(r.Config.Network.APIPort, r.HypercoreLog, r.PubSub)
	go func() {
		r.Logger.Info("🌐 HTTP API server starting on :%d", r.Config.Network.APIPort)
		// http.ErrServerClosed is the expected result of a graceful Stop.
		if err := httpServer.Start(); err != nil && err != http.ErrServerClosed {
			r.Logger.Error("❌ HTTP server error: %v", err)
		}
	}()
	r.HTTPServer = httpServer

	// === UCXI Server Integration ===
	var ucxiServer *ucxi.Server
	if r.Config.UCXL.Enabled && r.Config.UCXL.Server.Enabled {
		storageDir := r.Config.UCXL.Storage.Directory
		if storageDir == "" {
			// Fall back to a temp directory when no storage dir is configured.
			storageDir = filepath.Join(os.TempDir(), "chorus-ucxi-storage")
		}

		storage, err := ucxi.NewBasicContentStorage(storageDir)
		if err != nil {
			// Non-fatal: UCXI simply stays disabled when storage can't be created.
			r.Logger.Warn("⚠️ Failed to create UCXI storage: %v", err)
		} else {
			resolver := ucxi.NewBasicAddressResolver(r.Node.ID().ShortString())
			resolver.SetDefaultTTL(r.Config.UCXL.Resolution.CacheTTL)

			ucxiConfig := ucxi.ServerConfig{
				Port:     r.Config.UCXL.Server.Port,
				BasePath: r.Config.UCXL.Server.BasePath,
				Resolver: resolver,
				Storage:  storage,
				Logger:   ucxi.SimpleLogger{},
			}

			ucxiServer = ucxi.NewServer(ucxiConfig)
			go func() {
				r.Logger.Info("🔗 UCXI server starting on :%d", r.Config.UCXL.Server.Port)
				if err := ucxiServer.Start(); err != nil && err != http.ErrServerClosed {
					r.Logger.Error("❌ UCXI server error: %v", err)
				}
			}()
		}
	} else {
		r.Logger.Info("⚪ UCXI server disabled")
	}
	// May remain nil when UCXI is disabled or its storage failed.
	r.UCXIServer = ucxiServer

	// Create simple task tracker
	taskTracker := &SimpleTaskTracker{
		maxTasks:    r.Config.Agent.MaxTasks,
		activeTasks: make(map[string]bool),
	}

	// Connect decision publisher to task tracker if available
	if r.DecisionPublisher != nil {
		taskTracker.decisionPublisher = r.DecisionPublisher
		r.Logger.Info("📤 Task completion decisions will be published to DHT")
	}
	r.TaskTracker = taskTracker

	return nil
}
|
||||
|
||||
// initializeAIProvider configures the reasoning engine with the appropriate AI provider
|
||||
func initializeAIProvider(cfg *config.Config, logger *SimpleLogger) error {
|
||||
// Set the AI provider
|
||||
reasoning.SetAIProvider(cfg.AI.Provider)
|
||||
|
||||
// Configure the selected provider
|
||||
switch cfg.AI.Provider {
|
||||
case "resetdata":
|
||||
if cfg.AI.ResetData.APIKey == "" {
|
||||
return fmt.Errorf("RESETDATA_API_KEY environment variable is required for resetdata provider")
|
||||
}
|
||||
|
||||
resetdataConfig := reasoning.ResetDataConfig{
|
||||
BaseURL: cfg.AI.ResetData.BaseURL,
|
||||
APIKey: cfg.AI.ResetData.APIKey,
|
||||
Model: cfg.AI.ResetData.Model,
|
||||
Timeout: cfg.AI.ResetData.Timeout,
|
||||
}
|
||||
reasoning.SetResetDataConfig(resetdataConfig)
|
||||
logger.Info("🌐 ResetData AI provider configured - Endpoint: %s, Model: %s",
|
||||
cfg.AI.ResetData.BaseURL, cfg.AI.ResetData.Model)
|
||||
|
||||
case "ollama":
|
||||
reasoning.SetOllamaEndpoint(cfg.AI.Ollama.Endpoint)
|
||||
logger.Info("🦙 Ollama AI provider configured - Endpoint: %s", cfg.AI.Ollama.Endpoint)
|
||||
|
||||
default:
|
||||
logger.Warn("⚠️ Unknown AI provider '%s', defaulting to resetdata", cfg.AI.Provider)
|
||||
if cfg.AI.ResetData.APIKey == "" {
|
||||
return fmt.Errorf("RESETDATA_API_KEY environment variable is required for default resetdata provider")
|
||||
}
|
||||
|
||||
resetdataConfig := reasoning.ResetDataConfig{
|
||||
BaseURL: cfg.AI.ResetData.BaseURL,
|
||||
APIKey: cfg.AI.ResetData.APIKey,
|
||||
Model: cfg.AI.ResetData.Model,
|
||||
Timeout: cfg.AI.ResetData.Timeout,
|
||||
}
|
||||
reasoning.SetResetDataConfig(resetdataConfig)
|
||||
reasoning.SetAIProvider("resetdata")
|
||||
}
|
||||
|
||||
// Configure model selection
|
||||
reasoning.SetModelConfig(
|
||||
cfg.Agent.Models,
|
||||
cfg.Agent.ModelSelectionWebhook,
|
||||
cfg.Agent.DefaultReasoningModel,
|
||||
)
|
||||
|
||||
// Initialize prompt sources (roles + default instructions) from Docker-mounted directory
|
||||
promptsDir := os.Getenv("CHORUS_PROMPTS_DIR")
|
||||
defaultInstrPath := os.Getenv("CHORUS_DEFAULT_INSTRUCTIONS_PATH")
|
||||
_ = prompt.Initialize(promptsDir, defaultInstrPath)
|
||||
|
||||
// Compose S + D for the agent role if available; otherwise use D only
|
||||
if cfg.Agent.Role != "" {
|
||||
if composed, err := prompt.ComposeSystemPrompt(cfg.Agent.Role); err == nil && strings.TrimSpace(composed) != "" {
|
||||
reasoning.SetDefaultSystemPrompt(composed)
|
||||
} else if d := prompt.GetDefaultInstructions(); strings.TrimSpace(d) != "" {
|
||||
reasoning.SetDefaultSystemPrompt(d)
|
||||
}
|
||||
} else if d := prompt.GetDefaultInstructions(); strings.TrimSpace(d) != "" {
|
||||
reasoning.SetDefaultSystemPrompt(d)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
114
pkg/prompt/loader.go
Normal file
114
pkg/prompt/loader.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package prompt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
var (
|
||||
loadedRoles map[string]RoleDefinition
|
||||
defaultInstr string
|
||||
)
|
||||
|
||||
// Initialize loads roles and default instructions from the configured directory.
|
||||
// dir: base directory (e.g., /etc/chorus/prompts)
|
||||
// defaultPath: optional explicit path to defaults file; if empty, will look for defaults.md or defaults.txt in dir.
|
||||
func Initialize(dir string, defaultPath string) error {
|
||||
loadedRoles = make(map[string]RoleDefinition)
|
||||
|
||||
// Load roles from all YAML files under dir
|
||||
if dir != "" {
|
||||
_ = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil || d == nil || d.IsDir() {
|
||||
return nil
|
||||
}
|
||||
name := d.Name()
|
||||
if strings.HasSuffix(name, ".yaml") || strings.HasSuffix(name, ".yml") {
|
||||
_ = loadRolesFile(path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Load default instructions
|
||||
if defaultPath == "" && dir != "" {
|
||||
// Try defaults.md then defaults.txt in the directory
|
||||
tryPaths := []string{
|
||||
filepath.Join(dir, "defaults.md"),
|
||||
filepath.Join(dir, "defaults.txt"),
|
||||
}
|
||||
for _, p := range tryPaths {
|
||||
if b, err := os.ReadFile(p); err == nil {
|
||||
defaultInstr = string(b)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else if defaultPath != "" {
|
||||
if b, err := os.ReadFile(defaultPath); err == nil {
|
||||
defaultInstr = string(b)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadRolesFile parses one roles YAML file and merges its definitions into
// the package-level registry. Each definition is stored under its map key,
// and that key overwrites any `id` field set inside the YAML body.
func loadRolesFile(path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var rf RolesFile
	if err := yaml.Unmarshal(data, &rf); err != nil {
		return err
	}
	for id, def := range rf.Roles {
		// The map key is authoritative for the role ID.
		def.ID = id
		loadedRoles[id] = def
	}
	return nil
}
|
||||
|
||||
// GetRole returns the role definition by ID if loaded.
|
||||
func GetRole(id string) (RoleDefinition, bool) {
|
||||
r, ok := loadedRoles[id]
|
||||
return r, ok
|
||||
}
|
||||
|
||||
// ListRoles returns IDs of loaded roles.
|
||||
func ListRoles() []string {
|
||||
ids := make([]string, 0, len(loadedRoles))
|
||||
for id := range loadedRoles {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// GetDefaultInstructions returns the loaded default instructions. The result
// is empty when no defaults file was present on disk at Initialize time.
func GetDefaultInstructions() string {
	return defaultInstr
}
|
||||
|
||||
// ComposeSystemPrompt concatenates the role system prompt (S) with default instructions (D).
|
||||
func ComposeSystemPrompt(roleID string) (string, error) {
|
||||
r, ok := GetRole(roleID)
|
||||
if !ok {
|
||||
return "", errors.New("role not found: " + roleID)
|
||||
}
|
||||
s := strings.TrimSpace(r.SystemPrompt)
|
||||
d := strings.TrimSpace(defaultInstr)
|
||||
switch {
|
||||
case s != "" && d != "":
|
||||
return s + "\n\n" + d, nil
|
||||
case s != "":
|
||||
return s, nil
|
||||
case d != "":
|
||||
return d, nil
|
||||
default:
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
22
pkg/prompt/types.go
Normal file
22
pkg/prompt/types.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package prompt
|
||||
|
||||
// RoleDefinition represents a single agent role loaded from YAML.
type RoleDefinition struct {
	ID           string   `yaml:"id"`            // role identifier; overwritten with the YAML map key on load
	Name         string   `yaml:"name"`          // human-readable role name
	Description  string   `yaml:"description"`   // short summary of the role's purpose
	Tags         []string `yaml:"tags"`          // free-form classification tags
	SystemPrompt string   `yaml:"system_prompt"` // persona text (S) composed with default instructions (D)
	// Defaults carries optional per-role agent settings.
	Defaults struct {
		Models       []string `yaml:"models"`       // preferred model identifiers
		Capabilities []string `yaml:"capabilities"` // advertised capabilities
		Expertise    []string `yaml:"expertise"`    // expertise topics
		MaxTasks     int      `yaml:"max_tasks"`    // concurrent task limit
	} `yaml:"defaults"`
}
|
||||
|
||||
// RolesFile is the top-level structure for a roles YAML file: a mapping of
// role ID to definition under the `roles:` key.
type RolesFile struct {
	Roles map[string]RoleDefinition `yaml:"roles"`
}
|
||||
|
||||
103
prompts/defaults.md
Normal file
103
prompts/defaults.md
Normal file
@@ -0,0 +1,103 @@
|
||||
Default Instructions (D)
|
||||
|
||||
Operating Policy
|
||||
- Be precise, verifiable, and do not fabricate. Surface uncertainties.
|
||||
- Prefer minimal, auditable changes; record decisions in UCXL.
|
||||
- Preserve API compatibility, data safety, and security constraints. Escalate when blocked.
|
||||
- Include UCXL citations for any external facts or prior decisions.
|
||||
|
||||
When To Use Subsystems
|
||||
- HMMM (collaborative reasoning): Cross-agent clarification, planning critique, consensus seeking, or targeted questions to unblock progress. Publish on `hmmm/meta-discussion/v1`.
|
||||
- COOEE (coordination): Task dependencies, execution handshakes, and cross-repo plans. Publish on `CHORUS/coordination/v1`.
|
||||
- UCXL (context): Read decisions/specs/plans by UCXL address. Write new decisions and evidence using the decision bundle envelope. Never invent UCXL paths.
|
||||
- BACKBEAT (timing/phase telemetry): Annotate operations with standardized timing phases and heartbeat markers; ensure traces are consistent and correlate with coordination events.
|
||||
|
||||
HMMM: Message (publish → hmmm/meta-discussion/v1)
|
||||
{
|
||||
"type": "hmmm.message",
|
||||
"session_id": "<string>",
|
||||
"from": {"agent_id": "<string>", "role": "<string>"},
|
||||
"message": "<plain text>",
|
||||
"intent": "proposal|question|answer|update|escalation",
|
||||
"citations": [{"ucxl.address": "<ucxl://...>", "reason": "<string>"}],
|
||||
"timestamp": "<RFC3339>"
|
||||
}
|
||||
|
||||
COOEE: Coordination Request (publish → CHORUS/coordination/v1)
|
||||
{
|
||||
"type": "cooee.request",
|
||||
"dependency": {
|
||||
"task1": {"repo": "<owner/name>", "id": "<id>", "agent_id": "<string>"},
|
||||
"task2": {"repo": "<owner/name>", "id": "<id>", "agent_id": "<string>"},
|
||||
"relationship": "blocks|duplicates|relates-to|requires",
|
||||
"reason": "<string>"
|
||||
},
|
||||
"objective": "<what success looks like>",
|
||||
"constraints": ["<time>", "<compliance>", "<perf>", "..."],
|
||||
"deadline": "<RFC3339|optional>",
|
||||
"citations": [{"ucxl.address": "<ucxl://...>"}],
|
||||
"timestamp": "<RFC3339>"
|
||||
}
|
||||
|
||||
COOEE: Coordination Plan (publish → CHORUS/coordination/v1)
|
||||
{
|
||||
"type": "cooee.plan",
|
||||
"session_id": "<string>",
|
||||
"participants": {"<agent_id>": {"role": "<string>"}},
|
||||
"steps": [{"id":"S1","owner":"<agent_id>","desc":"<action>","deps":["S0"],"done":false}],
|
||||
"risks": [{"id":"R1","desc":"<risk>","mitigation":"<mitigate>"}],
|
||||
"success_criteria": ["<criteria-1>", "<criteria-2>"],
|
||||
"citations": [{"ucxl.address": "<ucxl://...>"}],
|
||||
"timestamp": "<RFC3339>"
|
||||
}
|
||||
|
||||
UCXL: Decision Bundle (persist → UCXL)
|
||||
{
|
||||
"ucxl.address": "ucxl://<agent-id>:<role>@<project>:<task>/#/<path>",
|
||||
"version": "<RFC3339>",
|
||||
"content_type": "application/vnd.chorus.decision+json",
|
||||
"hash": "sha256:<hex>",
|
||||
"metadata": {
|
||||
"classification": "internal|public|restricted",
|
||||
"roles": ["<role-1>", "<role-2>"],
|
||||
"tags": ["decision","coordination","review"]
|
||||
},
|
||||
"task": "<what is being decided>",
|
||||
"options": [
|
||||
{"name":"<A>","plan":"<steps>","risks":"<risks>"},
|
||||
{"name":"<B>","plan":"<steps>","risks":"<risks>"}
|
||||
],
|
||||
"choice": "<A|B|...>",
|
||||
"rationale": "<why>",
|
||||
"citations": [{"ucxl.address":"<ucxl://...>"}]
|
||||
}
|
||||
|
||||
BACKBEAT: Usage & Standards
|
||||
- Purpose: Provide beat-aware timing, phase tracking, and correlation for distributed operations.
|
||||
- Phases: Define and emit consistent phases (e.g., "prepare", "plan", "exec", "verify", "publish").
|
||||
- Events: At minimum emit `start`, `heartbeat`, and `complete` for each operation with the same correlation ID.
|
||||
- Correlation: Include `team_id`, `session_id`, `operation_id`, and link to COOEE/HMMM message IDs when present.
|
||||
- Latency budget: Attach `budget_ms` when available; warn if over budget.
|
||||
- Error handling: On failure, emit `complete` with `"status": "error"`, a concise `reason`, and a UCXL decision/citation if escalated.
|
||||
- Minimal JSON envelope for a beat:
|
||||
{
|
||||
"type": "backbeat.event",
|
||||
"operation_id": "<uuid>",
|
||||
"phase": "prepare|plan|exec|verify|publish",
|
||||
"event": "start|heartbeat|complete",
|
||||
"status": "ok|error",
|
||||
"team_id": "<string>",
|
||||
"session_id": "<string>",
|
||||
"cooee_id": "<message-id|optional>",
|
||||
"hmmm_id": "<message-id|optional>",
|
||||
"budget_ms": <int|optional>,
|
||||
"elapsed_ms": <int|optional>,
|
||||
"details": {"key": "value"},
|
||||
"timestamp": "<RFC3339>"
|
||||
}
|
||||
|
||||
Composition
|
||||
- Final system prompt = S (role/system persona) + two newlines + this D.
|
||||
- Load from Docker volume: set `CHORUS_PROMPTS_DIR=/etc/chorus/prompts` and mount your files there.
|
||||
- Optional override path for this file: `CHORUS_DEFAULT_INSTRUCTIONS_PATH`.
|
||||
|
||||
28
prompts/roles.yaml
Normal file
28
prompts/roles.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
roles:
|
||||
arbiter:
|
||||
name: "Arbiter"
|
||||
description: "Coordination lead for cross-agent planning and consensus."
|
||||
tags: [coordination, planning]
|
||||
system_prompt: |
|
||||
You are Arbiter, a precise coordination lead for distributed engineering teams.
|
||||
Facilitate efficient cross-agent planning, detect dependencies, and drive consensus.
|
||||
Optimize for clarity, verifiability, and minimal, auditable change sets.
|
||||
defaults:
|
||||
models: ["meta/llama-3.1-8b-instruct"]
|
||||
capabilities: ["coordination","planning","dependency-analysis"]
|
||||
expertise: []
|
||||
max_tasks: 3
|
||||
|
||||
hmmm-analyst:
|
||||
name: "HMMM Analyst"
|
||||
description: "Analytical agent specializing in collaborative reasoning workflows."
|
||||
tags: [reasoning, analysis]
|
||||
system_prompt: |
|
||||
You are an analytical agent focused on clear, testable reasoning and critique.
|
||||
Identify assumptions, propose checkpoints, and seek consensus via HMMM when useful.
|
||||
defaults:
|
||||
models: ["meta/llama-3.1-8b-instruct"]
|
||||
capabilities: ["reasoning","critique","explanation"]
|
||||
expertise: []
|
||||
max_tasks: 2
|
||||
|
||||
@@ -16,12 +16,13 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
availableModels []string
|
||||
modelWebhookURL string
|
||||
defaultModel string
|
||||
ollamaEndpoint string = "http://localhost:11434" // Default fallback
|
||||
aiProvider string = "resetdata" // Default provider
|
||||
resetdataConfig ResetDataConfig
|
||||
availableModels []string
|
||||
modelWebhookURL string
|
||||
defaultModel string
|
||||
ollamaEndpoint string = "http://localhost:11434" // Default fallback
|
||||
aiProvider string = "resetdata" // Default provider
|
||||
resetdataConfig ResetDataConfig
|
||||
defaultSystemPrompt string
|
||||
)
|
||||
|
||||
// AIProvider represents the AI service provider
|
||||
@@ -118,17 +119,17 @@ func generateResetDataResponse(ctx context.Context, model, prompt string) (strin
|
||||
}
|
||||
|
||||
// Create the request payload in OpenAI format
|
||||
requestPayload := OpenAIRequest{
|
||||
Model: modelToUse,
|
||||
Messages: []OpenAIMessage{
|
||||
{Role: "system", Content: "You are a helpful assistant."},
|
||||
{Role: "user", Content: prompt},
|
||||
},
|
||||
Temperature: 0.2,
|
||||
TopP: 0.7,
|
||||
MaxTokens: 1024,
|
||||
Stream: false,
|
||||
}
|
||||
requestPayload := OpenAIRequest{
|
||||
Model: modelToUse,
|
||||
Messages: []OpenAIMessage{
|
||||
{Role: "system", Content: defaultSystemPromptOrFallback()},
|
||||
{Role: "user", Content: prompt},
|
||||
},
|
||||
Temperature: 0.2,
|
||||
TopP: 0.7,
|
||||
MaxTokens: 1024,
|
||||
Stream: false,
|
||||
}
|
||||
|
||||
payloadBytes, err := json.Marshal(requestPayload)
|
||||
if err != nil {
|
||||
@@ -233,7 +234,12 @@ func SetResetDataConfig(config ResetDataConfig) {
|
||||
|
||||
// SetOllamaEndpoint configures the Ollama API endpoint
|
||||
func SetOllamaEndpoint(endpoint string) {
|
||||
ollamaEndpoint = endpoint
|
||||
ollamaEndpoint = endpoint
|
||||
}
|
||||
|
||||
// SetDefaultSystemPrompt configures the default system message used when building prompts.
|
||||
func SetDefaultSystemPrompt(systemPrompt string) {
|
||||
defaultSystemPrompt = systemPrompt
|
||||
}
|
||||
|
||||
// selectBestModel calls the model selection webhook to choose the best model for a prompt
|
||||
@@ -291,6 +297,13 @@ func selectBestModel(availableModels []string, prompt string) string {
|
||||
|
||||
// GenerateResponseSmart automatically selects the best model for the prompt
|
||||
func GenerateResponseSmart(ctx context.Context, prompt string) (string, error) {
|
||||
selectedModel := selectBestModel(availableModels, prompt)
|
||||
return GenerateResponse(ctx, selectedModel, prompt)
|
||||
selectedModel := selectBestModel(availableModels, prompt)
|
||||
return GenerateResponse(ctx, selectedModel, prompt)
|
||||
}
|
||||
|
||||
func defaultSystemPromptOrFallback() string {
|
||||
if strings.TrimSpace(defaultSystemPrompt) != "" {
|
||||
return defaultSystemPrompt
|
||||
}
|
||||
return "You are a helpful assistant."
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user