diff --git a/internal/runtime/shared.go b/internal/runtime/shared.go new file mode 100644 index 0000000..8174320 --- /dev/null +++ b/internal/runtime/shared.go @@ -0,0 +1,601 @@ +package runtime + +import ( + "context" + "fmt" + "log" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "chorus/api" + "chorus/coordinator" + "chorus/discovery" + "chorus/internal/backbeat" + "chorus/internal/licensing" + "chorus/internal/logging" + "chorus/p2p" + "chorus/pkg/config" + "chorus/pkg/dht" + "chorus/pkg/election" + "chorus/pkg/health" + "chorus/pkg/shutdown" + "chorus/pkg/prompt" + "chorus/pkg/ucxi" + "chorus/pkg/ucxl" + "chorus/pubsub" + "chorus/reasoning" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" +) + +const ( + AppName = "CHORUS" + AppVersion = "0.1.0-dev" +) + +// SimpleLogger provides basic logging implementation +type SimpleLogger struct{} + +func (l *SimpleLogger) Info(msg string, args ...interface{}) { + log.Printf("[INFO] "+msg, args...) +} + +func (l *SimpleLogger) Warn(msg string, args ...interface{}) { + log.Printf("[WARN] "+msg, args...) +} + +func (l *SimpleLogger) Error(msg string, args ...interface{}) { + log.Printf("[ERROR] "+msg, args...) 
+} + +// SimpleTaskTracker tracks active tasks for availability reporting +type SimpleTaskTracker struct { + maxTasks int + activeTasks map[string]bool + decisionPublisher *ucxl.DecisionPublisher +} + +// GetActiveTasks returns list of active task IDs +func (t *SimpleTaskTracker) GetActiveTasks() []string { + tasks := make([]string, 0, len(t.activeTasks)) + for taskID := range t.activeTasks { + tasks = append(tasks, taskID) + } + return tasks +} + +// GetMaxTasks returns maximum number of concurrent tasks +func (t *SimpleTaskTracker) GetMaxTasks() int { + return t.maxTasks +} + +// AddTask marks a task as active +func (t *SimpleTaskTracker) AddTask(taskID string) { + t.activeTasks[taskID] = true +} + +// RemoveTask marks a task as completed and publishes decision if publisher available +func (t *SimpleTaskTracker) RemoveTask(taskID string) { + delete(t.activeTasks, taskID) + + // Publish task completion decision if publisher is available + if t.decisionPublisher != nil { + t.publishTaskCompletion(taskID, true, "Task completed successfully", nil) + } +} + +// publishTaskCompletion publishes a task completion decision to DHT +func (t *SimpleTaskTracker) publishTaskCompletion(taskID string, success bool, summary string, filesModified []string) { + if t.decisionPublisher == nil { + return + } + + if err := t.decisionPublisher.PublishTaskCompletion(taskID, success, summary, filesModified); err != nil { + fmt.Printf("⚠️ Failed to publish task completion for %s: %v\n", taskID, err) + } else { + fmt.Printf("πŸ“€ Published task completion decision for: %s\n", taskID) + } +} + +// SharedRuntime contains all the shared P2P infrastructure components +type SharedRuntime struct { + Config *config.Config + Logger *SimpleLogger + Context context.Context + Cancel context.CancelFunc + Node *p2p.Node + PubSub *pubsub.PubSub + HypercoreLog *logging.HypercoreLog + MDNSDiscovery *discovery.MDNSDiscovery + BackbeatIntegration *backbeat.Integration + DHTNode *dht.LibP2PDHT + 
EncryptedStorage *dht.EncryptedDHTStorage + DecisionPublisher *ucxl.DecisionPublisher + ElectionManager *election.ElectionManager + TaskCoordinator *coordinator.TaskCoordinator + HTTPServer *api.HTTPServer + UCXIServer *ucxi.Server + HealthManager *health.Manager + ShutdownManager *shutdown.Manager + TaskTracker *SimpleTaskTracker +} + +// Initialize sets up all shared P2P infrastructure components +func Initialize(appMode string) (*SharedRuntime, error) { + runtime := &SharedRuntime{} + runtime.Logger = &SimpleLogger{} + + ctx, cancel := context.WithCancel(context.Background()) + runtime.Context = ctx + runtime.Cancel = cancel + + runtime.Logger.Info("🎭 Starting CHORUS v%s - Container-First P2P Task Coordination", AppVersion) + runtime.Logger.Info("πŸ“¦ Container deployment - Mode: %s", appMode) + + // Load configuration from environment (no config files in containers) + runtime.Logger.Info("πŸ“‹ Loading configuration from environment variables...") + cfg, err := config.LoadFromEnvironment() + if err != nil { + return nil, fmt.Errorf("configuration error: %v", err) + } + runtime.Config = cfg + + runtime.Logger.Info("βœ… Configuration loaded successfully") + runtime.Logger.Info("πŸ€– Agent ID: %s", cfg.Agent.ID) + runtime.Logger.Info("🎯 Specialization: %s", cfg.Agent.Specialization) + + // CRITICAL: Validate license before any P2P operations + runtime.Logger.Info("πŸ” Validating CHORUS license with KACHING...") + licenseValidator := licensing.NewValidator(licensing.LicenseConfig{ + LicenseID: cfg.License.LicenseID, + ClusterID: cfg.License.ClusterID, + KachingURL: cfg.License.KachingURL, + }) + if err := licenseValidator.Validate(); err != nil { + return nil, fmt.Errorf("license validation failed: %v", err) + } + runtime.Logger.Info("βœ… License validation successful - CHORUS authorized to run") + + // Initialize AI provider configuration + runtime.Logger.Info("🧠 Configuring AI provider: %s", cfg.AI.Provider) + if err := initializeAIProvider(cfg, runtime.Logger); 
err != nil { + return nil, fmt.Errorf("AI provider initialization failed: %v", err) + } + runtime.Logger.Info("βœ… AI provider configured successfully") + + // Initialize BACKBEAT integration + var backbeatIntegration *backbeat.Integration + backbeatIntegration, err = backbeat.NewIntegration(cfg, cfg.Agent.ID, runtime.Logger) + if err != nil { + runtime.Logger.Warn("⚠️ BACKBEAT integration initialization failed: %v", err) + runtime.Logger.Info("πŸ“ P2P operations will run without beat synchronization") + } else { + if err := backbeatIntegration.Start(ctx); err != nil { + runtime.Logger.Warn("⚠️ Failed to start BACKBEAT integration: %v", err) + backbeatIntegration = nil + } else { + runtime.Logger.Info("🎡 BACKBEAT integration started successfully") + } + } + runtime.BackbeatIntegration = backbeatIntegration + + // Initialize P2P node + node, err := p2p.NewNode(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create P2P node: %v", err) + } + runtime.Node = node + + runtime.Logger.Info("🐝 CHORUS node started successfully") + runtime.Logger.Info("πŸ“ Node ID: %s", node.ID().ShortString()) + runtime.Logger.Info("πŸ”— Listening addresses:") + for _, addr := range node.Addresses() { + runtime.Logger.Info(" %s/p2p/%s", addr, node.ID()) + } + + // Initialize Hypercore-style logger for P2P coordination + hlog := logging.NewHypercoreLog(node.ID()) + hlog.Append(logging.PeerJoined, map[string]interface{}{"status": "started"}) + runtime.HypercoreLog = hlog + runtime.Logger.Info("πŸ“ Hypercore logger initialized") + + // Initialize mDNS discovery + mdnsDiscovery, err := discovery.NewMDNSDiscovery(ctx, node.Host(), "chorus-peer-discovery") + if err != nil { + return nil, fmt.Errorf("failed to create mDNS discovery: %v", err) + } + runtime.MDNSDiscovery = mdnsDiscovery + + // Initialize PubSub with hypercore logging + ps, err := pubsub.NewPubSubWithLogger(ctx, node.Host(), "chorus/coordination/v1", "hmmm/meta-discussion/v1", hlog) + if err != nil { + return nil, 
fmt.Errorf("failed to create PubSub: %v", err) + } + runtime.PubSub = ps + + runtime.Logger.Info("πŸ“‘ PubSub system initialized") + + // Join role-based topics if role is configured + if cfg.Agent.Role != "" { + reportsTo := []string{} + if cfg.Agent.ReportsTo != "" { + reportsTo = []string{cfg.Agent.ReportsTo} + } + if err := ps.JoinRoleBasedTopics(cfg.Agent.Role, cfg.Agent.Expertise, reportsTo); err != nil { + runtime.Logger.Warn("⚠️ Failed to join role-based topics: %v", err) + } else { + runtime.Logger.Info("🎯 Joined role-based collaboration topics") + } + } + + // Initialize remaining components + if err := runtime.initializeElectionSystem(); err != nil { + return nil, fmt.Errorf("failed to initialize election system: %v", err) + } + + if err := runtime.initializeDHTStorage(); err != nil { + return nil, fmt.Errorf("failed to initialize DHT storage: %v", err) + } + + if err := runtime.initializeServices(); err != nil { + return nil, fmt.Errorf("failed to initialize services: %v", err) + } + + return runtime, nil +} + +// Cleanup properly shuts down all runtime components +func (r *SharedRuntime) Cleanup() { + r.Logger.Info("πŸ”„ Starting graceful shutdown...") + + if r.BackbeatIntegration != nil { + r.BackbeatIntegration.Stop() + } + + if r.MDNSDiscovery != nil { + r.MDNSDiscovery.Close() + } + + if r.PubSub != nil { + r.PubSub.Close() + } + + if r.DHTNode != nil { + r.DHTNode.Close() + } + + if r.Node != nil { + r.Node.Close() + } + + if r.HTTPServer != nil { + r.HTTPServer.Stop() + } + + if r.UCXIServer != nil { + r.UCXIServer.Stop() + } + + if r.ElectionManager != nil { + r.ElectionManager.Stop() + } + + if r.Cancel != nil { + r.Cancel() + } + + r.Logger.Info("βœ… CHORUS shutdown completed") +} + +// Helper methods for initialization (extracted from main.go) +func (r *SharedRuntime) initializeElectionSystem() error { + // === Admin Election System === + electionManager := election.NewElectionManager(r.Context, r.Config, r.Node.Host(), r.PubSub, 
r.Node.ID().ShortString()) + + // Set election callbacks with BACKBEAT integration + electionManager.SetCallbacks( + func(oldAdmin, newAdmin string) { + r.Logger.Info("πŸ‘‘ Admin changed: %s -> %s", oldAdmin, newAdmin) + + // Track admin change with BACKBEAT if available + if r.BackbeatIntegration != nil { + operationID := fmt.Sprintf("admin-change-%d", time.Now().Unix()) + if err := r.BackbeatIntegration.StartP2POperation(operationID, "admin_change", 2, map[string]interface{}{ + "old_admin": oldAdmin, + "new_admin": newAdmin, + }); err == nil { + // Complete immediately as this is a state change, not a long operation + r.BackbeatIntegration.CompleteP2POperation(operationID, 1) + } + } + + // If this node becomes admin, enable SLURP functionality + if newAdmin == r.Node.ID().ShortString() { + r.Logger.Info("🎯 This node is now admin - enabling SLURP functionality") + r.Config.Slurp.Enabled = true + // Apply admin role configuration + if err := r.Config.ApplyRoleDefinition("admin"); err != nil { + r.Logger.Warn("⚠️ Failed to apply admin role: %v", err) + } + } + }, + func(winner string) { + r.Logger.Info("πŸ† Election completed, winner: %s", winner) + + // Track election completion with BACKBEAT if available + if r.BackbeatIntegration != nil { + operationID := fmt.Sprintf("election-completed-%d", time.Now().Unix()) + if err := r.BackbeatIntegration.StartP2POperation(operationID, "election", 1, map[string]interface{}{ + "winner": winner, + "node_id": r.Node.ID().ShortString(), + }); err == nil { + r.BackbeatIntegration.CompleteP2POperation(operationID, 1) + } + } + }, + ) + + if err := electionManager.Start(); err != nil { + return fmt.Errorf("failed to start election manager: %v", err) + } + r.ElectionManager = electionManager + r.Logger.Info("βœ… Election manager started with automated heartbeat management") + + return nil +} + +func (r *SharedRuntime) initializeDHTStorage() error { + // === DHT Storage and Decision Publishing === + var dhtNode *dht.LibP2PDHT + var 
encryptedStorage *dht.EncryptedDHTStorage + var decisionPublisher *ucxl.DecisionPublisher + + if r.Config.V2.DHT.Enabled { + // Create DHT + var err error + dhtNode, err = dht.NewLibP2PDHT(r.Context, r.Node.Host()) + if err != nil { + r.Logger.Warn("⚠️ Failed to create DHT: %v", err) + } else { + r.Logger.Info("πŸ•ΈοΈ DHT initialized") + + // Bootstrap DHT with BACKBEAT tracking + if r.BackbeatIntegration != nil { + operationID := fmt.Sprintf("dht-bootstrap-%d", time.Now().Unix()) + if err := r.BackbeatIntegration.StartP2POperation(operationID, "dht_bootstrap", 4, nil); err == nil { + r.BackbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0) + } + + if err := dhtNode.Bootstrap(); err != nil { + r.Logger.Warn("⚠️ DHT bootstrap failed: %v", err) + r.BackbeatIntegration.FailP2POperation(operationID, err.Error()) + } else { + r.BackbeatIntegration.CompleteP2POperation(operationID, 1) + } + } else { + if err := dhtNode.Bootstrap(); err != nil { + r.Logger.Warn("⚠️ DHT bootstrap failed: %v", err) + } + } + + // Connect to bootstrap peers if configured + for _, addrStr := range r.Config.V2.DHT.BootstrapPeers { + addr, err := multiaddr.NewMultiaddr(addrStr) + if err != nil { + r.Logger.Warn("⚠️ Invalid bootstrap address %s: %v", addrStr, err) + continue + } + + // Extract peer info from multiaddr + info, err := peer.AddrInfoFromP2pAddr(addr) + if err != nil { + r.Logger.Warn("⚠️ Failed to parse peer info from %s: %v", addrStr, err) + continue + } + + // Track peer discovery with BACKBEAT if available + if r.BackbeatIntegration != nil { + operationID := fmt.Sprintf("peer-discovery-%d", time.Now().Unix()) + if err := r.BackbeatIntegration.StartP2POperation(operationID, "peer_discovery", 2, map[string]interface{}{ + "peer_addr": addrStr, + }); err == nil { + r.BackbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0) + + if err := r.Node.Host().Connect(r.Context, *info); err != nil { + r.Logger.Warn("⚠️ Failed to 
connect to bootstrap peer %s: %v", addrStr, err) + r.BackbeatIntegration.FailP2POperation(operationID, err.Error()) + } else { + r.Logger.Info("πŸ”— Connected to DHT bootstrap peer: %s", addrStr) + r.BackbeatIntegration.CompleteP2POperation(operationID, 1) + } + } + } else { + if err := r.Node.Host().Connect(r.Context, *info); err != nil { + r.Logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err) + } else { + r.Logger.Info("πŸ”— Connected to DHT bootstrap peer: %s", addrStr) + } + } + } + + // Initialize encrypted storage + encryptedStorage = dht.NewEncryptedDHTStorage( + r.Context, + r.Node.Host(), + dhtNode, + r.Config, + r.Node.ID().ShortString(), + ) + + // Start cache cleanup + encryptedStorage.StartCacheCleanup(5 * time.Minute) + r.Logger.Info("πŸ” Encrypted DHT storage initialized") + + // Initialize decision publisher + decisionPublisher = ucxl.NewDecisionPublisher( + r.Context, + r.Config, + encryptedStorage, + r.Node.ID().ShortString(), + r.Config.Agent.ID, + ) + r.Logger.Info("πŸ“€ Decision publisher initialized") + } + } else { + r.Logger.Info("βšͺ DHT disabled in configuration") + } + + r.DHTNode = dhtNode + r.EncryptedStorage = encryptedStorage + r.DecisionPublisher = decisionPublisher + + return nil +} + +func (r *SharedRuntime) initializeServices() error { + // === Task Coordination Integration === + taskCoordinator := coordinator.NewTaskCoordinator( + r.Context, + r.PubSub, + r.HypercoreLog, + r.Config, + r.Node.ID().ShortString(), + nil, // HMMM router placeholder + ) + + taskCoordinator.Start() + r.TaskCoordinator = taskCoordinator + r.Logger.Info("βœ… Task coordination system active") + + // Start HTTP API server + httpServer := api.NewHTTPServer(r.Config.Network.APIPort, r.HypercoreLog, r.PubSub) + go func() { + r.Logger.Info("🌐 HTTP API server starting on :%d", r.Config.Network.APIPort) + if err := httpServer.Start(); err != nil && err != http.ErrServerClosed { + r.Logger.Error("❌ HTTP server error: %v", err) + } + }() + 
r.HTTPServer = httpServer + + // === UCXI Server Integration === + var ucxiServer *ucxi.Server + if r.Config.UCXL.Enabled && r.Config.UCXL.Server.Enabled { + storageDir := r.Config.UCXL.Storage.Directory + if storageDir == "" { + storageDir = filepath.Join(os.TempDir(), "chorus-ucxi-storage") + } + + storage, err := ucxi.NewBasicContentStorage(storageDir) + if err != nil { + r.Logger.Warn("⚠️ Failed to create UCXI storage: %v", err) + } else { + resolver := ucxi.NewBasicAddressResolver(r.Node.ID().ShortString()) + resolver.SetDefaultTTL(r.Config.UCXL.Resolution.CacheTTL) + + ucxiConfig := ucxi.ServerConfig{ + Port: r.Config.UCXL.Server.Port, + BasePath: r.Config.UCXL.Server.BasePath, + Resolver: resolver, + Storage: storage, + Logger: ucxi.SimpleLogger{}, + } + + ucxiServer = ucxi.NewServer(ucxiConfig) + go func() { + r.Logger.Info("πŸ”— UCXI server starting on :%d", r.Config.UCXL.Server.Port) + if err := ucxiServer.Start(); err != nil && err != http.ErrServerClosed { + r.Logger.Error("❌ UCXI server error: %v", err) + } + }() + } + } else { + r.Logger.Info("βšͺ UCXI server disabled") + } + r.UCXIServer = ucxiServer + + // Create simple task tracker + taskTracker := &SimpleTaskTracker{ + maxTasks: r.Config.Agent.MaxTasks, + activeTasks: make(map[string]bool), + } + + // Connect decision publisher to task tracker if available + if r.DecisionPublisher != nil { + taskTracker.decisionPublisher = r.DecisionPublisher + r.Logger.Info("πŸ“€ Task completion decisions will be published to DHT") + } + r.TaskTracker = taskTracker + + return nil +} + +// initializeAIProvider configures the reasoning engine with the appropriate AI provider +func initializeAIProvider(cfg *config.Config, logger *SimpleLogger) error { + // Set the AI provider + reasoning.SetAIProvider(cfg.AI.Provider) + + // Configure the selected provider + switch cfg.AI.Provider { + case "resetdata": + if cfg.AI.ResetData.APIKey == "" { + return fmt.Errorf("RESETDATA_API_KEY environment variable is required for 
resetdata provider") + } + + resetdataConfig := reasoning.ResetDataConfig{ + BaseURL: cfg.AI.ResetData.BaseURL, + APIKey: cfg.AI.ResetData.APIKey, + Model: cfg.AI.ResetData.Model, + Timeout: cfg.AI.ResetData.Timeout, + } + reasoning.SetResetDataConfig(resetdataConfig) + logger.Info("🌐 ResetData AI provider configured - Endpoint: %s, Model: %s", + cfg.AI.ResetData.BaseURL, cfg.AI.ResetData.Model) + + case "ollama": + reasoning.SetOllamaEndpoint(cfg.AI.Ollama.Endpoint) + logger.Info("πŸ¦™ Ollama AI provider configured - Endpoint: %s", cfg.AI.Ollama.Endpoint) + + default: + logger.Warn("⚠️ Unknown AI provider '%s', defaulting to resetdata", cfg.AI.Provider) + if cfg.AI.ResetData.APIKey == "" { + return fmt.Errorf("RESETDATA_API_KEY environment variable is required for default resetdata provider") + } + + resetdataConfig := reasoning.ResetDataConfig{ + BaseURL: cfg.AI.ResetData.BaseURL, + APIKey: cfg.AI.ResetData.APIKey, + Model: cfg.AI.ResetData.Model, + Timeout: cfg.AI.ResetData.Timeout, + } + reasoning.SetResetDataConfig(resetdataConfig) + reasoning.SetAIProvider("resetdata") + } + + // Configure model selection + reasoning.SetModelConfig( + cfg.Agent.Models, + cfg.Agent.ModelSelectionWebhook, + cfg.Agent.DefaultReasoningModel, + ) + + // Initialize prompt sources (roles + default instructions) from Docker-mounted directory + promptsDir := os.Getenv("CHORUS_PROMPTS_DIR") + defaultInstrPath := os.Getenv("CHORUS_DEFAULT_INSTRUCTIONS_PATH") + _ = prompt.Initialize(promptsDir, defaultInstrPath) + + // Compose S + D for the agent role if available; otherwise use D only + if cfg.Agent.Role != "" { + if composed, err := prompt.ComposeSystemPrompt(cfg.Agent.Role); err == nil && strings.TrimSpace(composed) != "" { + reasoning.SetDefaultSystemPrompt(composed) + } else if d := prompt.GetDefaultInstructions(); strings.TrimSpace(d) != "" { + reasoning.SetDefaultSystemPrompt(d) + } + } else if d := prompt.GetDefaultInstructions(); strings.TrimSpace(d) != "" { + 
reasoning.SetDefaultSystemPrompt(d) + } + + return nil +} diff --git a/pkg/prompt/loader.go b/pkg/prompt/loader.go new file mode 100644 index 0000000..5f4eed7 --- /dev/null +++ b/pkg/prompt/loader.go @@ -0,0 +1,114 @@ +package prompt + +import ( + "errors" + "io/fs" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v3" +) + +var ( + loadedRoles map[string]RoleDefinition + defaultInstr string +) + +// Initialize loads roles and default instructions from the configured directory. +// dir: base directory (e.g., /etc/chorus/prompts) +// defaultPath: optional explicit path to defaults file; if empty, will look for defaults.md or defaults.txt in dir. +func Initialize(dir string, defaultPath string) error { + loadedRoles = make(map[string]RoleDefinition) + + // Load roles from all YAML files under dir + if dir != "" { + _ = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil || d == nil || d.IsDir() { + return nil + } + name := d.Name() + if strings.HasSuffix(name, ".yaml") || strings.HasSuffix(name, ".yml") { + _ = loadRolesFile(path) + } + return nil + }) + } + + // Load default instructions + if defaultPath == "" && dir != "" { + // Try defaults.md then defaults.txt in the directory + tryPaths := []string{ + filepath.Join(dir, "defaults.md"), + filepath.Join(dir, "defaults.txt"), + } + for _, p := range tryPaths { + if b, err := os.ReadFile(p); err == nil { + defaultInstr = string(b) + break + } + } + } else if defaultPath != "" { + if b, err := os.ReadFile(defaultPath); err == nil { + defaultInstr = string(b) + } + } + return nil +} + +func loadRolesFile(path string) error { + data, err := os.ReadFile(path) + if err != nil { + return err + } + var rf RolesFile + if err := yaml.Unmarshal(data, &rf); err != nil { + return err + } + for id, def := range rf.Roles { + def.ID = id + loadedRoles[id] = def + } + return nil +} + +// GetRole returns the role definition by ID if loaded. 
+func GetRole(id string) (RoleDefinition, bool) { + r, ok := loadedRoles[id] + return r, ok +} + +// ListRoles returns IDs of loaded roles. +func ListRoles() []string { + ids := make([]string, 0, len(loadedRoles)) + for id := range loadedRoles { + ids = append(ids, id) + } + return ids +} + +// GetDefaultInstructions returns the loaded default instructions (may be empty if not present on disk). +func GetDefaultInstructions() string { + return defaultInstr +} + +// ComposeSystemPrompt concatenates the role system prompt (S) with default instructions (D). +func ComposeSystemPrompt(roleID string) (string, error) { + r, ok := GetRole(roleID) + if !ok { + return "", errors.New("role not found: " + roleID) + } + s := strings.TrimSpace(r.SystemPrompt) + d := strings.TrimSpace(defaultInstr) + switch { + case s != "" && d != "": + return s + "\n\n" + d, nil + case s != "": + return s, nil + case d != "": + return d, nil + default: + return "", nil + } +} + diff --git a/pkg/prompt/types.go b/pkg/prompt/types.go new file mode 100644 index 0000000..222d5c5 --- /dev/null +++ b/pkg/prompt/types.go @@ -0,0 +1,22 @@ +package prompt + +// RoleDefinition represents a single agent role loaded from YAML. +type RoleDefinition struct { + ID string `yaml:"id"` + Name string `yaml:"name"` + Description string `yaml:"description"` + Tags []string `yaml:"tags"` + SystemPrompt string `yaml:"system_prompt"` + Defaults struct { + Models []string `yaml:"models"` + Capabilities []string `yaml:"capabilities"` + Expertise []string `yaml:"expertise"` + MaxTasks int `yaml:"max_tasks"` + } `yaml:"defaults"` +} + +// RolesFile is the top-level structure for a roles YAML file. +type RolesFile struct { + Roles map[string]RoleDefinition `yaml:"roles"` +} + diff --git a/prompts/defaults.md b/prompts/defaults.md new file mode 100644 index 0000000..860b4cd --- /dev/null +++ b/prompts/defaults.md @@ -0,0 +1,103 @@ +Default Instructions (D) + +Operating Policy +- Be precise, verifiable, and do not fabricate. 
  Surface uncertainties.
- Prefer minimal, auditable changes; record decisions in UCXL.
- Preserve API compatibility, data safety, and security constraints. Escalate when blocked.
- Include UCXL citations for any external facts or prior decisions.

When To Use Subsystems
- HMMM (collaborative reasoning): Cross-agent clarification, planning critique, consensus seeking, or targeted questions to unblock progress. Publish on `hmmm/meta-discussion/v1`.
- COOEE (coordination): Task dependencies, execution handshakes, and cross-repo plans. Publish on `chorus/coordination/v1`.
- UCXL (context): Read decisions/specs/plans by UCXL address. Write new decisions and evidence using the decision bundle envelope. Never invent UCXL paths.
- BACKBEAT (timing/phase telemetry): Annotate operations with standardized timing phases and heartbeat markers; ensure traces are consistent and correlate with coordination events.

HMMM: Message (publish → hmmm/meta-discussion/v1)
{
  "type": "hmmm.message",
  "session_id": "",
  "from": {"agent_id": "", "role": ""},
  "message": "",
  "intent": "proposal|question|answer|update|escalation",
  "citations": [{"ucxl.address": "", "reason": ""}],
  "timestamp": ""
}

COOEE: Coordination Request (publish → chorus/coordination/v1)
{
  "type": "cooee.request",
  "dependency": {
    "task1": {"repo": "", "id": "", "agent_id": ""},
    "task2": {"repo": "", "id": "", "agent_id": ""},
    "relationship": "blocks|duplicates|relates-to|requires",
    "reason": ""
  },
  "objective": "",
  "constraints": ["