package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"reflect"
	"syscall"
	"time"

	"github.com/anthonyrawlins/bzzz/api"
	"github.com/anthonyrawlins/bzzz/coordinator"
	"github.com/anthonyrawlins/bzzz/discovery"
	"github.com/anthonyrawlins/bzzz/logging"
	"github.com/anthonyrawlins/bzzz/p2p"
	"github.com/anthonyrawlins/bzzz/pkg/config"
	"github.com/anthonyrawlins/bzzz/pkg/crypto"
	"github.com/anthonyrawlins/bzzz/pkg/dht"
	"github.com/anthonyrawlins/bzzz/pkg/election"
	"github.com/anthonyrawlins/bzzz/pkg/hive"
	"github.com/anthonyrawlins/bzzz/pkg/ucxi"
	"github.com/anthonyrawlins/bzzz/pkg/ucxl"
	"github.com/anthonyrawlins/bzzz/pubsub"
	"github.com/anthonyrawlins/bzzz/reasoning"

	kadht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)

// SimpleTaskTracker tracks active tasks for availability reporting
type SimpleTaskTracker struct {
	maxTasks          int
	activeTasks       map[string]bool
	decisionPublisher *ucxl.DecisionPublisher
}

// GetActiveTasks returns list of active task IDs
func (t *SimpleTaskTracker) GetActiveTasks() []string {
	tasks := make([]string, 0, len(t.activeTasks))
	for taskID := range t.activeTasks {
		tasks = append(tasks, taskID)
	}
	return tasks
}

// GetMaxTasks returns maximum number of concurrent tasks
func (t *SimpleTaskTracker) GetMaxTasks() int {
	return t.maxTasks
}

// AddTask marks a task as active
func (t *SimpleTaskTracker) AddTask(taskID string) {
	t.activeTasks[taskID] = true
}

// RemoveTask marks a task as completed and publishes decision if publisher available
func (t *SimpleTaskTracker) RemoveTask(taskID string) {
	delete(t.activeTasks, taskID)

	// Publish task completion decision if publisher is available
	if t.decisionPublisher != nil {
		t.publishTaskCompletion(taskID, true, "Task completed successfully", nil)
	}
}

// CompleteTaskWithDecision marks a task as completed and publishes detailed decision
func (t *SimpleTaskTracker) CompleteTaskWithDecision(taskID string, success bool, summary string, filesModified []string) {
	delete(t.activeTasks, taskID)

	// Publish task completion decision if publisher is available
	if t.decisionPublisher != nil {
		t.publishTaskCompletion(taskID, success, summary, filesModified)
	}
}

// SetDecisionPublisher sets the decision publisher for task completion tracking
func (t *SimpleTaskTracker) SetDecisionPublisher(publisher *ucxl.DecisionPublisher) {
	t.decisionPublisher = publisher
}

// publishTaskCompletion publishes a task completion decision to DHT
func (t *SimpleTaskTracker) publishTaskCompletion(taskID string, success bool, summary string, filesModified []string) {
	if t.decisionPublisher == nil {
		return
	}

	if err := t.decisionPublisher.PublishTaskCompletion(taskID, success, summary, filesModified); err != nil {
		fmt.Printf("⚠️ Failed to publish task completion for %s: %v\n", taskID, err)
	} else {
		fmt.Printf("📤 Published task completion decision for: %s\n", taskID)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fmt.Println("🚀 Starting Bzzz + HMMM P2P Task Coordination System...")

	// Load configuration
	cfg, err := config.LoadConfig("")
	if err != nil {
		log.Fatalf("Failed to load configuration: %v", err)
	}

	// Initialize P2P node
	node, err := p2p.NewNode(ctx)
	if err != nil {
		log.Fatalf("Failed to create P2P node: %v", err)
	}
	defer node.Close()

	// Apply node-specific configuration if agent ID is not set
	if cfg.Agent.ID == "" {
		nodeID := node.ID().ShortString()
		nodeSpecificCfg := config.GetNodeSpecificDefaults(nodeID)
		// Merge node-specific defaults with loaded config
		cfg.Agent.ID = nodeSpecificCfg.Agent.ID
		if len(cfg.Agent.Capabilities) == 0 {
			cfg.Agent.Capabilities = nodeSpecificCfg.Agent.Capabilities
		}
		if len(cfg.Agent.Models) == 0 {
			cfg.Agent.Models = nodeSpecificCfg.Agent.Models
		}
		if cfg.Agent.Specialization == "" {
			cfg.Agent.Specialization = nodeSpecificCfg.Agent.Specialization
		}
	}

	// Apply role-based configuration if no role is set
	if cfg.Agent.Role == "" {
		// Determine default role based on specialization
		defaultRole := getDefaultRoleForSpecialization(cfg.Agent.Specialization)
		if defaultRole != "" {
			fmt.Printf("🎭 Applying default role: %s\n", defaultRole)
			if err := cfg.ApplyRoleDefinition(defaultRole); err != nil {
				fmt.Printf("⚠️ Failed to apply role definition: %v\n", err)
			} else {
				fmt.Printf("✅ Role applied: %s\n", cfg.Agent.Role)
			}
		}
	}

	fmt.Printf("🐝 Bzzz node started successfully\n")
	fmt.Printf("📍 Node ID: %s\n", node.ID().ShortString())
	fmt.Printf("🤖 Agent ID: %s\n", cfg.Agent.ID)
	fmt.Printf("🎯 Specialization: %s\n", cfg.Agent.Specialization)

	// Display authority level if role is configured
	if cfg.Agent.Role != "" {
		authority, err := cfg.GetRoleAuthority(cfg.Agent.Role)
		if err == nil {
			fmt.Printf("🎭 Role: %s (Authority: %s)\n", cfg.Agent.Role, authority)
			if authority == config.AuthorityMaster {
				fmt.Printf("👑 This node can become admin/SLURP\n")
			}
		}
	}

	fmt.Printf("🐝 Hive API: %s\n", cfg.HiveAPI.BaseURL)
	fmt.Printf("🔗 Listening addresses:\n")
	for _, addr := range node.Addresses() {
		fmt.Printf(" %s/p2p/%s\n", addr, node.ID())
	}

	// Initialize Hypercore-style logger
	hlog := logging.NewHypercoreLog(node.ID())
	hlog.Append(logging.PeerJoined, map[string]interface{}{"status": "started"})
	fmt.Printf("📝 Hypercore logger initialized\n")

	// Initialize mDNS discovery
	mdnsDiscovery, err := discovery.NewMDNSDiscovery(ctx, node.Host(), "bzzz-peer-discovery")
	if err != nil {
		log.Fatalf("Failed to create mDNS discovery: %v", err)
	}
	defer mdnsDiscovery.Close()

	// Initialize PubSub with hypercore logging
	ps, err := pubsub.NewPubSubWithLogger(ctx, node.Host(), "bzzz/coordination/v1", "hmmm/meta-discussion/v1", hlog)
	if err != nil {
		log.Fatalf("Failed to create PubSub: %v", err)
	}
	defer ps.Close()

	// Join role-based topics if role is configured
	if cfg.Agent.Role != "" {
		if err := ps.JoinRoleBasedTopics(cfg.Agent.Role, cfg.Agent.Expertise, cfg.Agent.ReportsTo); err != nil {
			fmt.Printf("⚠️ Failed to join role-based topics: %v\n", err)
		} else {
			fmt.Printf("🎯 Joined role-based collaboration topics\n")
		}
	}

	// === Admin Election System ===

	// Initialize election manager
	electionManager := election.NewElectionManager(ctx, cfg, node.Host(), ps, node.ID().ShortString())

	// Set election callbacks
	electionManager.SetCallbacks(
		func(oldAdmin, newAdmin string) {
			fmt.Printf("👑 Admin changed: %s -> %s\n", oldAdmin, newAdmin)

			// If this node becomes admin, enable SLURP functionality
			if newAdmin == node.ID().ShortString() {
				fmt.Printf("🎯 This node is now admin - enabling SLURP functionality\n")
				cfg.Slurp.Enabled = true

				// Apply admin role configuration
				if err := cfg.ApplyRoleDefinition("admin"); err != nil {
					fmt.Printf("⚠️ Failed to apply admin role: %v\n", err)
				}
			}
		},
		func(winner string) {
			fmt.Printf("🏆 Election completed, winner: %s\n", winner)
		},
	)

	// Start election manager
	if err := electionManager.Start(); err != nil {
		fmt.Printf("❌ Failed to start election manager: %v\n", err)
	} else {
		fmt.Printf("✅ Election manager started\n")
	}
	defer electionManager.Stop()

	// Start admin heartbeat if this node is admin
	if electionManager.IsCurrentAdmin() {
		go func() {
			ticker := time.NewTicker(cfg.Security.ElectionConfig.HeartbeatTimeout / 2)
			defer ticker.Stop()

			for {
				select {
				case <-ctx.Done():
					return
				case <-ticker.C:
					if electionManager.IsCurrentAdmin() {
						if err := electionManager.SendAdminHeartbeat(); err != nil {
							fmt.Printf("❌ Failed to send admin heartbeat: %v\n", err)
						}
					}
				}
			}
		}()
	}
	// ============================

	// === DHT Storage and Decision Publishing ===

	// Initialize DHT for distributed storage
	var dhtNode *kadht.IpfsDHT
	var encryptedStorage *dht.EncryptedDHTStorage
	var decisionPublisher *ucxl.DecisionPublisher

	if cfg.V2.DHT.Enabled {
		// Create DHT
		dhtNode, err = kadht.New(ctx, node.Host())
		if err != nil {
			fmt.Printf("⚠️ Failed to create DHT: %v\n", err)
		} else {
			fmt.Printf("🕸️ DHT initialized\n")

			// Bootstrap DHT
			if err := dhtNode.Bootstrap(ctx); err != nil {
				fmt.Printf("⚠️ DHT bootstrap failed: %v\n", err)
			}

			// Connect to bootstrap peers if configured
			for _, addrStr := range cfg.V2.DHT.BootstrapPeers {
				addr, err := multiaddr.NewMultiaddr(addrStr)
				if err != nil {
					fmt.Printf("⚠️ Invalid bootstrap address %s: %v\n", addrStr, err)
					continue
				}

				// Extract peer info from multiaddr
				info, err := peer.AddrInfoFromP2pAddr(addr)
				if err != nil {
					fmt.Printf("⚠️ Failed to parse peer info from %s: %v\n", addrStr, err)
					continue
				}

				if err := node.Host().Connect(ctx, *info); err != nil {
					fmt.Printf("⚠️ Failed to connect to bootstrap peer %s: %v\n", addrStr, err)
				} else {
					fmt.Printf("🔗 Connected to DHT bootstrap peer: %s\n", addrStr)
				}
			}

			// Initialize encrypted storage
			encryptedStorage = dht.NewEncryptedDHTStorage(
				ctx,
				node.Host(),
				dhtNode,
				cfg,
				node.ID().ShortString(),
			)

			// Start cache cleanup
			encryptedStorage.StartCacheCleanup(5 * time.Minute)
			fmt.Printf("🔐 Encrypted DHT storage initialized\n")

			// Initialize decision publisher
			decisionPublisher = ucxl.NewDecisionPublisher(
				ctx,
				cfg,
				encryptedStorage,
				node.ID().ShortString(),
				cfg.Agent.ID,
			)
			fmt.Printf("📤 Decision publisher initialized\n")

			// Test the encryption system on startup
			go func() {
				time.Sleep(2 * time.Second) // Wait for initialization
				if err := crypto.TestAgeEncryption(); err != nil {
					fmt.Printf("❌ Age encryption test failed: %v\n", err)
				} else {
					fmt.Printf("✅ Age encryption test passed\n")
				}

				if err := crypto.TestShamirSecretSharing(); err != nil {
					fmt.Printf("❌ Shamir secret sharing test failed: %v\n", err)
				} else {
					fmt.Printf("✅ Shamir secret sharing test passed\n")
				}

				// Test end-to-end encrypted decision flow
				time.Sleep(3 * time.Second) // Wait a bit more
				testEndToEndDecisionFlow(decisionPublisher, encryptedStorage)
			}()
		}
	} else {
		fmt.Printf("⚪ DHT disabled in configuration\n")
	}

	defer func() {
		if dhtNode != nil {
			dhtNode.Close()
		}
	}()
	// ===========================================

	// === Hive & Task Coordination Integration ===

	// Initialize Hive API client
	hiveClient := hive.NewHiveClient(cfg.HiveAPI.BaseURL, cfg.HiveAPI.APIKey)

	// Test Hive connectivity
	if err := hiveClient.HealthCheck(ctx); err != nil {
		fmt.Printf("⚠️ Hive API not accessible: %v\n", err)
		fmt.Printf("🔧 Continuing in standalone mode\n")
	} else {
		fmt.Printf("✅ Hive API connected\n")
	}

	// Initialize Task Coordinator
	taskCoordinator := coordinator.NewTaskCoordinator(
		ctx,
		hiveClient,
		ps,
		hlog,
		cfg,
		node.ID().ShortString(),
	)

	// Start task coordination
	taskCoordinator.Start()
	fmt.Printf("✅ Task coordination system active\n")
	// ==========================

	// Start HTTP API server
	httpServer := api.NewHTTPServer(8080, hlog, ps)
	go func() {
		if err := httpServer.Start(); err != nil && err != http.ErrServerClosed {
			fmt.Printf("❌ HTTP server error: %v\n", err)
		}
	}()
	defer httpServer.Stop()
	fmt.Printf("🌐 HTTP API server started on :8080\n")

	// === UCXI Server Integration ===

	// Initialize UCXI server if UCXL protocol is enabled
	var ucxiServer *ucxi.Server
	if cfg.UCXL.Enabled && cfg.UCXL.Server.Enabled {
		// Create storage directory
		storageDir := cfg.UCXL.Storage.Directory
		if storageDir == "" {
			storageDir = filepath.Join(os.TempDir(), "bzzz-ucxi-storage")
		}

		storage, err := ucxi.NewBasicContentStorage(storageDir)
		if err != nil {
			fmt.Printf("⚠️ Failed to create UCXI storage: %v\n", err)
		} else {
			// Create resolver
			resolver := ucxi.NewBasicAddressResolver(node.ID().ShortString())
			resolver.SetDefaultTTL(cfg.UCXL.Resolution.CacheTTL)

			// TODO: Add P2P integration hooks here
			// resolver.SetAnnounceHook(...)
			// resolver.SetDiscoverHook(...)

			// Create UCXI server
			ucxiConfig := ucxi.ServerConfig{
				Port:     cfg.UCXL.Server.Port,
				BasePath: cfg.UCXL.Server.BasePath,
				Resolver: resolver,
				Storage:  storage,
				Logger:   ucxi.SimpleLogger{},
			}

			ucxiServer = ucxi.NewServer(ucxiConfig)
			go func() {
				if err := ucxiServer.Start(); err != nil && err != http.ErrServerClosed {
					fmt.Printf("❌ UCXI server error: %v\n", err)
				}
			}()
			defer func() {
				if ucxiServer != nil {
					ucxiServer.Stop()
				}
			}()
			fmt.Printf("🔗 UCXI server started on :%d%s/ucxi/v1\n", cfg.UCXL.Server.Port, cfg.UCXL.Server.BasePath)
		}
	} else {
		fmt.Printf("⚪ UCXI server disabled (UCXL protocol not enabled)\n")
	}
	// ============================

	// Create simple task tracker
	taskTracker := &SimpleTaskTracker{
		maxTasks:    cfg.Agent.MaxTasks,
		activeTasks: make(map[string]bool),
	}

	// Connect decision publisher to task tracker if available
	if decisionPublisher != nil {
		taskTracker.SetDecisionPublisher(decisionPublisher)
		fmt.Printf("📤 Task completion decisions will be published to DHT\n")
	}

	// Announce capabilities and role
	go announceAvailability(ps, node.ID().ShortString(), taskTracker)
	go announceCapabilitiesOnChange(ps, node.ID().ShortString(), cfg)
	go announceRoleOnStartup(ps, node.ID().ShortString(), cfg)

	// Start status reporting
	go statusReporter(node)

	fmt.Printf("🔍 Listening for peers on local network...\n")
	fmt.Printf("📡 Ready for task coordination and meta-discussion\n")
	fmt.Printf("🎯 HMMM collaborative reasoning enabled\n")

	// Handle graceful shutdown
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	<-c

	fmt.Println("\n🛑 Shutting down Bzzz node...")
}

// announceAvailability broadcasts current working status for task assignment
func announceAvailability(ps *pubsub.PubSub, nodeID string, taskTracker *SimpleTaskTracker) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for ; ; <-ticker.C {
		currentTasks := taskTracker.GetActiveTasks()
		maxTasks := taskTracker.GetMaxTasks()
		isAvailable := len(currentTasks) < maxTasks

		status := "ready"
		if len(currentTasks) >= maxTasks {
			status = "busy"
		} else if len(currentTasks) > 0 {
			status = "working"
		}

		availability := map[string]interface{}{
			"node_id":            nodeID,
			"available_for_work": isAvailable,
			"current_tasks":      len(currentTasks),
			"max_tasks":          maxTasks,
			"last_activity":      time.Now().Unix(),
			"status":             status,
			"timestamp":          time.Now().Unix(),
		}
		if err := ps.PublishBzzzMessage(pubsub.AvailabilityBcast, availability); err != nil {
			fmt.Printf("❌ Failed to announce availability: %v\n", err)
		}
	}
}

// detectAvailableOllamaModels queries Ollama API for available models
func detectAvailableOllamaModels() ([]string, error) {
	resp, err := http.Get("http://localhost:11434/api/tags")
	if err != nil {
		return nil, fmt.Errorf("failed to connect to Ollama API: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Ollama API returned status %d", resp.StatusCode)
	}

	var tagsResponse struct {
		Models []struct {
			Name string `json:"name"`
		} `json:"models"`
	}

	if err := json.NewDecoder(resp.Body).Decode(&tagsResponse); err != nil {
		return nil, fmt.Errorf("failed to decode Ollama response: %w", err)
	}

	models := make([]string, 0, len(tagsResponse.Models))
	for _, model := range tagsResponse.Models {
		models = append(models, model.Name)
	}

	return models, nil
}

// selectBestModel calls the model selection webhook to choose the best model for a prompt
func selectBestModel(webhookURL string, availableModels []string, prompt string) (string, error) {
	if webhookURL == "" || len(availableModels) == 0 {
		// Fallback to first available model
		if len(availableModels) > 0 {
			return availableModels[0], nil
		}
		return "", fmt.Errorf("no models available")
	}

	requestPayload := map[string]interface{}{
		"models": availableModels,
		"prompt": prompt,
	}

	payloadBytes, err := json.Marshal(requestPayload)
	if err != nil {
		// Fallback on error
		return availableModels[0], nil
	}

	resp, err := http.Post(webhookURL, "application/json", bytes.NewBuffer(payloadBytes))
	if err != nil {
		// Fallback on error
		return availableModels[0], nil
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Fallback on error
		return availableModels[0], nil
	}

	var response struct {
		Model string `json:"model"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&response); err != nil {
		// Fallback on error
		return availableModels[0], nil
	}

	// Validate that the returned model is in our available list
	for _, model := range availableModels {
		if model == response.Model {
			return response.Model, nil
		}
	}

	// Fallback if webhook returned invalid model
	return availableModels[0], nil
}

// announceCapabilitiesOnChange broadcasts capabilities only when they change
func announceCapabilitiesOnChange(ps *pubsub.PubSub, nodeID string, cfg *config.Config) {
	// Detect available Ollama models and update config
	availableModels, err := detectAvailableOllamaModels()
	if err != nil {
		fmt.Printf("⚠️ Failed to detect Ollama models: %v\n", err)
		fmt.Printf("🔄 Using configured models: %v\n", cfg.Agent.Models)
	} else {
		// Filter configured models to only include available ones
		validModels := make([]string, 0)
		for _, configModel := range cfg.Agent.Models {
			for _, availableModel := range availableModels {
				if configModel == availableModel {
					validModels = append(validModels, configModel)
					break
				}
			}
		}

		if len(validModels) == 0 {
			fmt.Printf("⚠️ No configured models available in Ollama, using first available: %v\n", availableModels)
			if len(availableModels) > 0 {
				validModels = []string{availableModels[0]}
			}
		} else {
			fmt.Printf("✅ Available models: %v\n", validModels)
		}

		// Update config with available models
		cfg.Agent.Models = validModels

		// Configure reasoning module with available models and webhook
		reasoning.SetModelConfig(validModels, cfg.Agent.ModelSelectionWebhook, cfg.Agent.DefaultReasoningModel)
	}

	// Get current capabilities
	currentCaps := map[string]interface{}{
		"node_id":        nodeID,
		"capabilities":   cfg.Agent.Capabilities,
		"models":         cfg.Agent.Models,
		"version":        "0.2.0",
		"specialization": cfg.Agent.Specialization,
	}

	// Load stored capabilities from file
	storedCaps, err := loadStoredCapabilities(nodeID)
	if err != nil {
		fmt.Printf("📄 No stored capabilities found, treating as first run\n")
		storedCaps = nil
	}

	// Check if capabilities have changed
	if capabilitiesChanged(currentCaps, storedCaps) {
		fmt.Printf("🔄 Capabilities changed, broadcasting update\n")
		currentCaps["timestamp"] = time.Now().Unix()
		currentCaps["reason"] = getChangeReason(currentCaps, storedCaps)

		// Broadcast the change
		if err := ps.PublishBzzzMessage(pubsub.CapabilityBcast, currentCaps); err != nil {
			fmt.Printf("❌ Failed to announce capabilities: %v\n", err)
		} else {
			// Store new capabilities
			if err := storeCapabilities(nodeID, currentCaps); err != nil {
				fmt.Printf("❌ Failed to store capabilities: %v\n", err)
			}
		}
	} else {
		fmt.Printf("✅ Capabilities unchanged since last run\n")
	}
}

// statusReporter provides periodic status updates
func statusReporter(node *p2p.Node) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for ; ; <-ticker.C {
		peers := node.ConnectedPeers()
		fmt.Printf("📊 Status: %d connected peers\n", peers)
	}
}

// getCapabilitiesFile returns the path to store capabilities for a node
func getCapabilitiesFile(nodeID string) string {
	homeDir, _ := os.UserHomeDir()
	return filepath.Join(homeDir, ".config", "bzzz", fmt.Sprintf("capabilities-%s.json", nodeID))
}

// loadStoredCapabilities loads previously stored capabilities from disk
func loadStoredCapabilities(nodeID string) (map[string]interface{}, error) {
	capFile := getCapabilitiesFile(nodeID)

	data, err := os.ReadFile(capFile)
	if err != nil {
		return nil, err
	}

	var capabilities map[string]interface{}
	if err := json.Unmarshal(data, &capabilities); err != nil {
		return nil, err
	}

	return capabilities, nil
}

// storeCapabilities saves current capabilities to disk
func storeCapabilities(nodeID string, capabilities map[string]interface{}) error {
	capFile := getCapabilitiesFile(nodeID)

	// Ensure directory exists
	if err := os.MkdirAll(filepath.Dir(capFile), 0755); err != nil {
		return err
	}

	data, err := json.MarshalIndent(capabilities, "", " ")
	if err != nil {
		return err
	}

	return os.WriteFile(capFile, data, 0644)
}

// capabilitiesChanged compares current and stored capabilities
func capabilitiesChanged(current, stored map[string]interface{}) bool {
	if stored == nil {
		return true // First run, always announce
	}

	// Compare important fields that indicate capability changes
	compareFields := []string{"capabilities", "models", "specialization"}
	for _, field := range compareFields {
		if !reflect.DeepEqual(current[field], stored[field]) {
			return true
		}
	}

	return false
}

// getChangeReason determines why capabilities changed
func getChangeReason(current, stored map[string]interface{}) string {
	if stored == nil {
		return "startup"
	}

	if !reflect.DeepEqual(current["models"], stored["models"]) {
		return "model_change"
	}
	if !reflect.DeepEqual(current["capabilities"], stored["capabilities"]) {
		return "capability_change"
	}
	if !reflect.DeepEqual(current["specialization"], stored["specialization"]) {
		return "specialization_change"
	}

	return "unknown_change"
}

// getDefaultRoleForSpecialization maps specializations to default roles
func getDefaultRoleForSpecialization(specialization string) string {
	roleMap := map[string]string{
		"code_generation":    "backend_developer",
		"advanced_reasoning": "senior_software_architect",
		"code_analysis":      "security_expert",
		"general_developer":  "full_stack_engineer",
		"debugging":          "qa_engineer",
		"frontend":           "frontend_developer",
		"backend":            "backend_developer",
		"devops":             "devops_engineer",
		"security":           "security_expert",
		"design":             "ui_ux_designer",
		"architecture":       "senior_software_architect",
	}

	if role, exists := roleMap[specialization]; exists {
		return role
	}

	// Default fallback
	return "full_stack_engineer"
}

// announceRoleOnStartup announces the agent's role when starting up
func announceRoleOnStartup(ps *pubsub.PubSub, nodeID string, cfg *config.Config) {
	if cfg.Agent.Role == "" {
		return // No role to announce
	}

	roleData := map[string]interface{}{
		"node_id":        nodeID,
		"role":           cfg.Agent.Role,
		"expertise":      cfg.Agent.Expertise,
		"reports_to":     cfg.Agent.ReportsTo,
		"deliverables":   cfg.Agent.Deliverables,
		"capabilities":   cfg.Agent.Capabilities,
		"specialization": cfg.Agent.Specialization,
		"timestamp":      time.Now().Unix(),
		"status":         "online",
	}

	opts := pubsub.MessageOptions{
		FromRole:          cfg.Agent.Role,
		RequiredExpertise: cfg.Agent.Expertise,
		Priority:          "medium",
	}

	if err := ps.PublishRoleBasedMessage(pubsub.RoleAnnouncement, roleData, opts); err != nil {
		fmt.Printf("❌ Failed to announce role: %v\n", err)
	} else {
		fmt.Printf("📢 Role announced: %s\n", cfg.Agent.Role)
	}
}

// testEndToEndDecisionFlow tests the complete encrypted decision publishing and retrieval flow
func testEndToEndDecisionFlow(publisher *ucxl.DecisionPublisher, storage *dht.EncryptedDHTStorage) {
	if publisher == nil || storage == nil {
		fmt.Printf("⚪ Skipping end-to-end test (components not initialized)\n")
		return
	}

	fmt.Printf("🧪 Testing end-to-end encrypted decision flow...\n")

	// Test 1: Publish an architectural decision
	err := publisher.PublishArchitecturalDecision(
		"implement_unified_bzzz_slurp",
		"Integrate SLURP as specialized BZZZ agent with admin role for unified P2P architecture",
		"Eliminates separate system complexity and leverages existing P2P infrastructure",
		[]string{"Keep separate systems", "Use different consensus algorithm"},
		[]string{"Single point of coordination", "Improved failover", "Simplified deployment"},
		[]string{"Test consensus elections", "Implement key reconstruction", "Deploy to cluster"},
	)
	if err != nil {
		fmt.Printf("❌ Failed to publish architectural decision: %v\n", err)
		return
	}
	fmt.Printf("✅ Published architectural decision\n")

	// Test 2: Publish a code decision
	testResults := &ucxl.TestResults{
		Passed:      15,
		Failed:      2,
		Skipped:     1,
		Coverage:    78.5,
		FailedTests: []string{"TestElection_SplitBrain", "TestCrypto_KeyReconstruction"},
	}

	err = publisher.PublishCodeDecision(
		"implement_age_encryption",
		"Implemented Age encryption for role-based UCXL content security",
		[]string{"pkg/crypto/age_crypto.go", "pkg/dht/encrypted_storage.go"},
		578,
		testResults,
		[]string{"filippo.io/age", "github.com/libp2p/go-libp2p-kad-dht"},
	)
	if err != nil {
		fmt.Printf("❌ Failed to publish code decision: %v\n", err)
		return
	}
	fmt.Printf("✅ Published code decision\n")

	// Test 3: Query recent decisions
	time.Sleep(1 * time.Second) // Allow decisions to propagate
	decisions, err := publisher.QueryRecentDecisions("", "", "", 10, time.Now().Add(-1*time.Hour))
	if err != nil {
		fmt.Printf("❌ Failed to query recent decisions: %v\n", err)
		return
	}

	fmt.Printf("🔍 Found %d recent decisions:\n", len(decisions))
	for i, metadata := range decisions {
		fmt.Printf(" %d. %s (creator: %s, type: %s)\n", i+1, metadata.Address, metadata.CreatorRole, metadata.ContentType)
	}

	// Test 4: Retrieve and decrypt a specific decision
	if len(decisions) > 0 {
		decision, err := publisher.GetDecisionContent(decisions[0].Address)
		if err != nil {
			fmt.Printf("❌ Failed to retrieve decision content: %v\n", err)
		} else {
			fmt.Printf("✅ Retrieved decision: %s (%s)\n", decision.Task, decision.Decision)
			fmt.Printf(" Files modified: %d, Success: %t\n", len(decision.FilesModified), decision.Success)
		}
	}

	// Test 5: Publish system status
	metrics := map[string]interface{}{
		"uptime_seconds":  300,
		"active_peers":    3,
		"dht_entries":     len(decisions),
		"encryption_ops":  25,
		"decryption_ops":  8,
		"memory_usage_mb": 145.7,
	}

	healthChecks := map[string]bool{
		"dht_connected":     true,
		"elections_ready":   true,
		"crypto_functional": true,
		"peers_discovered":  true,
	}

	err = publisher.PublishSystemStatus("All systems operational - Phase 2B implementation complete", metrics, healthChecks)
	if err != nil {
		fmt.Printf("❌ Failed to publish system status: %v\n", err)
	} else {
		fmt.Printf("✅ Published system status\n")
	}

	fmt.Printf("🎉 End-to-end encrypted decision flow test completed successfully!\n")
	fmt.Printf("🔐 All decisions encrypted with role-based Age encryption\n")
	fmt.Printf("🕸️ Content stored in distributed DHT with local caching\n")
	fmt.Printf("🔍 Content discoverable and retrievable by authorized roles\n")
}