Integrate BACKBEAT SDK and resolve KACHING license validation
Major integrations and fixes:

- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:

- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
@@ -1,37 +1,30 @@
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"reflect"
	"runtime"
	"syscall"
	"time"

-	"chorus.services/chorus/api"
-	"chorus.services/chorus/coordinator"
-	"chorus.services/chorus/discovery"
-	"chorus.services/chorus/internal/licensing"
-	"chorus.services/chorus/internal/logging"
-	"chorus.services/chorus/p2p"
-	"chorus.services/chorus/pkg/config"
-	"chorus.services/chorus/pkg/crypto"
-	"chorus.services/chorus/pkg/dht"
-	"chorus.services/chorus/pkg/election"
-	"chorus.services/chorus/pkg/health"
-	"chorus.services/chorus/pkg/shutdown"
-	"chorus.services/chorus/pkg/ucxi"
-	"chorus.services/chorus/pkg/ucxl"
-	"chorus.services/chorus/pkg/version"
-	"chorus.services/chorus/pubsub"
-	"chorus.services/chorus/reasoning"
+	"chorus/api"
+	"chorus/coordinator"
+	"chorus/discovery"
+	"chorus/internal/backbeat"
+	"chorus/internal/licensing"
+	"chorus/internal/logging"
+	"chorus/p2p"
+	"chorus/pkg/config"
+	"chorus/pkg/dht"
+	"chorus/pkg/election"
+	"chorus/pkg/health"
+	"chorus/pkg/shutdown"
+	"chorus/pkg/ucxi"
+	"chorus/pkg/ucxl"
+	"chorus/pubsub"
+	"chorus/reasoning"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)
@@ -41,6 +34,21 @@ const (
	AppVersion = "0.1.0-dev"
)

+// SimpleLogger provides basic logging implementation
+type SimpleLogger struct{}
+
+func (l *SimpleLogger) Info(msg string, args ...interface{}) {
+	log.Printf("[INFO] "+msg, args...)
+}
+
+func (l *SimpleLogger) Warn(msg string, args ...interface{}) {
+	log.Printf("[WARN] "+msg, args...)
+}
+
+func (l *SimpleLogger) Error(msg string, args ...interface{}) {
+	log.Printf("[ERROR] "+msg, args...)
+}
+
// SimpleTaskTracker tracks active tasks for availability reporting
type SimpleTaskTracker struct {
	maxTasks int
@@ -91,14 +99,42 @@ func (t *SimpleTaskTracker) publishTaskCompletion(taskID string, success bool, s
}

func main() {
-	// Initialize container-optimized logger
-	logger := logging.NewContainerLogger(AppName)
+	// Early CLI handling: print help/version without requiring env/config
+	for _, a := range os.Args[1:] {
+		switch a {
+		case "--help", "-h", "help":
+			fmt.Printf("%s %s\n\n", AppName, AppVersion)
+			fmt.Println("Usage:")
+			fmt.Printf("  %s [--help] [--version]\n\n", filepath.Base(os.Args[0]))
+			fmt.Println("Environment (common):")
+			fmt.Println("  CHORUS_LICENSE_ID      (required)")
+			fmt.Println("  CHORUS_AGENT_ID        (optional; auto-generated if empty)")
+			fmt.Println("  CHORUS_P2P_PORT        (default 9000)")
+			fmt.Println("  CHORUS_API_PORT        (default 8080)")
+			fmt.Println("  CHORUS_HEALTH_PORT     (default 8081)")
+			fmt.Println("  CHORUS_DHT_ENABLED     (default true)")
+			fmt.Println("  CHORUS_BOOTSTRAP_PEERS (comma-separated multiaddrs)")
+			fmt.Println("  OLLAMA_ENDPOINT        (default http://localhost:11434)")
+			fmt.Println()
+			fmt.Println("Example:")
+			fmt.Println("  CHORUS_LICENSE_ID=dev-123 \\")
+			fmt.Println("  CHORUS_AGENT_ID=chorus-dev \\")
+			fmt.Println("  CHORUS_P2P_PORT=9000 CHORUS_API_PORT=8080 ./chorus")
+			return
+		case "--version", "-v":
+			fmt.Printf("%s %s\n", AppName, AppVersion)
+			return
+		}
+	}
+
+	// Initialize container-optimized logger
+	logger := &SimpleLogger{}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger.Info("🎭 Starting CHORUS v%s - Container-First P2P Task Coordination", AppVersion)
-	logger.Info("📦 Container deployment of proven BZZZ functionality")
+	logger.Info("📦 Container deployment of proven CHORUS functionality")

	// Load configuration from environment (no config files in containers)
	logger.Info("📋 Loading configuration from environment variables...")
@@ -114,7 +150,11 @@ func main() {

	// CRITICAL: Validate license before any P2P operations
	logger.Info("🔐 Validating CHORUS license with KACHING...")
-	licenseValidator := licensing.NewValidator(cfg.License)
+	licenseValidator := licensing.NewValidator(licensing.LicenseConfig{
+		LicenseID:  cfg.License.LicenseID,
+		ClusterID:  cfg.License.ClusterID,
+		KachingURL: cfg.License.KachingURL,
+	})
	if err := licenseValidator.Validate(); err != nil {
		logger.Error("❌ License validation failed: %v", err)
		logger.Error("💰 CHORUS requires a valid license to operate")
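The commit message mentions Docker secrets support and file-based secrets for the license credentials, though the config loader itself is not part of this diff. A minimal sketch of how file-based secret resolution is typically done (the helper name and fallback order are assumptions, not CHORUS's actual implementation):

package config

import (
	"os"
	"strings"
)

// resolveSecret prefers a *_FILE variant pointing at a mounted Docker
// secret (e.g. /run/secrets/chorus_license_id) over the plain env var.
// Hypothetical helper; the real CHORUS config loader may differ.
func resolveSecret(name string) string {
	if path := os.Getenv(name + "_FILE"); path != "" {
		if data, err := os.ReadFile(path); err == nil {
			return strings.TrimSpace(string(data))
		}
	}
	return os.Getenv(name)
}

Under that assumption, cfg.License.LicenseID would come from resolveSecret("CHORUS_LICENSE_ID"), so the same image works with either a plain env var or a compose-mounted secret.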
@@ -123,6 +163,34 @@ func main() {
	}
	logger.Info("✅ License validation successful - CHORUS authorized to run")

+	// Initialize AI provider configuration
+	logger.Info("🧠 Configuring AI provider: %s", cfg.AI.Provider)
+	if err := initializeAIProvider(cfg, logger); err != nil {
+		logger.Error("❌ AI provider initialization failed: %v", err)
+		os.Exit(1)
+	}
+	logger.Info("✅ AI provider configured successfully")
+
+	// Initialize BACKBEAT integration
+	var backbeatIntegration *backbeat.Integration
+	backbeatIntegration, err = backbeat.NewIntegration(cfg, cfg.Agent.ID, logger)
+	if err != nil {
+		logger.Warn("⚠️ BACKBEAT integration initialization failed: %v", err)
+		logger.Info("📍 P2P operations will run without beat synchronization")
+	} else {
+		if err := backbeatIntegration.Start(ctx); err != nil {
+			logger.Warn("⚠️ Failed to start BACKBEAT integration: %v", err)
+			backbeatIntegration = nil
+		} else {
+			logger.Info("🎵 BACKBEAT integration started successfully")
+		}
+	}
+	defer func() {
+		if backbeatIntegration != nil {
+			backbeatIntegration.Stop()
+		}
+	}()
+
	// Initialize P2P node
	node, err := p2p.NewNode(ctx)
	if err != nil {
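Note that BACKBEAT failures are deliberately non-fatal: the integration pointer is nil'd and the node runs without beat synchronization. From the call sites in this diff, the backbeat.Integration surface used here looks roughly like the following — an interface written for clarity only; the actual chorus/internal/backbeat types, the Phase type, and exact signatures are inferred, not confirmed by this commit:

package backbeatsketch

import "context"

// Phase marks where a tracked P2P operation is in its lifecycle.
// PhaseConnecting appears in this diff; other phases are assumed.
type Phase int

const PhaseConnecting Phase = iota

// P2PTracker lists the backbeat.Integration methods main.go relies on.
type P2PTracker interface {
	Start(ctx context.Context) error
	Stop()
	StartP2POperation(id, kind string, estBeats int, meta map[string]interface{}) error
	UpdateP2POperationPhase(id string, phase Phase, progress int)
	CompleteP2POperation(id string, beatsUsed int)
	FailP2POperation(id, reason string)
	GetHealth() map[string]interface{}
}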
@@ -160,7 +228,11 @@ func main() {

	// Join role-based topics if role is configured
	if cfg.Agent.Role != "" {
-		if err := ps.JoinRoleBasedTopics(cfg.Agent.Role, cfg.Agent.Expertise, cfg.Agent.ReportsTo); err != nil {
+		reportsTo := []string{}
+		if cfg.Agent.ReportsTo != "" {
+			reportsTo = []string{cfg.Agent.ReportsTo}
+		}
+		if err := ps.JoinRoleBasedTopics(cfg.Agent.Role, cfg.Agent.Expertise, reportsTo); err != nil {
			logger.Warn("⚠️ Failed to join role-based topics: %v", err)
		} else {
			logger.Info("🎯 Joined role-based collaboration topics")
@@ -170,11 +242,23 @@ func main() {
	// === Admin Election System ===
	electionManager := election.NewElectionManager(ctx, cfg, node.Host(), ps, node.ID().ShortString())

-	// Set election callbacks
+	// Set election callbacks with BACKBEAT integration
	electionManager.SetCallbacks(
		func(oldAdmin, newAdmin string) {
			logger.Info("👑 Admin changed: %s -> %s", oldAdmin, newAdmin)

+			// Track admin change with BACKBEAT if available
+			if backbeatIntegration != nil {
+				operationID := fmt.Sprintf("admin-change-%d", time.Now().Unix())
+				if err := backbeatIntegration.StartP2POperation(operationID, "admin_change", 2, map[string]interface{}{
+					"old_admin": oldAdmin,
+					"new_admin": newAdmin,
+				}); err == nil {
+					// Complete immediately as this is a state change, not a long operation
+					backbeatIntegration.CompleteP2POperation(operationID, 1)
+				}
+			}
+
			// If this node becomes admin, enable SLURP functionality
			if newAdmin == node.ID().ShortString() {
				logger.Info("🎯 This node is now admin - enabling SLURP functionality")
@@ -187,6 +271,17 @@ func main() {
		},
		func(winner string) {
			logger.Info("🏆 Election completed, winner: %s", winner)

+			// Track election completion with BACKBEAT if available
+			if backbeatIntegration != nil {
+				operationID := fmt.Sprintf("election-completed-%d", time.Now().Unix())
+				if err := backbeatIntegration.StartP2POperation(operationID, "election", 1, map[string]interface{}{
+					"winner":  winner,
+					"node_id": node.ID().ShortString(),
+				}); err == nil {
+					backbeatIntegration.CompleteP2POperation(operationID, 1)
+				}
+			}
		},
	)
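Both callbacks use the same pattern: a point-in-time event is recorded by starting a one-beat operation and completing it immediately. A sketch of how that could be factored out — a hypothetical helper written against the method names used in this file, not part of the commit:

// recordP2PEvent starts and immediately completes a one-beat operation,
// matching the election callbacks above. Assumes main.go's imports
// (fmt, time, chorus/internal/backbeat); hypothetical helper.
func recordP2PEvent(bb *backbeat.Integration, kind string, meta map[string]interface{}) {
	if bb == nil {
		return // BACKBEAT unavailable: drop the event silently
	}
	id := fmt.Sprintf("%s-%d", kind, time.Now().Unix())
	if err := bb.StartP2POperation(id, kind, 1, meta); err == nil {
		bb.CompleteP2POperation(id, 1)
	}
}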
@@ -210,9 +305,23 @@ func main() {
	} else {
		logger.Info("🕸️ DHT initialized")

-		// Bootstrap DHT
-		if err := dhtNode.Bootstrap(); err != nil {
-			logger.Warn("⚠️ DHT bootstrap failed: %v", err)
+		// Bootstrap DHT with BACKBEAT tracking
+		if backbeatIntegration != nil {
+			operationID := fmt.Sprintf("dht-bootstrap-%d", time.Now().Unix())
+			if err := backbeatIntegration.StartP2POperation(operationID, "dht_bootstrap", 4, nil); err == nil {
+				backbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0)
+			}
+
+			if err := dhtNode.Bootstrap(); err != nil {
+				logger.Warn("⚠️ DHT bootstrap failed: %v", err)
+				backbeatIntegration.FailP2POperation(operationID, err.Error())
+			} else {
+				backbeatIntegration.CompleteP2POperation(operationID, 1)
+			}
+		} else {
+			if err := dhtNode.Bootstrap(); err != nil {
+				logger.Warn("⚠️ DHT bootstrap failed: %v", err)
+			}
		}

		// Connect to bootstrap peers if configured
@@ -230,10 +339,28 @@ func main() {
				continue
			}

-			if err := node.Host().Connect(ctx, *info); err != nil {
-				logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err)
-			} else {
-				logger.Info("🔗 Connected to DHT bootstrap peer: %s", addrStr)
+			// Track peer discovery with BACKBEAT if available
+			if backbeatIntegration != nil {
+				operationID := fmt.Sprintf("peer-discovery-%d", time.Now().Unix())
+				if err := backbeatIntegration.StartP2POperation(operationID, "peer_discovery", 2, map[string]interface{}{
+					"peer_addr": addrStr,
+				}); err == nil {
+					backbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0)
+
+					if err := node.Host().Connect(ctx, *info); err != nil {
+						logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err)
+						backbeatIntegration.FailP2POperation(operationID, err.Error())
+					} else {
+						logger.Info("🔗 Connected to DHT bootstrap peer: %s", addrStr)
+						backbeatIntegration.CompleteP2POperation(operationID, 1)
+					}
+				}
+			} else {
+				if err := node.Host().Connect(ctx, *info); err != nil {
+					logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err)
+				} else {
+					logger.Info("🔗 Connected to DHT bootstrap peer: %s", addrStr)
+				}
			}
		}
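The DHT bootstrap and peer-connect hunks above duplicate their tracked and untracked branches. One way to collapse that duplication — a sketch under the same assumed backbeat API, not something this commit introduces:

// trackP2P runs op, wrapping it in a BACKBEAT operation when bb is
// non-nil; callers get identical behavior with or without tracking.
// Hypothetical helper; assumes main.go's imports and the backbeat
// method set used elsewhere in this diff.
func trackP2P(bb *backbeat.Integration, kind string, beats int,
	meta map[string]interface{}, op func() error) error {
	if bb == nil {
		return op() // BACKBEAT disabled: run untracked
	}
	id := fmt.Sprintf("%s-%d", kind, time.Now().Unix())
	if err := bb.StartP2POperation(id, kind, beats, meta); err != nil {
		return op() // tracking failed to start: still run the operation
	}
	bb.UpdateP2POperationPhase(id, backbeat.PhaseConnecting, 0)
	if err := op(); err != nil {
		bb.FailP2POperation(id, err.Error())
		return err
	}
	bb.CompleteP2POperation(id, 1)
	return nil
}

The peer-connect branch would then reduce to a single call such as trackP2P(backbeatIntegration, "peer_discovery", 2, meta, func() error { return node.Host().Connect(ctx, *info) }).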
@@ -364,7 +491,7 @@ func main() {
	healthManager.SetShutdownManager(shutdownManager)

	// Register health checks
-	setupHealthChecks(healthManager, ps, node, dhtNode)
+	setupHealthChecks(healthManager, ps, node, dhtNode, backbeatIntegration)

	// Register components for graceful shutdown
	setupGracefulShutdown(shutdownManager, healthManager, node, ps, mdnsDiscovery,
@@ -395,8 +522,8 @@ func main() {
	logger.Info("✅ CHORUS system shutdown completed")
}

-// Rest of the functions (setupHealthChecks, etc.) would be adapted from BZZZ...
-// For brevity, I'll include key functions but the full implementation would port all BZZZ functionality
+// Rest of the functions (setupHealthChecks, etc.) would be adapted from CHORUS...
+// For brevity, I'll include key functions but the full implementation would port all CHORUS functionality

// simpleLogger implements basic logging for shutdown and health systems
type simpleLogger struct {
@@ -458,21 +585,104 @@ func statusReporter(node *p2p.Node, logger logging.Logger) {
	}
}

-// Placeholder functions for full BZZZ port - these would be fully implemented
+// Placeholder functions for full CHORUS port - these would be fully implemented
func announceCapabilitiesOnChange(ps *pubsub.PubSub, nodeID string, cfg *config.Config, logger logging.Logger) {
-	// Implementation from BZZZ would go here
+	// Implementation from CHORUS would go here
}

func announceRoleOnStartup(ps *pubsub.PubSub, nodeID string, cfg *config.Config, logger logging.Logger) {
-	// Implementation from BZZZ would go here
+	// Implementation from CHORUS would go here
}

-func setupHealthChecks(healthManager *health.Manager, ps *pubsub.PubSub, node *p2p.Node, dhtNode *dht.LibP2PDHT) {
-	// Implementation from BZZZ would go here
+func setupHealthChecks(healthManager *health.Manager, ps *pubsub.PubSub, node *p2p.Node, dhtNode *dht.LibP2PDHT, backbeatIntegration *backbeat.Integration) {
+	// Add BACKBEAT health check
+	if backbeatIntegration != nil {
+		backbeatCheck := &health.HealthCheck{
+			Name:        "backbeat",
+			Description: "BACKBEAT timing integration health",
+			Interval:    30 * time.Second,
+			Timeout:     10 * time.Second,
+			Enabled:     true,
+			Critical:    false,
+			Checker: func(ctx context.Context) health.CheckResult {
+				healthInfo := backbeatIntegration.GetHealth()
+				connected, _ := healthInfo["connected"].(bool)
+
+				result := health.CheckResult{
+					Healthy:   connected,
+					Details:   healthInfo,
+					Timestamp: time.Now(),
+				}
+
+				if connected {
+					result.Message = "BACKBEAT integration healthy and connected"
+				} else {
+					result.Message = "BACKBEAT integration not connected"
+				}
+
+				return result
+			},
+		}
+		healthManager.RegisterCheck(backbeatCheck)
+	}
+
+	// Implementation from CHORUS would go here - other health checks
}

func setupGracefulShutdown(shutdownManager *shutdown.Manager, healthManager *health.Manager,
	node *p2p.Node, ps *pubsub.PubSub, mdnsDiscovery interface{}, electionManager interface{},
	httpServer *api.HTTPServer, ucxiServer *ucxi.Server, taskCoordinator interface{}, dhtNode *dht.LibP2PDHT) {
-	// Implementation from BZZZ would go here
+	// Implementation from CHORUS would go here
}

+// initializeAIProvider configures the reasoning engine with the appropriate AI provider
+func initializeAIProvider(cfg *config.Config, logger logging.Logger) error {
+	// Set the AI provider
+	reasoning.SetAIProvider(cfg.AI.Provider)
+
+	// Configure the selected provider
+	switch cfg.AI.Provider {
+	case "resetdata":
+		if cfg.AI.ResetData.APIKey == "" {
+			return fmt.Errorf("RESETDATA_API_KEY environment variable is required for resetdata provider")
+		}
+
+		resetdataConfig := reasoning.ResetDataConfig{
+			BaseURL: cfg.AI.ResetData.BaseURL,
+			APIKey:  cfg.AI.ResetData.APIKey,
+			Model:   cfg.AI.ResetData.Model,
+			Timeout: cfg.AI.ResetData.Timeout,
+		}
+		reasoning.SetResetDataConfig(resetdataConfig)
+		logger.Info("🌐 ResetData AI provider configured - Endpoint: %s, Model: %s",
+			cfg.AI.ResetData.BaseURL, cfg.AI.ResetData.Model)
+
+	case "ollama":
+		reasoning.SetOllamaEndpoint(cfg.AI.Ollama.Endpoint)
+		logger.Info("🦙 Ollama AI provider configured - Endpoint: %s", cfg.AI.Ollama.Endpoint)
+
+	default:
+		logger.Warn("⚠️ Unknown AI provider '%s', defaulting to resetdata", cfg.AI.Provider)
+		if cfg.AI.ResetData.APIKey == "" {
+			return fmt.Errorf("RESETDATA_API_KEY environment variable is required for default resetdata provider")
+		}
+
+		resetdataConfig := reasoning.ResetDataConfig{
+			BaseURL: cfg.AI.ResetData.BaseURL,
+			APIKey:  cfg.AI.ResetData.APIKey,
+			Model:   cfg.AI.ResetData.Model,
+			Timeout: cfg.AI.ResetData.Timeout,
+		}
+		reasoning.SetResetDataConfig(resetdataConfig)
+		reasoning.SetAIProvider("resetdata")
+	}
+
+	// Configure model selection
+	reasoning.SetModelConfig(
+		cfg.Agent.Models,
+		cfg.Agent.ModelSelectionWebhook,
+		cfg.Agent.DefaultReasoningModel,
+	)
+
+	return nil
+}