Harden CHORUS security and messaging stack

This commit is contained in:
anthonyrawlins
2025-09-20 23:21:35 +10:00
parent 57751f277a
commit 1bb736c09a
25 changed files with 2793 additions and 2474 deletions

View File

@@ -2,9 +2,11 @@ package runtime
import (
"context"
"fmt"
"time"
"chorus/internal/logging"
"chorus/pkg/dht"
"chorus/pkg/health"
"chorus/pkg/shutdown"
"chorus/pubsub"
@@ -43,37 +45,37 @@ func (r *SharedRuntime) StartAgentMode() error {
// === Comprehensive Health Monitoring & Graceful Shutdown ===
shutdownManager := shutdown.NewManager(30*time.Second, &simpleLogger{logger: r.Logger})
healthManager := health.NewManager(r.Node.ID().ShortString(), AppVersion, &simpleLogger{logger: r.Logger})
healthManager.SetShutdownManager(shutdownManager)
// Register health checks
r.setupHealthChecks(healthManager)
// Register components for graceful shutdown
r.setupGracefulShutdown(shutdownManager, healthManager)
// Start health monitoring
if err := healthManager.Start(); err != nil {
return err
}
r.HealthManager = healthManager
r.Logger.Info("❤️ Health monitoring started")
// Start health HTTP server
if err := healthManager.StartHTTPServer(r.Config.Network.HealthPort); err != nil {
r.Logger.Error("❌ Failed to start health HTTP server: %v", err)
} else {
r.Logger.Info("🏥 Health endpoints available at http://localhost:%d/health", r.Config.Network.HealthPort)
}
// Start shutdown manager
shutdownManager.Start()
r.ShutdownManager = shutdownManager
r.Logger.Info("🛡️ Graceful shutdown manager started")
r.Logger.Info("✅ CHORUS agent system fully operational with health monitoring")
// Wait for graceful shutdown
shutdownManager.Wait()
r.Logger.Info("✅ CHORUS agent system shutdown completed")
@@ -90,7 +92,7 @@ func (r *SharedRuntime) announceAvailability() {
currentTasks := r.TaskTracker.GetActiveTasks()
maxTasks := r.TaskTracker.GetMaxTasks()
isAvailable := len(currentTasks) < maxTasks
status := "ready"
if len(currentTasks) >= maxTasks {
status = "busy"
@@ -99,13 +101,13 @@ func (r *SharedRuntime) announceAvailability() {
}
availability := map[string]interface{}{
"node_id": r.Node.ID().ShortString(),
"node_id": r.Node.ID().ShortString(),
"available_for_work": isAvailable,
"current_tasks": len(currentTasks),
"max_tasks": maxTasks,
"last_activity": time.Now().Unix(),
"status": status,
"timestamp": time.Now().Unix(),
"current_tasks": len(currentTasks),
"max_tasks": maxTasks,
"last_activity": time.Now().Unix(),
"status": status,
"timestamp": time.Now().Unix(),
}
if err := r.PubSub.PublishBzzzMessage(pubsub.AvailabilityBcast, availability); err != nil {
r.Logger.Error("❌ Failed to announce availability: %v", err)
@@ -126,16 +128,79 @@ func (r *SharedRuntime) statusReporter() {
// announceCapabilitiesOnChange announces capabilities when they change
func (r *SharedRuntime) announceCapabilitiesOnChange() {
if r.PubSub == nil {
r.Logger.Warn("⚠️ Capability broadcast skipped: PubSub not initialized")
return
}
r.Logger.Info("📢 Broadcasting agent capabilities to network")
activeTaskCount := 0
if r.TaskTracker != nil {
activeTaskCount = len(r.TaskTracker.GetActiveTasks())
}
announcement := map[string]interface{}{
"agent_id": r.Config.Agent.ID,
"node_id": r.Node.ID().ShortString(),
"version": AppVersion,
"capabilities": r.Config.Agent.Capabilities,
"expertise": r.Config.Agent.Expertise,
"models": r.Config.Agent.Models,
"specialization": r.Config.Agent.Specialization,
"max_tasks": r.Config.Agent.MaxTasks,
"current_tasks": activeTaskCount,
"timestamp": time.Now().Unix(),
"availability": "ready",
}
if err := r.PubSub.PublishBzzzMessage(pubsub.CapabilityBcast, announcement); err != nil {
r.Logger.Error("❌ Failed to broadcast capabilities: %v", err)
return
}
r.Logger.Info("✅ Capabilities broadcast published")
// TODO: Watch for live capability changes (role updates, model changes) and re-broadcast
}
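// Illustrative sketch, not part of this commit: one way to address the TODO above.
// It assumes a hypothetical configChanged channel that fires whenever the agent's
// role, models, or capabilities are edited; changes are debounced briefly and the
// existing broadcast path is reused.
func (r *SharedRuntime) watchCapabilityChanges(configChanged <-chan struct{}) {
	const debounce = 2 * time.Second
	var timer *time.Timer
	for {
		select {
		case <-r.Context.Done():
			if timer != nil {
				timer.Stop()
			}
			return
		case <-configChanged:
			if timer != nil {
				timer.Stop()
			}
			timer = time.AfterFunc(debounce, r.announceCapabilitiesOnChange)
		}
	}
}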
// announceRoleOnStartup announces role when the agent starts
func (r *SharedRuntime) announceRoleOnStartup() {
role := r.Config.Agent.Role
if role == "" {
r.Logger.Info("🎭 No agent role configured; skipping role announcement")
return
}
if r.PubSub == nil {
r.Logger.Warn("⚠️ Role announcement skipped: PubSub not initialized")
return
}
r.Logger.Info("🎭 Announcing agent role to collaboration mesh")
announcement := map[string]interface{}{
"agent_id": r.Config.Agent.ID,
"node_id": r.Node.ID().ShortString(),
"role": role,
"expertise": r.Config.Agent.Expertise,
"capabilities": r.Config.Agent.Capabilities,
"reports_to": r.Config.Agent.ReportsTo,
"specialization": r.Config.Agent.Specialization,
"timestamp": time.Now().Unix(),
}
opts := pubsub.MessageOptions{
FromRole: role,
Priority: "medium",
ThreadID: fmt.Sprintf("role:%s", role),
}
if err := r.PubSub.PublishRoleBasedMessage(pubsub.RoleAnnouncement, announcement, opts); err != nil {
r.Logger.Error("❌ Failed to announce role: %v", err)
return
}
r.Logger.Info("✅ Role announcement published")
}
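// Illustrative sketch, not part of this commit: the same publish path could cover a
// live role change. Reusing pubsub.RoleAnnouncement for this purpose and the extra
// "previous_role" field are assumptions, not existing CHORUS behaviour.
func (r *SharedRuntime) announceRoleChange(previousRole, newRole string) {
	if r.PubSub == nil || newRole == "" {
		return
	}
	payload := map[string]interface{}{
		"agent_id":      r.Config.Agent.ID,
		"node_id":       r.Node.ID().ShortString(),
		"role":          newRole,
		"previous_role": previousRole,
		"timestamp":     time.Now().Unix(),
	}
	opts := pubsub.MessageOptions{
		FromRole: newRole,
		Priority: "medium",
		ThreadID: fmt.Sprintf("role:%s", newRole),
	}
	if err := r.PubSub.PublishRoleBasedMessage(pubsub.RoleAnnouncement, payload, opts); err != nil {
		r.Logger.Error("❌ Failed to announce role change: %v", err)
	}
}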
func (r *SharedRuntime) setupHealthChecks(healthManager *health.Manager) {
@@ -151,31 +216,108 @@ func (r *SharedRuntime) setupHealthChecks(healthManager *health.Manager) {
Checker: func(ctx context.Context) health.CheckResult {
healthInfo := r.BackbeatIntegration.GetHealth()
connected, _ := healthInfo["connected"].(bool)
result := health.CheckResult{
Healthy: connected,
Details: healthInfo,
Timestamp: time.Now(),
}
if connected {
result.Message = "BACKBEAT integration healthy and connected"
} else {
result.Message = "BACKBEAT integration not connected"
}
return result
},
}
healthManager.RegisterCheck(backbeatCheck)
}
// Register enhanced health instrumentation when core subsystems are available
if r.PubSub == nil {
r.Logger.Warn("⚠️ Skipping enhanced health checks: PubSub not initialized")
return
}
if r.ElectionManager == nil {
r.Logger.Warn("⚠️ Skipping enhanced health checks: election manager not ready")
return
}
var replication *dht.ReplicationManager
if r.DHTNode != nil {
replication = r.DHTNode.ReplicationManager()
}
enhanced := health.NewEnhancedHealthChecks(
healthManager,
r.ElectionManager,
r.DHTNode,
r.PubSub,
replication,
&simpleLogger{logger: r.Logger},
)
r.EnhancedHealth = enhanced
r.Logger.Info("🩺 Enhanced health checks registered")
}
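// Illustrative sketch, not part of this commit: probing the health HTTP server that
// StartAgentMode launches on Config.Network.HealthPort. It needs a "net/http" import
// and treats the response body as opaque, since the payload schema is not part of
// this diff.
func probeHealthEndpoint(port int) error {
	resp, err := http.Get(fmt.Sprintf("http://localhost:%d/health", port))
	if err != nil {
		return fmt.Errorf("health endpoint unreachable: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("health endpoint returned %s", resp.Status)
	}
	return nil
}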
func (r *SharedRuntime) setupGracefulShutdown(shutdownManager *shutdown.Manager, healthManager *health.Manager) {
// Register components for graceful shutdown
if shutdownManager == nil {
r.Logger.Warn("⚠️ Shutdown manager not initialized; graceful teardown skipped")
return
}
if r.HTTPServer != nil {
httpComponent := shutdown.NewGenericComponent("http-api-server", 10, true).
SetShutdownFunc(func(ctx context.Context) error {
return r.HTTPServer.Stop()
})
shutdownManager.Register(httpComponent)
}
if healthManager != nil {
healthComponent := shutdown.NewGenericComponent("health-manager", 15, true).
SetShutdownFunc(func(ctx context.Context) error {
return healthManager.Stop()
})
shutdownManager.Register(healthComponent)
}
if r.UCXIServer != nil {
ucxiComponent := shutdown.NewGenericComponent("ucxi-server", 20, true).
SetShutdownFunc(func(ctx context.Context) error {
return r.UCXIServer.Stop()
})
shutdownManager.Register(ucxiComponent)
}
if r.PubSub != nil {
shutdownManager.Register(shutdown.NewPubSubComponent("pubsub", r.PubSub.Close, 30))
}
if r.DHTNode != nil {
dhtComponent := shutdown.NewGenericComponent("dht-node", 35, true).
SetCloser(r.DHTNode.Close)
shutdownManager.Register(dhtComponent)
}
if r.Node != nil {
shutdownManager.Register(shutdown.NewP2PNodeComponent("p2p-node", r.Node.Close, 40))
}
if r.ElectionManager != nil {
shutdownManager.Register(shutdown.NewElectionManagerComponent("election-manager", r.ElectionManager.Stop, 45))
}
if r.BackbeatIntegration != nil {
backbeatComponent := shutdown.NewGenericComponent("backbeat-integration", 50, true).
SetShutdownFunc(func(ctx context.Context) error {
return r.BackbeatIntegration.Stop()
})
shutdownManager.Register(backbeatComponent)
}
r.Logger.Info("🛡️ Graceful shutdown components registered")
}
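// Illustrative sketch, not part of this commit: a last-resort component that cancels
// the runtime's root context so goroutines watching r.Context unwind even if another
// component stalls. The priority value (90) is an assumption about how the shutdown
// manager orders components, not documented behaviour.
func (r *SharedRuntime) registerRootCancel(shutdownManager *shutdown.Manager) {
	if shutdownManager == nil || r.Cancel == nil {
		return
	}
	cancelComponent := shutdown.NewGenericComponent("root-context-cancel", 90, true).
		SetShutdownFunc(func(ctx context.Context) error {
			r.Cancel()
			return nil
		})
	shutdownManager.Register(cancelComponent)
}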

View File

@@ -21,8 +21,10 @@ import (
"chorus/pkg/dht"
"chorus/pkg/election"
"chorus/pkg/health"
"chorus/pkg/shutdown"
"chorus/pkg/metrics"
"chorus/pkg/prompt"
"chorus/pkg/shhh"
"chorus/pkg/shutdown"
"chorus/pkg/ucxi"
"chorus/pkg/ucxl"
"chorus/pubsub"
@@ -53,8 +55,8 @@ func (l *SimpleLogger) Error(msg string, args ...interface{}) {
// SimpleTaskTracker tracks active tasks for availability reporting
type SimpleTaskTracker struct {
maxTasks int
activeTasks map[string]bool
decisionPublisher *ucxl.DecisionPublisher
}
@@ -80,7 +82,7 @@ func (t *SimpleTaskTracker) AddTask(taskID string) {
// RemoveTask marks a task as completed and publishes decision if publisher available
func (t *SimpleTaskTracker) RemoveTask(taskID string) {
delete(t.activeTasks, taskID)
// Publish task completion decision if publisher is available
if t.decisionPublisher != nil {
t.publishTaskCompletion(taskID, true, "Task completed successfully", nil)
@@ -92,7 +94,7 @@ func (t *SimpleTaskTracker) publishTaskCompletion(taskID string, success bool, s
if t.decisionPublisher == nil {
return
}
if err := t.decisionPublisher.PublishTaskCompletion(taskID, success, summary, filesModified); err != nil {
fmt.Printf("⚠️ Failed to publish task completion for %s: %v\n", taskID, err)
} else {
@@ -102,32 +104,35 @@ func (t *SimpleTaskTracker) publishTaskCompletion(taskID string, success bool, s
// SharedRuntime contains all the shared P2P infrastructure components
type SharedRuntime struct {
Config *config.Config
Logger *SimpleLogger
Context context.Context
Cancel context.CancelFunc
Node *p2p.Node
PubSub *pubsub.PubSub
HypercoreLog *logging.HypercoreLog
MDNSDiscovery *discovery.MDNSDiscovery
BackbeatIntegration *backbeat.Integration
DHTNode *dht.LibP2PDHT
EncryptedStorage *dht.EncryptedDHTStorage
DecisionPublisher *ucxl.DecisionPublisher
ElectionManager *election.ElectionManager
TaskCoordinator *coordinator.TaskCoordinator
HTTPServer *api.HTTPServer
UCXIServer *ucxi.Server
HealthManager *health.Manager
EnhancedHealth *health.EnhancedHealthChecks
ShutdownManager *shutdown.Manager
TaskTracker *SimpleTaskTracker
Metrics *metrics.CHORUSMetrics
Shhh *shhh.Sentinel
}
// Initialize sets up all shared P2P infrastructure components
func Initialize(appMode string) (*SharedRuntime, error) {
runtime := &SharedRuntime{}
runtime.Logger = &SimpleLogger{}
ctx, cancel := context.WithCancel(context.Background())
runtime.Context = ctx
runtime.Cancel = cancel
@@ -142,7 +147,7 @@ func Initialize(appMode string) (*SharedRuntime, error) {
return nil, fmt.Errorf("configuration error: %v", err)
}
runtime.Config = cfg
runtime.Logger.Info("✅ Configuration loaded successfully")
runtime.Logger.Info("🤖 Agent ID: %s", cfg.Agent.ID)
runtime.Logger.Info("🎯 Specialization: %s", cfg.Agent.Specialization)
@@ -166,6 +171,21 @@ func Initialize(appMode string) (*SharedRuntime, error) {
}
runtime.Logger.Info("✅ AI provider configured successfully")
// Initialize metrics collector
runtime.Metrics = metrics.NewCHORUSMetrics(nil)
// Initialize SHHH sentinel
sentinel, err := shhh.NewSentinel(
shhh.Config{},
shhh.WithFindingObserver(runtime.handleShhhFindings),
)
if err != nil {
return nil, fmt.Errorf("failed to initialize SHHH sentinel: %v", err)
}
sentinel.SetAuditSink(&shhhAuditSink{logger: runtime.Logger})
runtime.Shhh = sentinel
runtime.Logger.Info("🛡️ SHHH sentinel initialized")
// Initialize BACKBEAT integration
var backbeatIntegration *backbeat.Integration
backbeatIntegration, err = backbeat.NewIntegration(cfg, cfg.Agent.ID, runtime.Logger)
@@ -198,6 +218,9 @@ func Initialize(appMode string) (*SharedRuntime, error) {
// Initialize Hypercore-style logger for P2P coordination
hlog := logging.NewHypercoreLog(node.ID())
if runtime.Shhh != nil {
hlog.SetRedactor(runtime.Shhh)
}
hlog.Append(logging.PeerJoined, map[string]interface{}{"status": "started"})
runtime.HypercoreLog = hlog
runtime.Logger.Info("📝 Hypercore logger initialized")
@@ -214,8 +237,11 @@ func Initialize(appMode string) (*SharedRuntime, error) {
if err != nil {
return nil, fmt.Errorf("failed to create PubSub: %v", err)
}
if runtime.Shhh != nil {
ps.SetRedactor(runtime.Shhh)
}
runtime.PubSub = ps
runtime.Logger.Info("📡 PubSub system initialized")
// Join role-based topics if role is configured
@@ -294,12 +320,12 @@ func (r *SharedRuntime) Cleanup() {
func (r *SharedRuntime) initializeElectionSystem() error {
// === Admin Election System ===
electionManager := election.NewElectionManager(r.Context, r.Config, r.Node.Host(), r.PubSub, r.Node.ID().ShortString())
// Set election callbacks with BACKBEAT integration
electionManager.SetCallbacks(
func(oldAdmin, newAdmin string) {
r.Logger.Info("👑 Admin changed: %s -> %s", oldAdmin, newAdmin)
// Track admin change with BACKBEAT if available
if r.BackbeatIntegration != nil {
operationID := fmt.Sprintf("admin-change-%d", time.Now().Unix())
@@ -311,7 +337,7 @@ func (r *SharedRuntime) initializeElectionSystem() error {
r.BackbeatIntegration.CompleteP2POperation(operationID, 1)
}
}
// If this node becomes admin, enable SLURP functionality
if newAdmin == r.Node.ID().ShortString() {
r.Logger.Info("🎯 This node is now admin - enabling SLURP functionality")
@@ -324,12 +350,12 @@ func (r *SharedRuntime) initializeElectionSystem() error {
},
func(winner string) {
r.Logger.Info("🏆 Election completed, winner: %s", winner)
// Track election completion with BACKBEAT if available
if r.BackbeatIntegration != nil {
operationID := fmt.Sprintf("election-completed-%d", time.Now().Unix())
if err := r.BackbeatIntegration.StartP2POperation(operationID, "election", 1, map[string]interface{}{
"winner": winner,
"winner": winner,
"node_id": r.Node.ID().ShortString(),
}); err == nil {
r.BackbeatIntegration.CompleteP2POperation(operationID, 1)
@@ -337,22 +363,22 @@ func (r *SharedRuntime) initializeElectionSystem() error {
}
},
)
if err := electionManager.Start(); err != nil {
return fmt.Errorf("failed to start election manager: %v", err)
}
r.ElectionManager = electionManager
r.Logger.Info("✅ Election manager started with automated heartbeat management")
return nil
}
func (r *SharedRuntime) initializeDHTStorage() error {
// === DHT Storage and Decision Publishing ===
var dhtNode *dht.LibP2PDHT
var encryptedStorage *dht.EncryptedDHTStorage
var decisionPublisher *ucxl.DecisionPublisher
if r.Config.V2.DHT.Enabled {
// Create DHT
var err error
@@ -361,14 +387,14 @@ func (r *SharedRuntime) initializeDHTStorage() error {
r.Logger.Warn("⚠️ Failed to create DHT: %v", err)
} else {
r.Logger.Info("🕸️ DHT initialized")
// Bootstrap DHT with BACKBEAT tracking
if r.BackbeatIntegration != nil {
operationID := fmt.Sprintf("dht-bootstrap-%d", time.Now().Unix())
if err := r.BackbeatIntegration.StartP2POperation(operationID, "dht_bootstrap", 4, nil); err == nil {
r.BackbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0)
}
if err := dhtNode.Bootstrap(); err != nil {
r.Logger.Warn("⚠️ DHT bootstrap failed: %v", err)
r.BackbeatIntegration.FailP2POperation(operationID, err.Error())
@@ -380,22 +406,22 @@ func (r *SharedRuntime) initializeDHTStorage() error {
r.Logger.Warn("⚠️ DHT bootstrap failed: %v", err)
}
}
// Connect to bootstrap peers if configured
for _, addrStr := range r.Config.V2.DHT.BootstrapPeers {
addr, err := multiaddr.NewMultiaddr(addrStr)
if err != nil {
r.Logger.Warn("⚠️ Invalid bootstrap address %s: %v", addrStr, err)
continue
}
// Extract peer info from multiaddr
info, err := peer.AddrInfoFromP2pAddr(addr)
if err != nil {
r.Logger.Warn("⚠️ Failed to parse peer info from %s: %v", addrStr, err)
continue
}
// Track peer discovery with BACKBEAT if available
if r.BackbeatIntegration != nil {
operationID := fmt.Sprintf("peer-discovery-%d", time.Now().Unix())
@@ -403,7 +429,7 @@ func (r *SharedRuntime) initializeDHTStorage() error {
"peer_addr": addrStr,
}); err == nil {
r.BackbeatIntegration.UpdateP2POperationPhase(operationID, backbeat.PhaseConnecting, 0)
if err := r.Node.Host().Connect(r.Context, *info); err != nil {
r.Logger.Warn("⚠️ Failed to connect to bootstrap peer %s: %v", addrStr, err)
r.BackbeatIntegration.FailP2POperation(operationID, err.Error())
@@ -420,20 +446,20 @@ func (r *SharedRuntime) initializeDHTStorage() error {
}
}
}
// Initialize encrypted storage
encryptedStorage = dht.NewEncryptedDHTStorage(
r.Context,
r.Node.Host(),
dhtNode,
r.Config,
r.Node.ID().ShortString(),
)
// Start cache cleanup
encryptedStorage.StartCacheCleanup(5 * time.Minute)
r.Logger.Info("🔐 Encrypted DHT storage initialized")
// Initialize decision publisher
decisionPublisher = ucxl.NewDecisionPublisher(
r.Context,
@@ -451,11 +477,24 @@ func (r *SharedRuntime) initializeDHTStorage() error {
r.DHTNode = dhtNode
r.EncryptedStorage = encryptedStorage
r.DecisionPublisher = decisionPublisher
return nil
}
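// Illustrative sketch, not part of this commit: the StartP2POperation /
// CompleteP2POperation / FailP2POperation pattern above repeats for bootstrap and
// peer-discovery tracking; a helper like this could wrap it. The beats parameter
// mirrors the literal beat budgets used above and is otherwise an assumption.
func (r *SharedRuntime) trackP2POperation(kind string, beats int, meta map[string]interface{}, op func() error) error {
	if r.BackbeatIntegration == nil {
		return op()
	}
	operationID := fmt.Sprintf("%s-%d", kind, time.Now().Unix())
	if err := r.BackbeatIntegration.StartP2POperation(operationID, kind, beats, meta); err != nil {
		// Tracking is best-effort; run the operation untracked if registration fails.
		return op()
	}
	if err := op(); err != nil {
		r.BackbeatIntegration.FailP2POperation(operationID, err.Error())
		return err
	}
	r.BackbeatIntegration.CompleteP2POperation(operationID, beats)
	return nil
}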
func (r *SharedRuntime) initializeServices() error {
// Create simple task tracker ahead of coordinator so broadcasts stay accurate
taskTracker := &SimpleTaskTracker{
maxTasks: r.Config.Agent.MaxTasks,
activeTasks: make(map[string]bool),
}
// Connect decision publisher to task tracker if available
if r.DecisionPublisher != nil {
taskTracker.decisionPublisher = r.DecisionPublisher
r.Logger.Info("📤 Task completion decisions will be published to DHT")
}
r.TaskTracker = taskTracker
// === Task Coordination Integration ===
taskCoordinator := coordinator.NewTaskCoordinator(
r.Context,
@@ -464,8 +503,9 @@ func (r *SharedRuntime) initializeServices() error {
r.Config,
r.Node.ID().ShortString(),
nil, // HMMM router placeholder
taskTracker,
)
taskCoordinator.Start()
r.TaskCoordinator = taskCoordinator
r.Logger.Info("✅ Task coordination system active")
@@ -487,14 +527,14 @@ func (r *SharedRuntime) initializeServices() error {
if storageDir == "" {
storageDir = filepath.Join(os.TempDir(), "chorus-ucxi-storage")
}
storage, err := ucxi.NewBasicContentStorage(storageDir)
if err != nil {
r.Logger.Warn("⚠️ Failed to create UCXI storage: %v", err)
} else {
resolver := ucxi.NewBasicAddressResolver(r.Node.ID().ShortString())
resolver.SetDefaultTTL(r.Config.UCXL.Resolution.CacheTTL)
ucxiConfig := ucxi.ServerConfig{
Port: r.Config.UCXL.Server.Port,
BasePath: r.Config.UCXL.Server.BasePath,
@@ -502,7 +542,7 @@ func (r *SharedRuntime) initializeServices() error {
Storage: storage,
Logger: ucxi.SimpleLogger{},
}
ucxiServer = ucxi.NewServer(ucxiConfig)
go func() {
r.Logger.Info("🔗 UCXI server starting on :%d", r.Config.UCXL.Server.Port)
@@ -515,35 +555,41 @@ func (r *SharedRuntime) initializeServices() error {
r.Logger.Info("⚪ UCXI server disabled")
}
r.UCXIServer = ucxiServer
return nil
}
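// Illustrative sketch, not part of this commit: wrapping task execution with the
// tracker wired up above, so availability broadcasts and (when a decision publisher
// is attached) DHT completion records stay consistent. runTracked is a hypothetical
// helper, not an existing CHORUS API.
func runTracked(tracker *SimpleTaskTracker, taskID string, work func() error) error {
	tracker.AddTask(taskID)
	defer tracker.RemoveTask(taskID) // publishes a task-completion decision when a publisher is set
	return work()
}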
func (r *SharedRuntime) handleShhhFindings(ctx context.Context, findings []shhh.Finding) {
if r == nil || r.Metrics == nil {
return
}
for _, finding := range findings {
r.Metrics.IncrementSHHHFindings(finding.Rule, string(finding.Severity), finding.Count)
}
}
type shhhAuditSink struct {
logger *SimpleLogger
}
func (s *shhhAuditSink) RecordRedaction(_ context.Context, event shhh.AuditEvent) {
if s == nil || s.logger == nil {
return
}
s.logger.Warn("🔒 SHHH redaction applied (rule=%s severity=%s path=%s)", event.Rule, event.Severity, event.Path)
}
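// Illustrative sketch, not part of this commit: a standalone sentinel wired to a
// custom finding observer (useful in tests), using only the constructor options and
// Finding fields already referenced in this file.
func newLoggingSentinel(logger *SimpleLogger) (*shhh.Sentinel, error) {
	return shhh.NewSentinel(
		shhh.Config{},
		shhh.WithFindingObserver(func(ctx context.Context, findings []shhh.Finding) {
			for _, f := range findings {
				logger.Warn("🔒 SHHH finding rule=%s severity=%s count=%d", f.Rule, f.Severity, f.Count)
			}
		}),
	)
}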
// initializeAIProvider configures the reasoning engine with the appropriate AI provider
func initializeAIProvider(cfg *config.Config, logger *SimpleLogger) error {
// Set the AI provider
reasoning.SetAIProvider(cfg.AI.Provider)
// Configure the selected provider
switch cfg.AI.Provider {
case "resetdata":
if cfg.AI.ResetData.APIKey == "" {
return fmt.Errorf("RESETDATA_API_KEY environment variable is required for resetdata provider")
}
resetdataConfig := reasoning.ResetDataConfig{
BaseURL: cfg.AI.ResetData.BaseURL,
APIKey: cfg.AI.ResetData.APIKey,
@@ -551,19 +597,19 @@ func initializeAIProvider(cfg *config.Config, logger *SimpleLogger) error {
Timeout: cfg.AI.ResetData.Timeout,
}
reasoning.SetResetDataConfig(resetdataConfig)
logger.Info("🌐 ResetData AI provider configured - Endpoint: %s, Model: %s",
logger.Info("🌐 ResetData AI provider configured - Endpoint: %s, Model: %s",
cfg.AI.ResetData.BaseURL, cfg.AI.ResetData.Model)
case "ollama":
reasoning.SetOllamaEndpoint(cfg.AI.Ollama.Endpoint)
logger.Info("🦙 Ollama AI provider configured - Endpoint: %s", cfg.AI.Ollama.Endpoint)
default:
logger.Warn("⚠️ Unknown AI provider '%s', defaulting to resetdata", cfg.AI.Provider)
if cfg.AI.ResetData.APIKey == "" {
return fmt.Errorf("RESETDATA_API_KEY environment variable is required for default resetdata provider")
}
resetdataConfig := reasoning.ResetDataConfig{
BaseURL: cfg.AI.ResetData.BaseURL,
APIKey: cfg.AI.ResetData.APIKey,
@@ -573,7 +619,7 @@ func initializeAIProvider(cfg *config.Config, logger *SimpleLogger) error {
reasoning.SetResetDataConfig(resetdataConfig)
reasoning.SetAIProvider("resetdata")
}
// Configure model selection
reasoning.SetModelConfig(
cfg.Agent.Models,