Prepare for v2 development: Add MCP integration and future development planning
- Add FUTURE_DEVELOPMENT.md with comprehensive v2 protocol specification - Add MCP integration design and implementation foundation - Add infrastructure and deployment configurations - Update system architecture for v2 evolution 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
222
pkg/config/slurp_config.go
Normal file
222
pkg/config/slurp_config.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SlurpConfig holds SLURP event system integration configuration
|
||||
type SlurpConfig struct {
|
||||
// Connection settings
|
||||
Enabled bool `yaml:"enabled" json:"enabled"`
|
||||
BaseURL string `yaml:"base_url" json:"base_url"`
|
||||
APIKey string `yaml:"api_key" json:"api_key"`
|
||||
Timeout time.Duration `yaml:"timeout" json:"timeout"`
|
||||
RetryCount int `yaml:"retry_count" json:"retry_count"`
|
||||
RetryDelay time.Duration `yaml:"retry_delay" json:"retry_delay"`
|
||||
|
||||
// Event generation settings
|
||||
EventGeneration EventGenerationConfig `yaml:"event_generation" json:"event_generation"`
|
||||
|
||||
// Project-specific event mappings
|
||||
ProjectMappings map[string]ProjectEventMapping `yaml:"project_mappings" json:"project_mappings"`
|
||||
|
||||
// Default event settings
|
||||
DefaultEventSettings DefaultEventConfig `yaml:"default_event_settings" json:"default_event_settings"`
|
||||
|
||||
// Batch processing settings
|
||||
BatchProcessing BatchConfig `yaml:"batch_processing" json:"batch_processing"`
|
||||
}
|
||||
|
||||
// EventGenerationConfig controls when and how SLURP events are generated
|
||||
type EventGenerationConfig struct {
|
||||
// Consensus requirements
|
||||
MinConsensusStrength float64 `yaml:"min_consensus_strength" json:"min_consensus_strength"`
|
||||
MinParticipants int `yaml:"min_participants" json:"min_participants"`
|
||||
RequireUnanimity bool `yaml:"require_unanimity" json:"require_unanimity"`
|
||||
|
||||
// Time-based triggers
|
||||
MaxDiscussionDuration time.Duration `yaml:"max_discussion_duration" json:"max_discussion_duration"`
|
||||
MinDiscussionDuration time.Duration `yaml:"min_discussion_duration" json:"min_discussion_duration"`
|
||||
|
||||
// Event type generation rules
|
||||
EnabledEventTypes []string `yaml:"enabled_event_types" json:"enabled_event_types"`
|
||||
DisabledEventTypes []string `yaml:"disabled_event_types" json:"disabled_event_types"`
|
||||
|
||||
// Severity calculation
|
||||
SeverityRules SeverityConfig `yaml:"severity_rules" json:"severity_rules"`
|
||||
}
|
||||
|
||||
// SeverityConfig defines how event severity (1-10 scale) is calculated
// from the characteristics of an HMMM discussion.
type SeverityConfig struct {
	// BaseSeverity maps each event type to its starting severity.
	BaseSeverity map[string]int `yaml:"base_severity" json:"base_severity"`

	// Modifiers derived from discussion characteristics.
	ParticipantMultiplier float64  `yaml:"participant_multiplier" json:"participant_multiplier"`
	DurationMultiplier    float64  `yaml:"duration_multiplier" json:"duration_multiplier"`
	UrgencyKeywords       []string `yaml:"urgency_keywords" json:"urgency_keywords"`
	UrgencyBoost          int      `yaml:"urgency_boost" json:"urgency_boost"`

	// Hard caps applied after all modifiers.
	MinSeverity int `yaml:"min_severity" json:"min_severity"`
	MaxSeverity int `yaml:"max_severity" json:"max_severity"`
}
|
||||
|
||||
// ProjectEventMapping defines project-specific event mapping rules
|
||||
type ProjectEventMapping struct {
|
||||
ProjectPath string `yaml:"project_path" json:"project_path"`
|
||||
CustomEventTypes map[string]string `yaml:"custom_event_types" json:"custom_event_types"`
|
||||
SeverityOverrides map[string]int `yaml:"severity_overrides" json:"severity_overrides"`
|
||||
AdditionalMetadata map[string]interface{} `yaml:"additional_metadata" json:"additional_metadata"`
|
||||
EventFilters []EventFilter `yaml:"event_filters" json:"event_filters"`
|
||||
}
|
||||
|
||||
// EventFilter describes a condition set for filtering or rewriting events
// before they are sent to SLURP.
type EventFilter struct {
	Name       string            `yaml:"name" json:"name"`
	Conditions map[string]string `yaml:"conditions" json:"conditions"`
	// Action is one of "allow", "deny", or "modify".
	Action        string            `yaml:"action" json:"action"`
	Modifications map[string]string `yaml:"modifications" json:"modifications"`
}
|
||||
|
||||
// DefaultEventConfig provides the default values applied to generated
// events when no project-specific override is present.
type DefaultEventConfig struct {
	DefaultSeverity  int               `yaml:"default_severity" json:"default_severity"`
	DefaultCreatedBy string            `yaml:"default_created_by" json:"default_created_by"`
	DefaultTags      []string          `yaml:"default_tags" json:"default_tags"`
	MetadataTemplate map[string]string `yaml:"metadata_template" json:"metadata_template"`
}
|
||||
|
||||
// BatchConfig controls batched delivery of SLURP events: maximum batch
// size, how long a partial batch may wait, and shutdown flushing.
type BatchConfig struct {
	Enabled         bool          `yaml:"enabled" json:"enabled"`
	MaxBatchSize    int           `yaml:"max_batch_size" json:"max_batch_size"`
	MaxBatchWait    time.Duration `yaml:"max_batch_wait" json:"max_batch_wait"`
	FlushOnShutdown bool          `yaml:"flush_on_shutdown" json:"flush_on_shutdown"`
}
|
||||
|
||||
// HmmmToSlurpMapping maps HMMM discussion outcomes to SLURP event types,
// and lists the trigger keywords associated with each event type.
type HmmmToSlurpMapping struct {
	// Consensus outcome -> SLURP event type.
	ConsensusApproval    string `yaml:"consensus_approval" json:"consensus_approval"`       // -> "approval"
	RiskIdentified       string `yaml:"risk_identified" json:"risk_identified"`             // -> "warning"
	CriticalBlocker      string `yaml:"critical_blocker" json:"critical_blocker"`           // -> "blocker"
	PriorityChange       string `yaml:"priority_change" json:"priority_change"`             // -> "priority_change"
	AccessRequest        string `yaml:"access_request" json:"access_request"`               // -> "access_update"
	ArchitectureDecision string `yaml:"architecture_decision" json:"architecture_decision"` // -> "structural_change"
	InformationShare     string `yaml:"information_share" json:"information_share"`         // -> "announcement"

	// Keyword lists that trigger the corresponding event types.
	ApprovalKeywords     []string `yaml:"approval_keywords" json:"approval_keywords"`
	WarningKeywords      []string `yaml:"warning_keywords" json:"warning_keywords"`
	BlockerKeywords      []string `yaml:"blocker_keywords" json:"blocker_keywords"`
	PriorityKeywords     []string `yaml:"priority_keywords" json:"priority_keywords"`
	AccessKeywords       []string `yaml:"access_keywords" json:"access_keywords"`
	StructuralKeywords   []string `yaml:"structural_keywords" json:"structural_keywords"`
	AnnouncementKeywords []string `yaml:"announcement_keywords" json:"announcement_keywords"`
}
|
||||
|
||||
// GetDefaultSlurpConfig returns default SLURP configuration
|
||||
func GetDefaultSlurpConfig() SlurpConfig {
|
||||
return SlurpConfig{
|
||||
Enabled: false, // Disabled by default until configured
|
||||
BaseURL: "http://localhost:8080",
|
||||
Timeout: 30 * time.Second,
|
||||
RetryCount: 3,
|
||||
RetryDelay: 5 * time.Second,
|
||||
|
||||
EventGeneration: EventGenerationConfig{
|
||||
MinConsensusStrength: 0.7,
|
||||
MinParticipants: 2,
|
||||
RequireUnanimity: false,
|
||||
MaxDiscussionDuration: 30 * time.Minute,
|
||||
MinDiscussionDuration: 1 * time.Minute,
|
||||
EnabledEventTypes: []string{
|
||||
"announcement", "warning", "blocker", "approval",
|
||||
"priority_change", "access_update", "structural_change",
|
||||
},
|
||||
DisabledEventTypes: []string{},
|
||||
SeverityRules: SeverityConfig{
|
||||
BaseSeverity: map[string]int{
|
||||
"announcement": 3,
|
||||
"warning": 5,
|
||||
"blocker": 8,
|
||||
"approval": 4,
|
||||
"priority_change": 6,
|
||||
"access_update": 5,
|
||||
"structural_change": 7,
|
||||
},
|
||||
ParticipantMultiplier: 0.2,
|
||||
DurationMultiplier: 0.1,
|
||||
UrgencyKeywords: []string{"urgent", "critical", "blocker", "emergency", "immediate"},
|
||||
UrgencyBoost: 2,
|
||||
MinSeverity: 1,
|
||||
MaxSeverity: 10,
|
||||
},
|
||||
},
|
||||
|
||||
ProjectMappings: make(map[string]ProjectEventMapping),
|
||||
|
||||
DefaultEventSettings: DefaultEventConfig{
|
||||
DefaultSeverity: 5,
|
||||
DefaultCreatedBy: "hmmm-consensus",
|
||||
DefaultTags: []string{"hmmm-generated", "automated"},
|
||||
MetadataTemplate: map[string]string{
|
||||
"source": "hmmm-discussion",
|
||||
"generation_type": "consensus-based",
|
||||
},
|
||||
},
|
||||
|
||||
BatchProcessing: BatchConfig{
|
||||
Enabled: true,
|
||||
MaxBatchSize: 10,
|
||||
MaxBatchWait: 5 * time.Second,
|
||||
FlushOnShutdown: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetHmmmToSlurpMapping returns the default mapping configuration
|
||||
func GetHmmmToSlurpMapping() HmmmToSlurpMapping {
|
||||
return HmmmToSlurpMapping{
|
||||
ConsensusApproval: "approval",
|
||||
RiskIdentified: "warning",
|
||||
CriticalBlocker: "blocker",
|
||||
PriorityChange: "priority_change",
|
||||
AccessRequest: "access_update",
|
||||
ArchitectureDecision: "structural_change",
|
||||
InformationShare: "announcement",
|
||||
|
||||
ApprovalKeywords: []string{"approve", "approved", "looks good", "lgtm", "accepted", "agree"},
|
||||
WarningKeywords: []string{"warning", "caution", "risk", "potential issue", "concern", "careful"},
|
||||
BlockerKeywords: []string{"blocker", "blocked", "critical", "urgent", "cannot proceed", "show stopper"},
|
||||
PriorityKeywords: []string{"priority", "urgent", "high priority", "low priority", "reprioritize"},
|
||||
AccessKeywords: []string{"access", "permission", "auth", "authorization", "credentials", "token"},
|
||||
StructuralKeywords: []string{"architecture", "structure", "design", "refactor", "framework", "pattern"},
|
||||
AnnouncementKeywords: []string{"announce", "fyi", "information", "update", "news", "notice"},
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateSlurpConfig validates SLURP configuration
|
||||
func ValidateSlurpConfig(config SlurpConfig) error {
|
||||
if config.Enabled {
|
||||
if config.BaseURL == "" {
|
||||
return fmt.Errorf("slurp.base_url is required when SLURP is enabled")
|
||||
}
|
||||
|
||||
if config.EventGeneration.MinConsensusStrength < 0 || config.EventGeneration.MinConsensusStrength > 1 {
|
||||
return fmt.Errorf("slurp.event_generation.min_consensus_strength must be between 0 and 1")
|
||||
}
|
||||
|
||||
if config.EventGeneration.MinParticipants < 1 {
|
||||
return fmt.Errorf("slurp.event_generation.min_participants must be at least 1")
|
||||
}
|
||||
|
||||
if config.DefaultEventSettings.DefaultSeverity < 1 || config.DefaultEventSettings.DefaultSeverity > 10 {
|
||||
return fmt.Errorf("slurp.default_event_settings.default_severity must be between 1 and 10")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -200,7 +200,7 @@ func (dd *DependencyDetector) announceDependency(dep *TaskDependency) {
|
||||
dep.Task2.Repository, dep.Task2.Title, dep.Task2.TaskID,
|
||||
dep.Relationship)
|
||||
|
||||
// Create coordination message for Antennae meta-discussion
|
||||
// Create coordination message for HMMM meta-discussion
|
||||
coordMsg := map[string]interface{}{
|
||||
"message_type": "dependency_detected",
|
||||
"dependency": dep,
|
||||
@@ -219,11 +219,11 @@ func (dd *DependencyDetector) announceDependency(dep *TaskDependency) {
|
||||
"detected_at": dep.DetectedAt.Unix(),
|
||||
}
|
||||
|
||||
// Publish to Antennae meta-discussion channel
|
||||
if err := dd.pubsub.PublishAntennaeMessage(pubsub.MetaDiscussion, coordMsg); err != nil {
|
||||
// Publish to HMMM meta-discussion channel
|
||||
if err := dd.pubsub.PublishHmmmMessage(pubsub.MetaDiscussion, coordMsg); err != nil {
|
||||
fmt.Printf("❌ Failed to announce dependency: %v\n", err)
|
||||
} else {
|
||||
fmt.Printf("📡 Dependency coordination request sent to Antennae channel\n")
|
||||
fmt.Printf("📡 Dependency coordination request sent to HMMM channel\n")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/anthonyrawlins/bzzz/pkg/integration"
|
||||
"github.com/anthonyrawlins/bzzz/pubsub"
|
||||
"github.com/anthonyrawlins/bzzz/reasoning"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -18,6 +19,7 @@ type MetaCoordinator struct {
|
||||
pubsub *pubsub.PubSub
|
||||
ctx context.Context
|
||||
dependencyDetector *DependencyDetector
|
||||
slurpIntegrator *integration.SlurpEventIntegrator
|
||||
|
||||
// Active coordination sessions
|
||||
activeSessions map[string]*CoordinationSession // sessionID -> session
|
||||
@@ -79,7 +81,7 @@ func NewMetaCoordinator(ctx context.Context, ps *pubsub.PubSub) *MetaCoordinator
|
||||
mc.dependencyDetector = NewDependencyDetector(ctx, ps)
|
||||
|
||||
// Set up message handler for meta-discussions
|
||||
ps.SetAntennaeMessageHandler(mc.handleMetaMessage)
|
||||
ps.SetHmmmMessageHandler(mc.handleMetaMessage)
|
||||
|
||||
// Start session management
|
||||
go mc.sessionCleanupLoop()
|
||||
@@ -88,7 +90,13 @@ func NewMetaCoordinator(ctx context.Context, ps *pubsub.PubSub) *MetaCoordinator
|
||||
return mc
|
||||
}
|
||||
|
||||
// handleMetaMessage processes incoming Antennae meta-discussion messages
|
||||
// SetSlurpIntegrator sets the SLURP event integrator for the coordinator
|
||||
func (mc *MetaCoordinator) SetSlurpIntegrator(integrator *integration.SlurpEventIntegrator) {
|
||||
mc.slurpIntegrator = integrator
|
||||
fmt.Printf("🎯 SLURP integrator attached to Meta Coordinator\n")
|
||||
}
|
||||
|
||||
// handleMetaMessage processes incoming HMMM meta-discussion messages
|
||||
func (mc *MetaCoordinator) handleMetaMessage(msg pubsub.Message, from peer.ID) {
|
||||
messageType, hasType := msg.Data[\"message_type\"].(string)
|
||||
if !hasType {
|
||||
@@ -227,7 +235,7 @@ Keep the plan practical and actionable. Focus on specific next steps.`,
|
||||
|
||||
// broadcastToSession sends a message to all participants in a session
|
||||
func (mc *MetaCoordinator) broadcastToSession(session *CoordinationSession, data map[string]interface{}) {
|
||||
if err := mc.pubsub.PublishAntennaeMessage(pubsub.MetaDiscussion, data); err != nil {
|
||||
if err := mc.pubsub.PublishHmmmMessage(pubsub.MetaDiscussion, data); err != nil {
|
||||
fmt.Printf(\"❌ Failed to broadcast to session %s: %v\\n\", session.SessionID, err)
|
||||
}
|
||||
}
|
||||
@@ -320,6 +328,11 @@ func (mc *MetaCoordinator) escalateSession(session *CoordinationSession, reason
|
||||
|
||||
fmt.Printf(\"🚨 Escalating coordination session %s: %s\\n\", session.SessionID, reason)
|
||||
|
||||
// Generate SLURP event if integrator is available
|
||||
if mc.slurpIntegrator != nil {
|
||||
mc.generateSlurpEventFromSession(session, \"escalated\")
|
||||
}
|
||||
|
||||
// Create escalation message
|
||||
escalationData := map[string]interface{}{
|
||||
\"message_type\": \"escalation\",
|
||||
@@ -341,6 +354,11 @@ func (mc *MetaCoordinator) resolveSession(session *CoordinationSession, resoluti
|
||||
|
||||
fmt.Printf(\"✅ Resolved coordination session %s: %s\\n\", session.SessionID, resolution)
|
||||
|
||||
// Generate SLURP event if integrator is available
|
||||
if mc.slurpIntegrator != nil {
|
||||
mc.generateSlurpEventFromSession(session, \"resolved\")
|
||||
}
|
||||
|
||||
// Broadcast resolution
|
||||
resolutionData := map[string]interface{}{
|
||||
\"message_type\": \"resolution\",
|
||||
@@ -437,4 +455,72 @@ func (mc *MetaCoordinator) handleCoordinationRequest(msg pubsub.Message, from pe
|
||||
func (mc *MetaCoordinator) handleEscalationRequest(msg pubsub.Message, from peer.ID) {
|
||||
fmt.Printf(\"🚨 Escalation request from %s\\n\", from.ShortString())
|
||||
// Implementation for handling escalation requests
|
||||
}
|
||||
|
||||
// generateSlurpEventFromSession creates and sends a SLURP event based on session outcome
|
||||
func (mc *MetaCoordinator) generateSlurpEventFromSession(session *CoordinationSession, outcome string) {
|
||||
// Convert coordination session to HMMM discussion context
|
||||
hmmmMessages := make([]integration.HmmmMessage, len(session.Messages))
|
||||
for i, msg := range session.Messages {
|
||||
hmmmMessages[i] = integration.HmmmMessage{
|
||||
From: msg.FromAgentID,
|
||||
Content: msg.Content,
|
||||
Type: msg.MessageType,
|
||||
Timestamp: msg.Timestamp,
|
||||
Metadata: msg.Metadata,
|
||||
}
|
||||
}
|
||||
|
||||
// Extract participant IDs
|
||||
participants := make([]string, 0, len(session.Participants))
|
||||
for agentID := range session.Participants {
|
||||
participants = append(participants, agentID)
|
||||
}
|
||||
|
||||
// Determine consensus strength based on outcome
|
||||
var consensusStrength float64
|
||||
switch outcome {
|
||||
case \"resolved\":
|
||||
consensusStrength = 0.9 // High consensus for resolved sessions
|
||||
case \"escalated\":
|
||||
consensusStrength = 0.3 // Low consensus for escalated sessions
|
||||
default:
|
||||
consensusStrength = 0.5 // Medium consensus for other outcomes
|
||||
}
|
||||
|
||||
// Determine project path from tasks involved
|
||||
projectPath := \"/unknown\"
|
||||
if len(session.TasksInvolved) > 0 && session.TasksInvolved[0] != nil {
|
||||
projectPath = session.TasksInvolved[0].Repository
|
||||
}
|
||||
|
||||
// Create HMMM discussion context
|
||||
discussionContext := integration.HmmmDiscussionContext{
|
||||
DiscussionID: session.SessionID,
|
||||
SessionID: session.SessionID,
|
||||
Participants: participants,
|
||||
StartTime: session.CreatedAt,
|
||||
EndTime: session.LastActivity,
|
||||
Messages: hmmmMessages,
|
||||
ConsensusReached: outcome == \"resolved\",
|
||||
ConsensusStrength: consensusStrength,
|
||||
OutcomeType: outcome,
|
||||
ProjectPath: projectPath,
|
||||
RelatedTasks: []string{}, // Could be populated from TasksInvolved
|
||||
Metadata: map[string]interface{}{
|
||||
\"session_type\": session.Type,
|
||||
\"session_status\": session.Status,
|
||||
\"resolution\": session.Resolution,
|
||||
\"escalation_reason\": session.EscalationReason,
|
||||
\"message_count\": len(session.Messages),
|
||||
\"participant_count\": len(session.Participants),
|
||||
},
|
||||
}
|
||||
|
||||
// Process the discussion through SLURP integrator
|
||||
if err := mc.slurpIntegrator.ProcessHmmmDiscussion(mc.ctx, discussionContext); err != nil {
|
||||
fmt.Printf(\"❌ Failed to process HMMM discussion for SLURP: %v\\n\", err)
|
||||
} else {
|
||||
fmt.Printf(\"🎯 Generated SLURP event from session %s (outcome: %s)\\n\", session.SessionID, outcome)
|
||||
}
|
||||
}
|
||||
327
pkg/integration/slurp_client.go
Normal file
327
pkg/integration/slurp_client.go
Normal file
@@ -0,0 +1,327 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/anthonyrawlins/bzzz/pkg/config"
|
||||
)
|
||||
|
||||
// SlurpClient is an HTTP client for the SLURP REST API, carrying the
// connection, authentication, and retry settings taken from config.
type SlurpClient struct {
	baseURL    string        // normalised base URL (no trailing slash)
	apiKey     string        // optional bearer token
	timeout    time.Duration // per-request timeout
	retryCount int           // number of retries after the first attempt
	retryDelay time.Duration // delay between attempts
	httpClient *http.Client
}
|
||||
|
||||
// SlurpEvent is the wire representation of a single SLURP event.
type SlurpEvent struct {
	EventType string                 `json:"event_type"`
	Path      string                 `json:"path"`
	Content   string                 `json:"content"`
	Severity  int                    `json:"severity"`
	CreatedBy string                 `json:"created_by"`
	Metadata  map[string]interface{} `json:"metadata"`
	Tags      []string               `json:"tags,omitempty"`
	Timestamp time.Time              `json:"timestamp"`
}
|
||||
|
||||
// EventResponse is the SLURP API response for a single event submission.
type EventResponse struct {
	Success   bool      `json:"success"`
	EventID   string    `json:"event_id,omitempty"`
	Message   string    `json:"message,omitempty"`
	Error     string    `json:"error,omitempty"`
	Timestamp time.Time `json:"timestamp"`
}
|
||||
|
||||
// BatchEventRequest represents a batch of events to be sent to SLURP
|
||||
type BatchEventRequest struct {
|
||||
Events []SlurpEvent `json:"events"`
|
||||
Source string `json:"source"`
|
||||
}
|
||||
|
||||
// BatchEventResponse is the SLURP API response for a batch submission,
// reporting per-batch counts and any per-event errors.
type BatchEventResponse struct {
	Success        bool      `json:"success"`
	ProcessedCount int       `json:"processed_count"`
	FailedCount    int       `json:"failed_count"`
	EventIDs       []string  `json:"event_ids,omitempty"`
	Errors         []string  `json:"errors,omitempty"`
	Message        string    `json:"message,omitempty"`
	Timestamp      time.Time `json:"timestamp"`
}
|
||||
|
||||
// HealthResponse describes the SLURP service health status as returned
// by the /api/health endpoint.
type HealthResponse struct {
	Status    string    `json:"status"`
	Version   string    `json:"version,omitempty"`
	Uptime    string    `json:"uptime,omitempty"`
	Timestamp time.Time `json:"timestamp"`
}
|
||||
|
||||
// NewSlurpClient creates a new SLURP API client
|
||||
func NewSlurpClient(config config.SlurpConfig) *SlurpClient {
|
||||
return &SlurpClient{
|
||||
baseURL: strings.TrimSuffix(config.BaseURL, "/"),
|
||||
apiKey: config.APIKey,
|
||||
timeout: config.Timeout,
|
||||
retryCount: config.RetryCount,
|
||||
retryDelay: config.RetryDelay,
|
||||
httpClient: &http.Client{
|
||||
Timeout: config.Timeout,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CreateEvent sends a single event to SLURP
|
||||
func (c *SlurpClient) CreateEvent(ctx context.Context, event SlurpEvent) (*EventResponse, error) {
|
||||
url := fmt.Sprintf("%s/api/events", c.baseURL)
|
||||
|
||||
eventData, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal event: %w", err)
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
for attempt := 0; attempt <= c.retryCount; attempt++ {
|
||||
if attempt > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-time.After(c.retryDelay):
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(eventData))
|
||||
if err != nil {
|
||||
lastErr = fmt.Errorf("failed to create request: %w", err)
|
||||
continue
|
||||
}
|
||||
|
||||
c.setHeaders(req)
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
lastErr = fmt.Errorf("failed to send request: %w", err)
|
||||
continue
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
if c.isRetryableStatus(resp.StatusCode) && attempt < c.retryCount {
|
||||
lastErr = fmt.Errorf("retryable error: HTTP %d", resp.StatusCode)
|
||||
continue
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response body: %w", err)
|
||||
}
|
||||
|
||||
var eventResp EventResponse
|
||||
if err := json.Unmarshal(body, &eventResp); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal response: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return &eventResp, fmt.Errorf("SLURP API error (HTTP %d): %s", resp.StatusCode, eventResp.Error)
|
||||
}
|
||||
|
||||
return &eventResp, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed after %d attempts: %w", c.retryCount+1, lastErr)
|
||||
}
|
||||
|
||||
// CreateEventsBatch sends multiple events to SLURP in a single request
|
||||
func (c *SlurpClient) CreateEventsBatch(ctx context.Context, events []SlurpEvent) (*BatchEventResponse, error) {
|
||||
url := fmt.Sprintf("%s/api/events/batch", c.baseURL)
|
||||
|
||||
batchRequest := BatchEventRequest{
|
||||
Events: events,
|
||||
Source: "bzzz-hmmm-integration",
|
||||
}
|
||||
|
||||
batchData, err := json.Marshal(batchRequest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal batch request: %w", err)
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
for attempt := 0; attempt <= c.retryCount; attempt++ {
|
||||
if attempt > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-time.After(c.retryDelay):
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(batchData))
|
||||
if err != nil {
|
||||
lastErr = fmt.Errorf("failed to create batch request: %w", err)
|
||||
continue
|
||||
}
|
||||
|
||||
c.setHeaders(req)
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
lastErr = fmt.Errorf("failed to send batch request: %w", err)
|
||||
continue
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
if c.isRetryableStatus(resp.StatusCode) && attempt < c.retryCount {
|
||||
lastErr = fmt.Errorf("retryable error: HTTP %d", resp.StatusCode)
|
||||
continue
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read batch response body: %w", err)
|
||||
}
|
||||
|
||||
var batchResp BatchEventResponse
|
||||
if err := json.Unmarshal(body, &batchResp); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal batch response: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return &batchResp, fmt.Errorf("SLURP batch API error (HTTP %d): %s", resp.StatusCode, batchResp.Message)
|
||||
}
|
||||
|
||||
return &batchResp, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("batch failed after %d attempts: %w", c.retryCount+1, lastErr)
|
||||
}
|
||||
|
||||
// GetHealth checks SLURP service health
|
||||
func (c *SlurpClient) GetHealth(ctx context.Context) (*HealthResponse, error) {
|
||||
url := fmt.Sprintf("%s/api/health", c.baseURL)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create health request: %w", err)
|
||||
}
|
||||
|
||||
c.setHeaders(req)
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to send health request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read health response: %w", err)
|
||||
}
|
||||
|
||||
var healthResp HealthResponse
|
||||
if err := json.Unmarshal(body, &healthResp); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal health response: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return &healthResp, fmt.Errorf("SLURP health check failed (HTTP %d)", resp.StatusCode)
|
||||
}
|
||||
|
||||
return &healthResp, nil
|
||||
}
|
||||
|
||||
// QueryEvents retrieves events from SLURP based on filters
|
||||
func (c *SlurpClient) QueryEvents(ctx context.Context, filters map[string]string) ([]SlurpEvent, error) {
|
||||
baseURL := fmt.Sprintf("%s/api/events", c.baseURL)
|
||||
|
||||
// Build query parameters
|
||||
params := url.Values{}
|
||||
for key, value := range filters {
|
||||
params.Add(key, value)
|
||||
}
|
||||
|
||||
queryURL := baseURL
|
||||
if len(params) > 0 {
|
||||
queryURL = fmt.Sprintf("%s?%s", baseURL, params.Encode())
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", queryURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create query request: %w", err)
|
||||
}
|
||||
|
||||
c.setHeaders(req)
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to send query request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read query response: %w", err)
|
||||
}
|
||||
|
||||
var events []SlurpEvent
|
||||
if err := json.Unmarshal(body, &events); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal events: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return nil, fmt.Errorf("SLURP query failed (HTTP %d)", resp.StatusCode)
|
||||
}
|
||||
|
||||
return events, nil
|
||||
}
|
||||
|
||||
// setHeaders sets common HTTP headers for SLURP API requests
|
||||
func (c *SlurpClient) setHeaders(req *http.Request) {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Accept", "application/json")
|
||||
req.Header.Set("User-Agent", "Bzzz-HMMM-Integration/1.0")
|
||||
|
||||
if c.apiKey != "" {
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.apiKey))
|
||||
}
|
||||
}
|
||||
|
||||
// isRetryableStatus determines if an HTTP status code is retryable
|
||||
func (c *SlurpClient) isRetryableStatus(statusCode int) bool {
|
||||
switch statusCode {
|
||||
case http.StatusTooManyRequests, // 429
|
||||
http.StatusInternalServerError, // 500
|
||||
http.StatusBadGateway, // 502
|
||||
http.StatusServiceUnavailable, // 503
|
||||
http.StatusGatewayTimeout: // 504
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close cleans up the client resources
|
||||
func (c *SlurpClient) Close() error {
|
||||
// HTTP client doesn't need explicit cleanup, but we can implement
|
||||
// connection pooling cleanup if needed in the future
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnection tests the connection to SLURP
|
||||
func (c *SlurpClient) ValidateConnection(ctx context.Context) error {
|
||||
_, err := c.GetHealth(ctx)
|
||||
return err
|
||||
}
|
||||
519
pkg/integration/slurp_events.go
Normal file
519
pkg/integration/slurp_events.go
Normal file
@@ -0,0 +1,519 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/anthonyrawlins/bzzz/pkg/config"
|
||||
"github.com/anthonyrawlins/bzzz/pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// SlurpEventIntegrator manages the integration between HMMM discussions and SLURP events
|
||||
type SlurpEventIntegrator struct {
|
||||
config config.SlurpConfig
|
||||
client *SlurpClient
|
||||
pubsub *pubsub.PubSub
|
||||
eventMapping config.HmmmToSlurpMapping
|
||||
|
||||
// Batch processing
|
||||
eventBatch []SlurpEvent
|
||||
batchMutex sync.Mutex
|
||||
batchTimer *time.Timer
|
||||
|
||||
// Context and lifecycle
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// Statistics
|
||||
stats SlurpIntegrationStats
|
||||
statsMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// SlurpIntegrationStats tracks delivery metrics for the SLURP
// integration: event counts, batch counts, and timing of the most
// recent successes and failures.
type SlurpIntegrationStats struct {
	EventsGenerated     int64     `json:"events_generated"`
	EventsSuccessful    int64     `json:"events_successful"`
	EventsFailed        int64     `json:"events_failed"`
	BatchesSent         int64     `json:"batches_sent"`
	LastEventTime       time.Time `json:"last_event_time"`
	LastSuccessTime     time.Time `json:"last_success_time"`
	LastFailureTime     time.Time `json:"last_failure_time"`
	LastFailureError    string    `json:"last_failure_error"`
	AverageResponseTime float64   `json:"average_response_time_ms"`
}
|
||||
|
||||
// HmmmDiscussionContext represents a HMMM discussion that can generate SLURP events
|
||||
type HmmmDiscussionContext struct {
|
||||
DiscussionID string `json:"discussion_id"`
|
||||
SessionID string `json:"session_id,omitempty"`
|
||||
Participants []string `json:"participants"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
EndTime time.Time `json:"end_time"`
|
||||
Messages []HmmmMessage `json:"messages"`
|
||||
ConsensusReached bool `json:"consensus_reached"`
|
||||
ConsensusStrength float64 `json:"consensus_strength"`
|
||||
OutcomeType string `json:"outcome_type"`
|
||||
ProjectPath string `json:"project_path"`
|
||||
RelatedTasks []string `json:"related_tasks,omitempty"`
|
||||
Metadata map[string]interface{} `json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
// HmmmMessage represents a single message in a HMMM discussion transcript.
type HmmmMessage struct {
	From      string                 `json:"from"`               // sender identifier
	Content   string                 `json:"content"`            // message body (scanned for classification keywords)
	Type      string                 `json:"type"`               // message type label
	Timestamp time.Time              `json:"timestamp"`          // when the message was sent
	Metadata  map[string]interface{} `json:"metadata,omitempty"` // optional extra data
}
|
||||
|
||||
// NewSlurpEventIntegrator creates a new SLURP event integrator
|
||||
func NewSlurpEventIntegrator(ctx context.Context, slurpConfig config.SlurpConfig, ps *pubsub.PubSub) (*SlurpEventIntegrator, error) {
|
||||
if !slurpConfig.Enabled {
|
||||
return nil, fmt.Errorf("SLURP integration is disabled in configuration")
|
||||
}
|
||||
|
||||
client := NewSlurpClient(slurpConfig)
|
||||
|
||||
// Test connection to SLURP
|
||||
if err := client.ValidateConnection(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to SLURP: %w", err)
|
||||
}
|
||||
|
||||
integrationCtx, cancel := context.WithCancel(ctx)
|
||||
|
||||
integrator := &SlurpEventIntegrator{
|
||||
config: slurpConfig,
|
||||
client: client,
|
||||
pubsub: ps,
|
||||
eventMapping: config.GetHmmmToSlurpMapping(),
|
||||
eventBatch: make([]SlurpEvent, 0, slurpConfig.BatchProcessing.MaxBatchSize),
|
||||
ctx: integrationCtx,
|
||||
cancel: cancel,
|
||||
stats: SlurpIntegrationStats{},
|
||||
}
|
||||
|
||||
// Initialize batch processing if enabled
|
||||
if slurpConfig.BatchProcessing.Enabled {
|
||||
integrator.initBatchProcessing()
|
||||
}
|
||||
|
||||
fmt.Printf("🎯 SLURP Event Integrator initialized for %s\n", slurpConfig.BaseURL)
|
||||
return integrator, nil
|
||||
}
|
||||
|
||||
// ProcessHmmmDiscussion analyzes a HMMM discussion and generates appropriate SLURP events
|
||||
func (s *SlurpEventIntegrator) ProcessHmmmDiscussion(ctx context.Context, discussion HmmmDiscussionContext) error {
|
||||
s.statsMutex.Lock()
|
||||
s.stats.EventsGenerated++
|
||||
s.stats.LastEventTime = time.Now()
|
||||
s.statsMutex.Unlock()
|
||||
|
||||
// Validate discussion meets generation criteria
|
||||
if !s.shouldGenerateEvent(discussion) {
|
||||
fmt.Printf("📊 Discussion %s does not meet event generation criteria\n", discussion.DiscussionID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Determine event type from discussion
|
||||
eventType, confidence := s.determineEventType(discussion)
|
||||
if eventType == "" {
|
||||
fmt.Printf("📊 Could not determine event type for discussion %s\n", discussion.DiscussionID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Calculate severity
|
||||
severity := s.calculateSeverity(discussion, eventType)
|
||||
|
||||
// Generate event content
|
||||
content := s.generateEventContent(discussion)
|
||||
|
||||
// Create SLURP event
|
||||
slurpEvent := SlurpEvent{
|
||||
EventType: eventType,
|
||||
Path: discussion.ProjectPath,
|
||||
Content: content,
|
||||
Severity: severity,
|
||||
CreatedBy: s.config.DefaultEventSettings.DefaultCreatedBy,
|
||||
Timestamp: time.Now(),
|
||||
Tags: append(s.config.DefaultEventSettings.DefaultTags, fmt.Sprintf("confidence-%.2f", confidence)),
|
||||
Metadata: map[string]interface{}{
|
||||
"discussion_id": discussion.DiscussionID,
|
||||
"session_id": discussion.SessionID,
|
||||
"participants": discussion.Participants,
|
||||
"consensus_strength": discussion.ConsensusStrength,
|
||||
"discussion_duration": discussion.EndTime.Sub(discussion.StartTime).String(),
|
||||
"message_count": len(discussion.Messages),
|
||||
"outcome_type": discussion.OutcomeType,
|
||||
"generation_confidence": confidence,
|
||||
},
|
||||
}
|
||||
|
||||
// Add custom metadata from template
|
||||
for key, value := range s.config.DefaultEventSettings.MetadataTemplate {
|
||||
slurpEvent.Metadata[key] = value
|
||||
}
|
||||
|
||||
// Add discussion-specific metadata
|
||||
for key, value := range discussion.Metadata {
|
||||
slurpEvent.Metadata[key] = value
|
||||
}
|
||||
|
||||
// Send event (batch or immediate)
|
||||
if s.config.BatchProcessing.Enabled {
|
||||
return s.addToBatch(slurpEvent)
|
||||
} else {
|
||||
return s.sendImmediateEvent(ctx, slurpEvent, discussion.DiscussionID)
|
||||
}
|
||||
}
|
||||
|
||||
// shouldGenerateEvent determines if a discussion meets the criteria for event generation
|
||||
func (s *SlurpEventIntegrator) shouldGenerateEvent(discussion HmmmDiscussionContext) bool {
|
||||
// Check minimum participants
|
||||
if len(discussion.Participants) < s.config.EventGeneration.MinParticipants {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check consensus strength
|
||||
if discussion.ConsensusStrength < s.config.EventGeneration.MinConsensusStrength {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check discussion duration
|
||||
duration := discussion.EndTime.Sub(discussion.StartTime)
|
||||
if duration < s.config.EventGeneration.MinDiscussionDuration {
|
||||
return false
|
||||
}
|
||||
|
||||
if duration > s.config.EventGeneration.MaxDiscussionDuration {
|
||||
return false // Too long, might indicate stalled discussion
|
||||
}
|
||||
|
||||
// Check if unanimity is required and achieved
|
||||
if s.config.EventGeneration.RequireUnanimity && discussion.ConsensusStrength < 1.0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// determineEventType analyzes discussion content to determine SLURP event type
|
||||
func (s *SlurpEventIntegrator) determineEventType(discussion HmmmDiscussionContext) (string, float64) {
|
||||
// Combine all message content for analysis
|
||||
var allContent strings.Builder
|
||||
for _, msg := range discussion.Messages {
|
||||
allContent.WriteString(strings.ToLower(msg.Content))
|
||||
allContent.WriteString(" ")
|
||||
}
|
||||
content := allContent.String()
|
||||
|
||||
// Score each event type based on keyword matches
|
||||
scores := make(map[string]float64)
|
||||
|
||||
scores["approval"] = s.scoreKeywordMatch(content, s.eventMapping.ApprovalKeywords)
|
||||
scores["warning"] = s.scoreKeywordMatch(content, s.eventMapping.WarningKeywords)
|
||||
scores["blocker"] = s.scoreKeywordMatch(content, s.eventMapping.BlockerKeywords)
|
||||
scores["priority_change"] = s.scoreKeywordMatch(content, s.eventMapping.PriorityKeywords)
|
||||
scores["access_update"] = s.scoreKeywordMatch(content, s.eventMapping.AccessKeywords)
|
||||
scores["structural_change"] = s.scoreKeywordMatch(content, s.eventMapping.StructuralKeywords)
|
||||
scores["announcement"] = s.scoreKeywordMatch(content, s.eventMapping.AnnouncementKeywords)
|
||||
|
||||
// Find highest scoring event type
|
||||
var bestType string
|
||||
var bestScore float64
|
||||
for eventType, score := range scores {
|
||||
if score > bestScore {
|
||||
bestType = eventType
|
||||
bestScore = score
|
||||
}
|
||||
}
|
||||
|
||||
// Require minimum confidence threshold
|
||||
minConfidence := 0.3
|
||||
if bestScore < minConfidence {
|
||||
return "", 0
|
||||
}
|
||||
|
||||
// Check if event type is enabled
|
||||
if s.isEventTypeDisabled(bestType) {
|
||||
return "", 0
|
||||
}
|
||||
|
||||
return bestType, bestScore
|
||||
}
|
||||
|
||||
// scoreKeywordMatch calculates a score based on keyword frequency
|
||||
func (s *SlurpEventIntegrator) scoreKeywordMatch(content string, keywords []string) float64 {
|
||||
if len(keywords) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
matches := 0
|
||||
for _, keyword := range keywords {
|
||||
if strings.Contains(content, strings.ToLower(keyword)) {
|
||||
matches++
|
||||
}
|
||||
}
|
||||
|
||||
return float64(matches) / float64(len(keywords))
|
||||
}
|
||||
|
||||
// isEventTypeDisabled checks if an event type is disabled in configuration
|
||||
func (s *SlurpEventIntegrator) isEventTypeDisabled(eventType string) bool {
|
||||
for _, disabled := range s.config.EventGeneration.DisabledEventTypes {
|
||||
if disabled == eventType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check if it's in enabled list (if specified)
|
||||
if len(s.config.EventGeneration.EnabledEventTypes) > 0 {
|
||||
for _, enabled := range s.config.EventGeneration.EnabledEventTypes {
|
||||
if enabled == eventType {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true // Not in enabled list
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// calculateSeverity determines event severity based on discussion characteristics
|
||||
func (s *SlurpEventIntegrator) calculateSeverity(discussion HmmmDiscussionContext, eventType string) int {
|
||||
// Start with base severity for event type
|
||||
baseSeverity := s.config.EventGeneration.SeverityRules.BaseSeverity[eventType]
|
||||
if baseSeverity == 0 {
|
||||
baseSeverity = s.config.DefaultEventSettings.DefaultSeverity
|
||||
}
|
||||
|
||||
severity := float64(baseSeverity)
|
||||
|
||||
// Apply participant multiplier
|
||||
participantBoost := float64(len(discussion.Participants)-1) * s.config.EventGeneration.SeverityRules.ParticipantMultiplier
|
||||
severity += participantBoost
|
||||
|
||||
// Apply duration multiplier
|
||||
durationHours := discussion.EndTime.Sub(discussion.StartTime).Hours()
|
||||
durationBoost := durationHours * s.config.EventGeneration.SeverityRules.DurationMultiplier
|
||||
severity += durationBoost
|
||||
|
||||
// Check for urgency keywords
|
||||
allContent := strings.ToLower(s.generateEventContent(discussion))
|
||||
for _, keyword := range s.config.EventGeneration.SeverityRules.UrgencyKeywords {
|
||||
if strings.Contains(allContent, strings.ToLower(keyword)) {
|
||||
severity += float64(s.config.EventGeneration.SeverityRules.UrgencyBoost)
|
||||
break // Only apply once
|
||||
}
|
||||
}
|
||||
|
||||
// Apply bounds
|
||||
finalSeverity := int(math.Round(severity))
|
||||
if finalSeverity < s.config.EventGeneration.SeverityRules.MinSeverity {
|
||||
finalSeverity = s.config.EventGeneration.SeverityRules.MinSeverity
|
||||
}
|
||||
if finalSeverity > s.config.EventGeneration.SeverityRules.MaxSeverity {
|
||||
finalSeverity = s.config.EventGeneration.SeverityRules.MaxSeverity
|
||||
}
|
||||
|
||||
return finalSeverity
|
||||
}
|
||||
|
||||
// generateEventContent creates human-readable content for the SLURP event
|
||||
func (s *SlurpEventIntegrator) generateEventContent(discussion HmmmDiscussionContext) string {
|
||||
if discussion.OutcomeType != "" {
|
||||
return fmt.Sprintf("HMMM discussion reached consensus: %s (%d participants, %.1f%% agreement)",
|
||||
discussion.OutcomeType,
|
||||
len(discussion.Participants),
|
||||
discussion.ConsensusStrength*100)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("HMMM discussion completed with %d participants over %v",
|
||||
len(discussion.Participants),
|
||||
discussion.EndTime.Sub(discussion.StartTime).Round(time.Minute))
|
||||
}
|
||||
|
||||
// addToBatch adds an event to the batch for later processing
|
||||
func (s *SlurpEventIntegrator) addToBatch(event SlurpEvent) error {
|
||||
s.batchMutex.Lock()
|
||||
defer s.batchMutex.Unlock()
|
||||
|
||||
s.eventBatch = append(s.eventBatch, event)
|
||||
|
||||
// Check if batch is full
|
||||
if len(s.eventBatch) >= s.config.BatchProcessing.MaxBatchSize {
|
||||
return s.flushBatch()
|
||||
}
|
||||
|
||||
// Reset batch timer
|
||||
if s.batchTimer != nil {
|
||||
s.batchTimer.Stop()
|
||||
}
|
||||
s.batchTimer = time.AfterFunc(s.config.BatchProcessing.MaxBatchWait, func() {
|
||||
s.batchMutex.Lock()
|
||||
defer s.batchMutex.Unlock()
|
||||
s.flushBatch()
|
||||
})
|
||||
|
||||
fmt.Printf("📦 Added event to batch (%d/%d)\n", len(s.eventBatch), s.config.BatchProcessing.MaxBatchSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// flushBatch sends all batched events to SLURP in a single call, updates the
// delivery statistics, and publishes a success or failure notification.
//
// Precondition: the caller must hold s.batchMutex. flushBatch does not lock
// it itself — both call sites (addToBatch and the batch timer callback)
// already hold the lock when invoking it.
func (s *SlurpEventIntegrator) flushBatch() error {
	if len(s.eventBatch) == 0 {
		return nil
	}

	// Copy the batch and reset the shared slice so new events can accumulate
	// while this batch is in flight.
	events := make([]SlurpEvent, len(s.eventBatch))
	copy(events, s.eventBatch)
	s.eventBatch = s.eventBatch[:0] // Clear batch

	// Any pending wait timer is now redundant — this flush supersedes it.
	if s.batchTimer != nil {
		s.batchTimer.Stop()
		s.batchTimer = nil
	}

	fmt.Printf("🚀 Flushing batch of %d events to SLURP\n", len(events))

	start := time.Now()
	resp, err := s.client.CreateEventsBatch(s.ctx, events)
	duration := time.Since(start)

	s.statsMutex.Lock()
	s.stats.BatchesSent++
	// Crude running average: blends the previous average 50/50 with the
	// latest sample, in milliseconds.
	s.stats.AverageResponseTime = (s.stats.AverageResponseTime + duration.Seconds()*1000) / 2

	if err != nil {
		// Whole batch counted as failed; record the error for GetStats.
		s.stats.EventsFailed += int64(len(events))
		s.stats.LastFailureTime = time.Now()
		s.stats.LastFailureError = err.Error()
		s.statsMutex.Unlock()

		// Publish failure notification
		s.publishSlurpEvent("slurp_batch_failed", map[string]interface{}{
			"error":       err.Error(),
			"event_count": len(events),
			"batch_id":    fmt.Sprintf("batch_%d", time.Now().Unix()),
		})

		return fmt.Errorf("failed to send batch: %w", err)
	}

	// Partial failures are possible: the response reports per-event counts.
	s.stats.EventsSuccessful += int64(resp.ProcessedCount)
	s.stats.EventsFailed += int64(resp.FailedCount)
	s.stats.LastSuccessTime = time.Now()
	s.statsMutex.Unlock()

	// Publish success notification
	s.publishSlurpEvent("slurp_batch_success", map[string]interface{}{
		"processed_count": resp.ProcessedCount,
		"failed_count":    resp.FailedCount,
		"event_ids":       resp.EventIDs,
		"batch_id":        fmt.Sprintf("batch_%d", time.Now().Unix()),
	})

	fmt.Printf("✅ Batch processed: %d succeeded, %d failed\n", resp.ProcessedCount, resp.FailedCount)
	return nil
}
|
||||
|
||||
// sendImmediateEvent sends a single event immediately to SLURP
|
||||
func (s *SlurpEventIntegrator) sendImmediateEvent(ctx context.Context, event SlurpEvent, discussionID string) error {
|
||||
start := time.Now()
|
||||
resp, err := s.client.CreateEvent(ctx, event)
|
||||
duration := time.Since(start)
|
||||
|
||||
s.statsMutex.Lock()
|
||||
s.stats.AverageResponseTime = (s.stats.AverageResponseTime + duration.Seconds()*1000) / 2
|
||||
|
||||
if err != nil {
|
||||
s.stats.EventsFailed++
|
||||
s.stats.LastFailureTime = time.Now()
|
||||
s.stats.LastFailureError = err.Error()
|
||||
s.statsMutex.Unlock()
|
||||
|
||||
// Publish failure notification
|
||||
s.publishSlurpEvent("slurp_event_failed", map[string]interface{}{
|
||||
"discussion_id": discussionID,
|
||||
"event_type": event.EventType,
|
||||
"error": err.Error(),
|
||||
})
|
||||
|
||||
return fmt.Errorf("failed to send event: %w", err)
|
||||
}
|
||||
|
||||
s.stats.EventsSuccessful++
|
||||
s.stats.LastSuccessTime = time.Now()
|
||||
s.statsMutex.Unlock()
|
||||
|
||||
// Publish success notification
|
||||
s.publishSlurpEvent("slurp_event_success", map[string]interface{}{
|
||||
"discussion_id": discussionID,
|
||||
"event_type": event.EventType,
|
||||
"event_id": resp.EventID,
|
||||
"severity": event.Severity,
|
||||
})
|
||||
|
||||
fmt.Printf("✅ SLURP event created: %s (ID: %s)\n", event.EventType, resp.EventID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// publishSlurpEvent publishes a SLURP integration event to the pubsub system
|
||||
func (s *SlurpEventIntegrator) publishSlurpEvent(eventType string, data map[string]interface{}) {
|
||||
var msgType pubsub.MessageType
|
||||
switch eventType {
|
||||
case "slurp_event_success", "slurp_batch_success":
|
||||
msgType = pubsub.SlurpEventGenerated
|
||||
case "slurp_event_failed", "slurp_batch_failed":
|
||||
msgType = pubsub.SlurpEventAck
|
||||
default:
|
||||
msgType = pubsub.SlurpContextUpdate
|
||||
}
|
||||
|
||||
data["timestamp"] = time.Now()
|
||||
data["integration_source"] = "hmmm-slurp-integrator"
|
||||
|
||||
if err := s.pubsub.PublishHmmmMessage(msgType, data); err != nil {
|
||||
fmt.Printf("❌ Failed to publish SLURP integration event: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// initBatchProcessing initializes batch processing components
|
||||
func (s *SlurpEventIntegrator) initBatchProcessing() {
|
||||
fmt.Printf("📦 Batch processing enabled: max_size=%d, max_wait=%v\n",
|
||||
s.config.BatchProcessing.MaxBatchSize,
|
||||
s.config.BatchProcessing.MaxBatchWait)
|
||||
}
|
||||
|
||||
// GetStats returns current integration statistics
|
||||
func (s *SlurpEventIntegrator) GetStats() SlurpIntegrationStats {
|
||||
s.statsMutex.RLock()
|
||||
defer s.statsMutex.RUnlock()
|
||||
return s.stats
|
||||
}
|
||||
|
||||
// Close shuts down the integrator and flushes any pending events
|
||||
func (s *SlurpEventIntegrator) Close() error {
|
||||
s.cancel()
|
||||
|
||||
// Flush any remaining batched events
|
||||
if s.config.BatchProcessing.Enabled && s.config.BatchProcessing.FlushOnShutdown {
|
||||
s.batchMutex.Lock()
|
||||
if len(s.eventBatch) > 0 {
|
||||
fmt.Printf("🧹 Flushing %d remaining events on shutdown\n", len(s.eventBatch))
|
||||
s.flushBatch()
|
||||
}
|
||||
s.batchMutex.Unlock()
|
||||
}
|
||||
|
||||
if s.batchTimer != nil {
|
||||
s.batchTimer.Stop()
|
||||
}
|
||||
|
||||
return s.client.Close()
|
||||
}
|
||||
628
pkg/mcp/server.go
Normal file
628
pkg/mcp/server.go
Normal file
@@ -0,0 +1,628 @@
|
||||
package mcp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/anthonyrawlins/bzzz/logging"
|
||||
"github.com/anthonyrawlins/bzzz/p2p"
|
||||
"github.com/anthonyrawlins/bzzz/pubsub"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
// McpServer integrates the BZZZ P2P network with the MCP protocol so GPT-4
// agents can participate as peers: it serves MCP over WebSocket, exposes a
// REST API, and bridges agent messages onto pubsub.
type McpServer struct {
	// Core components
	p2pNode      *p2p.Node             // underlying P2P node (identity, transport)
	pubsub       *pubsub.PubSub        // BZZZ/HMMM message bus
	hlog         *logging.HypercoreLog // append-only event log
	openaiClient *openai.Client        // OpenAI API client for agent completions

	// Agent management
	agents      map[string]*GPTAgent // registered agents keyed by agent ID; guarded by agentsMutex
	agentsMutex sync.RWMutex         // protects agents

	// Server configuration
	httpServer *http.Server       // REST + WebSocket listener; set in Start
	wsUpgrader websocket.Upgrader // upgrades /mcp requests to WebSocket

	// Context and lifecycle
	ctx    context.Context    // server lifetime; cancelled by Stop
	cancel context.CancelFunc // cancels ctx

	// Statistics and monitoring
	stats *ServerStats // runtime metrics
}
|
||||
|
||||
// ServerStats tracks MCP server performance metrics.
// Fields are guarded by the embedded mutex.
type ServerStats struct {
	StartTime          time.Time    // when the server was constructed
	TotalRequests      int64        // request counter (updated elsewhere)
	ActiveAgents       int          // currently registered agents (updated elsewhere)
	MessagesProcessed  int64        // pubsub messages consumed (updated elsewhere)
	TokensConsumed     int64        // cumulative OpenAI tokens used (updated elsewhere)
	AverageCostPerTask float64      // rolling average task cost (updated elsewhere)
	ErrorRate          float64      // fraction of failed operations (updated elsewhere)
	mutex              sync.RWMutex // protects the counters above
}
|
||||
|
||||
// GPTAgent represents a GPT-4 agent integrated with the BZZZ network.
type GPTAgent struct {
	ID             string    // unique agent identifier
	Role           AgentRole // functional role (architect, reviewer, ...)
	Model          string    // OpenAI model name used for completions
	SystemPrompt   string    // system prompt establishing the agent's persona
	Capabilities   []string  // advertised capability tags
	Specialization string    // free-form specialization description
	MaxTasks       int       // maximum concurrent tasks

	// State management
	Status       AgentStatus           // current lifecycle state (only idle agents take new work)
	CurrentTasks map[string]*AgentTask // in-flight tasks keyed by task ID
	Memory       *AgentMemory          // working/episodic/semantic memory

	// Cost tracking
	TokenUsage *TokenUsage // token consumption counters
	CostLimits *CostLimits // spending limits for this agent

	// P2P Integration
	NodeID           string    // short ID of the hosting P2P node
	LastAnnouncement time.Time // last time the agent was announced to the network

	// Conversation participation
	ActiveThreads map[string]*ConversationThread // threads this agent participates in, keyed by thread ID

	mutex sync.RWMutex // protects the mutable state above
}
|
||||
|
||||
// AgentRole defines the role and responsibilities of an agent.
type AgentRole string

// Supported agent roles.
const (
	RoleArchitect      AgentRole = "architect"
	RoleReviewer       AgentRole = "reviewer"
	RoleDocumentation  AgentRole = "documentation"
	RoleDeveloper      AgentRole = "developer"
	RoleTester         AgentRole = "tester"
	RoleSecurityExpert AgentRole = "security_expert"
	RoleDevOps         AgentRole = "devops"
)
|
||||
|
||||
// AgentStatus represents the current lifecycle state of an agent.
type AgentStatus string

// Agent lifecycle states. Only StatusIdle agents are considered available
// for new collaborative work (see findAgentsByRole).
const (
	StatusIdle          AgentStatus = "idle"
	StatusActive        AgentStatus = "active"
	StatusCollaborating AgentStatus = "collaborating"
	StatusEscalating    AgentStatus = "escalating"
	StatusTerminating   AgentStatus = "terminating"
)
|
||||
|
||||
// AgentTask represents a task being worked on by an agent.
type AgentTask struct {
	ID         string                 // unique task identifier
	Title      string                 // human-readable task title
	Repository string                 // repository the task belongs to
	Number     int                    // issue/PR number within the repository
	StartTime  time.Time              // when work began
	Status     string                 // free-form status label
	ThreadID   string                 // conversation thread associated with the task
	Context    map[string]interface{} // arbitrary task context
}
|
||||
|
||||
// AgentMemory manages an agent's memory and learning state.
type AgentMemory struct {
	WorkingMemory  map[string]interface{}   // short-term key/value scratch space
	EpisodicMemory []ConversationEpisode    // history of past interactions
	SemanticMemory *KnowledgeGraph          // long-term structured knowledge
	ThreadMemories map[string]*ThreadMemory // per-thread memory, keyed by thread ID
	mutex          sync.RWMutex             // protects the fields above
}
|
||||
|
||||
// ConversationEpisode records a past interaction for episodic memory.
type ConversationEpisode struct {
	Timestamp    time.Time // when the episode occurred
	Participants []string  // agents involved
	Topic        string    // discussion topic
	Summary      string    // condensed account of what happened
	Outcome      string    // result of the interaction
	Lessons      []string  // takeaways for future behavior
	TokensUsed   int       // tokens consumed during the episode
}
|
||||
|
||||
// ConversationThread represents an active multi-agent conversation.
type ConversationThread struct {
	ID            string                 // unique thread identifier
	Topic         string                 // human-readable topic
	Participants  []AgentParticipant     // invited/active agents
	Messages      []ThreadMessage        // ordered transcript
	State         ThreadState            // lifecycle state of the thread
	SharedContext map[string]interface{} // context shared by all participants
	DecisionLog   []Decision             // decisions reached so far
	CreatedAt     time.Time              // creation time
	LastActivity  time.Time              // last message/update time
	mutex         sync.RWMutex           // protects the mutable fields above
}
|
||||
|
||||
// AgentParticipant identifies an agent taking part in a conversation thread.
type AgentParticipant struct {
	AgentID string            // participating agent's ID
	Role    AgentRole         // role the agent fills in this thread
	Status  ParticipantStatus // invitation/participation state
}
|
||||
|
||||
// ParticipantStatus represents the status of a participant in a conversation.
type ParticipantStatus string

// Participant lifecycle states, from invitation through departure.
const (
	ParticipantStatusInvited ParticipantStatus = "invited"
	ParticipantStatusActive  ParticipantStatus = "active"
	ParticipantStatusIdle    ParticipantStatus = "idle"
	ParticipantStatusLeft    ParticipantStatus = "left"
)
|
||||
|
||||
// ThreadMessage represents a single message in a conversation thread.
type ThreadMessage struct {
	ID          string             // unique message identifier
	From        string             // sending agent's ID
	Role        AgentRole          // sender's role in the thread
	Content     string             // message body
	MessageType pubsub.MessageType // pubsub message classification
	Timestamp   time.Time          // when the message was sent
	ReplyTo     string             // ID of the message this replies to, if any
	TokenCount  int                // tokens consumed producing the message
	Model       string             // model that produced the message
}
|
||||
|
||||
// ThreadState represents the lifecycle state of a conversation thread.
type ThreadState string

// Thread lifecycle states.
const (
	ThreadStateActive    ThreadState = "active"
	ThreadStateCompleted ThreadState = "completed"
	ThreadStateEscalated ThreadState = "escalated"
	ThreadStateClosed    ThreadState = "closed"
)
|
||||
|
||||
// Decision represents a decision reached during a conversation.
type Decision struct {
	ID          string    // unique decision identifier
	Description string    // what was decided
	DecidedBy   []string  // agents who made the decision
	Timestamp   time.Time // when it was decided
	Rationale   string    // reasoning behind the decision
	Confidence  float64   // confidence in the decision
}
|
||||
|
||||
// NewMcpServer creates a new MCP server instance
|
||||
func NewMcpServer(
|
||||
ctx context.Context,
|
||||
node *p2p.Node,
|
||||
ps *pubsub.PubSub,
|
||||
hlog *logging.HypercoreLog,
|
||||
openaiAPIKey string,
|
||||
) *McpServer {
|
||||
serverCtx, cancel := context.WithCancel(ctx)
|
||||
|
||||
server := &McpServer{
|
||||
p2pNode: node,
|
||||
pubsub: ps,
|
||||
hlog: hlog,
|
||||
openaiClient: openai.NewClient(openaiAPIKey),
|
||||
agents: make(map[string]*GPTAgent),
|
||||
ctx: serverCtx,
|
||||
cancel: cancel,
|
||||
wsUpgrader: websocket.Upgrader{
|
||||
CheckOrigin: func(r *http.Request) bool { return true },
|
||||
},
|
||||
stats: &ServerStats{
|
||||
StartTime: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
return server
|
||||
}
|
||||
|
||||
// Start initializes and starts the MCP server
|
||||
func (s *McpServer) Start(port int) error {
|
||||
// Set up HTTP handlers
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// MCP WebSocket endpoint
|
||||
mux.HandleFunc("/mcp", s.handleMCPWebSocket)
|
||||
|
||||
// REST API endpoints
|
||||
mux.HandleFunc("/api/agents", s.handleAgentsAPI)
|
||||
mux.HandleFunc("/api/conversations", s.handleConversationsAPI)
|
||||
mux.HandleFunc("/api/stats", s.handleStatsAPI)
|
||||
mux.HandleFunc("/health", s.handleHealthCheck)
|
||||
|
||||
// Start HTTP server
|
||||
s.httpServer = &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", port),
|
||||
Handler: mux,
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := s.httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
fmt.Printf("❌ MCP HTTP server error: %v\n", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Start message handlers
|
||||
go s.handleBzzzMessages()
|
||||
go s.handleHmmmMessages()
|
||||
|
||||
// Start periodic tasks
|
||||
go s.periodicTasks()
|
||||
|
||||
fmt.Printf("🚀 MCP Server started on port %d\n", port)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down the MCP server
|
||||
func (s *McpServer) Stop() error {
|
||||
s.cancel()
|
||||
|
||||
// Stop all agents
|
||||
s.agentsMutex.Lock()
|
||||
for _, agent := range s.agents {
|
||||
s.stopAgent(agent)
|
||||
}
|
||||
s.agentsMutex.Unlock()
|
||||
|
||||
// Stop HTTP server
|
||||
if s.httpServer != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
return s.httpServer.Shutdown(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateGPTAgent creates a new GPT-4 agent
|
||||
func (s *McpServer) CreateGPTAgent(config *AgentConfig) (*GPTAgent, error) {
|
||||
agent := &GPTAgent{
|
||||
ID: config.ID,
|
||||
Role: config.Role,
|
||||
Model: config.Model,
|
||||
SystemPrompt: config.SystemPrompt,
|
||||
Capabilities: config.Capabilities,
|
||||
Specialization: config.Specialization,
|
||||
MaxTasks: config.MaxTasks,
|
||||
Status: StatusIdle,
|
||||
CurrentTasks: make(map[string]*AgentTask),
|
||||
Memory: NewAgentMemory(),
|
||||
TokenUsage: NewTokenUsage(),
|
||||
CostLimits: config.CostLimits,
|
||||
NodeID: s.p2pNode.ID().ShortString(),
|
||||
ActiveThreads: make(map[string]*ConversationThread),
|
||||
}
|
||||
|
||||
s.agentsMutex.Lock()
|
||||
s.agents[agent.ID] = agent
|
||||
s.agentsMutex.Unlock()
|
||||
|
||||
// Announce agent to BZZZ network
|
||||
if err := s.announceAgent(agent); err != nil {
|
||||
return nil, fmt.Errorf("failed to announce agent: %w", err)
|
||||
}
|
||||
|
||||
s.hlog.Append(logging.PeerJoined, map[string]interface{}{
|
||||
"agent_id": agent.ID,
|
||||
"role": string(agent.Role),
|
||||
"capabilities": agent.Capabilities,
|
||||
"specialization": agent.Specialization,
|
||||
})
|
||||
|
||||
fmt.Printf("✅ Created GPT-4 agent: %s (%s)\n", agent.ID, agent.Role)
|
||||
return agent, nil
|
||||
}
|
||||
|
||||
// ProcessCollaborativeTask handles a task that requires multi-agent collaboration
|
||||
func (s *McpServer) ProcessCollaborativeTask(
|
||||
task *AgentTask,
|
||||
requiredRoles []AgentRole,
|
||||
) (*ConversationThread, error) {
|
||||
|
||||
// Create conversation thread
|
||||
thread := &ConversationThread{
|
||||
ID: fmt.Sprintf("task-%s-%d", task.Repository, task.Number),
|
||||
Topic: fmt.Sprintf("Collaborative Task: %s", task.Title),
|
||||
State: ThreadStateActive,
|
||||
SharedContext: map[string]interface{}{
|
||||
"task": task,
|
||||
"required_roles": requiredRoles,
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
LastActivity: time.Now(),
|
||||
}
|
||||
|
||||
// Find and invite agents
|
||||
for _, role := range requiredRoles {
|
||||
agents := s.findAgentsByRole(role)
|
||||
if len(agents) == 0 {
|
||||
return nil, fmt.Errorf("no available agents for role: %s", role)
|
||||
}
|
||||
|
||||
// Select best agent for this role
|
||||
selectedAgent := s.selectBestAgent(agents, task)
|
||||
|
||||
thread.Participants = append(thread.Participants, AgentParticipant{
|
||||
AgentID: selectedAgent.ID,
|
||||
Role: role,
|
||||
Status: ParticipantStatusInvited,
|
||||
})
|
||||
|
||||
// Add thread to agent
|
||||
selectedAgent.mutex.Lock()
|
||||
selectedAgent.ActiveThreads[thread.ID] = thread
|
||||
selectedAgent.mutex.Unlock()
|
||||
}
|
||||
|
||||
// Send initial collaboration request
|
||||
if err := s.initiateCollaboration(thread); err != nil {
|
||||
return nil, fmt.Errorf("failed to initiate collaboration: %w", err)
|
||||
}
|
||||
|
||||
return thread, nil
|
||||
}
|
||||
|
||||
// handleMCPWebSocket handles WebSocket connections for MCP protocol
|
||||
func (s *McpServer) handleMCPWebSocket(w http.ResponseWriter, r *http.Request) {
|
||||
conn, err := s.wsUpgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
fmt.Printf("❌ WebSocket upgrade failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
fmt.Printf("📡 MCP WebSocket connection established\n")
|
||||
|
||||
// Handle MCP protocol messages
|
||||
for {
|
||||
var message map[string]interface{}
|
||||
if err := conn.ReadJSON(&message); err != nil {
|
||||
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
|
||||
fmt.Printf("❌ WebSocket error: %v\n", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Process MCP message
|
||||
response, err := s.processMCPMessage(message)
|
||||
if err != nil {
|
||||
fmt.Printf("❌ MCP message processing error: %v\n", err)
|
||||
response = map[string]interface{}{
|
||||
"error": err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
if err := conn.WriteJSON(response); err != nil {
|
||||
fmt.Printf("❌ WebSocket write error: %v\n", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processMCPMessage processes incoming MCP protocol messages
|
||||
func (s *McpServer) processMCPMessage(message map[string]interface{}) (map[string]interface{}, error) {
|
||||
method, ok := message["method"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing or invalid method")
|
||||
}
|
||||
|
||||
params, _ := message["params"].(map[string]interface{})
|
||||
|
||||
switch method {
|
||||
case "tools/list":
|
||||
return s.listTools(), nil
|
||||
case "tools/call":
|
||||
return s.callTool(params)
|
||||
case "resources/list":
|
||||
return s.listResources(), nil
|
||||
case "resources/read":
|
||||
return s.readResource(params)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown method: %s", method)
|
||||
}
|
||||
}
|
||||
|
||||
// callTool handles tool execution requests
|
||||
func (s *McpServer) callTool(params map[string]interface{}) (map[string]interface{}, error) {
|
||||
toolName, ok := params["name"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing tool name")
|
||||
}
|
||||
|
||||
args, _ := params["arguments"].(map[string]interface{})
|
||||
|
||||
switch toolName {
|
||||
case "bzzz_announce":
|
||||
return s.handleBzzzAnnounce(args)
|
||||
case "bzzz_lookup":
|
||||
return s.handleBzzzLookup(args)
|
||||
case "bzzz_get":
|
||||
return s.handleBzzzGet(args)
|
||||
case "bzzz_post":
|
||||
return s.handleBzzzPost(args)
|
||||
case "bzzz_thread":
|
||||
return s.handleBzzzThread(args)
|
||||
case "bzzz_subscribe":
|
||||
return s.handleBzzzSubscribe(args)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown tool: %s", toolName)
|
||||
}
|
||||
}
|
||||
|
||||
// handleBzzzAnnounce implements the bzzz_announce tool
|
||||
func (s *McpServer) handleBzzzAnnounce(args map[string]interface{}) (map[string]interface{}, error) {
|
||||
agentID, ok := args["agent_id"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("agent_id is required")
|
||||
}
|
||||
|
||||
role, ok := args["role"].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("role is required")
|
||||
}
|
||||
|
||||
// Create announcement message
|
||||
announcement := map[string]interface{}{
|
||||
"agent_id": agentID,
|
||||
"role": role,
|
||||
"capabilities": args["capabilities"],
|
||||
"specialization": args["specialization"],
|
||||
"max_tasks": args["max_tasks"],
|
||||
"announced_at": time.Now(),
|
||||
"node_id": s.p2pNode.ID().ShortString(),
|
||||
}
|
||||
|
||||
// Publish to BZZZ network
|
||||
if err := s.pubsub.PublishBzzzMessage(pubsub.CapabilityBcast, announcement); err != nil {
|
||||
return nil, fmt.Errorf("failed to announce: %w", err)
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
"success": true,
|
||||
"message": fmt.Sprintf("Agent %s (%s) announced to network", agentID, role),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Additional tool handlers would be implemented here...
|
||||
|
||||
// Helper methods
|
||||
|
||||
// announceAgent announces an agent to the BZZZ network
|
||||
func (s *McpServer) announceAgent(agent *GPTAgent) error {
|
||||
announcement := map[string]interface{}{
|
||||
"type": "gpt_agent_announcement",
|
||||
"agent_id": agent.ID,
|
||||
"role": string(agent.Role),
|
||||
"capabilities": agent.Capabilities,
|
||||
"specialization": agent.Specialization,
|
||||
"max_tasks": agent.MaxTasks,
|
||||
"model": agent.Model,
|
||||
"node_id": agent.NodeID,
|
||||
"timestamp": time.Now(),
|
||||
}
|
||||
|
||||
return s.pubsub.PublishBzzzMessage(pubsub.CapabilityBcast, announcement)
|
||||
}
|
||||
|
||||
// findAgentsByRole finds all agents with a specific role
|
||||
func (s *McpServer) findAgentsByRole(role AgentRole) []*GPTAgent {
|
||||
s.agentsMutex.RLock()
|
||||
defer s.agentsMutex.RUnlock()
|
||||
|
||||
var agents []*GPTAgent
|
||||
for _, agent := range s.agents {
|
||||
if agent.Role == role && agent.Status == StatusIdle {
|
||||
agents = append(agents, agent)
|
||||
}
|
||||
}
|
||||
|
||||
return agents
|
||||
}
|
||||
|
||||
// selectBestAgent selects the best agent for a task
|
||||
func (s *McpServer) selectBestAgent(agents []*GPTAgent, task *AgentTask) *GPTAgent {
|
||||
if len(agents) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Simple selection: least busy agent
|
||||
bestAgent := agents[0]
|
||||
for _, agent := range agents[1:] {
|
||||
if len(agent.CurrentTasks) < len(bestAgent.CurrentTasks) {
|
||||
bestAgent = agent
|
||||
}
|
||||
}
|
||||
|
||||
return bestAgent
|
||||
}
|
||||
|
||||
// Additional helper methods would be implemented here...
|
||||
|
||||
// AgentConfig holds configuration for creating a new agent.
type AgentConfig struct {
	// ID uniquely identifies the agent.
	ID string
	// Role is the agent's functional role on the network.
	Role AgentRole
	// Model names the LLM backing this agent.
	Model string
	// SystemPrompt seeds the agent's conversations.
	SystemPrompt string
	// Capabilities lists what the agent advertises it can do.
	Capabilities []string
	// Specialization is a free-form description of the agent's focus area.
	Specialization string
	// MaxTasks caps how many tasks the agent may run concurrently.
	MaxTasks int
	// CostLimits optionally bounds the agent's spending; nil means no limits.
	CostLimits *CostLimits
}
|
||||
|
||||
// CostLimits defines spending limits for an agent.
// Values are monetary amounts; units are presumably USD — TODO confirm.
type CostLimits struct {
	// DailyLimit caps total spend per day.
	DailyLimit float64
	// MonthlyLimit caps total spend per month.
	MonthlyLimit float64
	// PerTaskLimit caps spend on any single task.
	PerTaskLimit float64
}
|
||||
|
||||
// TokenUsage tracks token consumption.
// It embeds a mutex, so values must not be copied after first use;
// pass *TokenUsage.
type TokenUsage struct {
	// TotalTokens is the running total of prompt + completion tokens.
	TotalTokens int64
	// PromptTokens counts tokens sent in prompts.
	PromptTokens int64
	// CompletionTokens counts tokens received in completions.
	CompletionTokens int64
	// TotalCost is the accumulated monetary cost of this usage.
	TotalCost float64
	// mutex guards concurrent updates to the counters above.
	mutex sync.RWMutex
}
|
||||
|
||||
// NewTokenUsage creates a new token usage tracker
|
||||
func NewTokenUsage() *TokenUsage {
|
||||
return &TokenUsage{}
|
||||
}
|
||||
|
||||
// NewAgentMemory creates a new agent memory instance
|
||||
func NewAgentMemory() *AgentMemory {
|
||||
return &AgentMemory{
|
||||
WorkingMemory: make(map[string]interface{}),
|
||||
EpisodicMemory: make([]ConversationEpisode, 0),
|
||||
ThreadMemories: make(map[string]*ThreadMemory),
|
||||
}
|
||||
}
|
||||
|
||||
// ThreadMemory represents memory for a specific conversation thread.
type ThreadMemory struct {
	// ThreadID identifies the conversation thread this memory belongs to.
	ThreadID string
	// Summary is a condensed recap of the thread so far.
	Summary string
	// KeyPoints lists notable facts extracted from the thread.
	KeyPoints []string
	// Decisions records decisions reached within the thread.
	Decisions []Decision
	// LastUpdated is when this memory was last written.
	LastUpdated time.Time
}
|
||||
|
||||
// KnowledgeGraph represents semantic knowledge.
// It embeds a mutex, so values must not be copied after first use;
// pass *KnowledgeGraph.
type KnowledgeGraph struct {
	// Concepts maps concept ID to its node in the graph.
	Concepts map[string]*Concept
	// Relations maps relation ID to the edge it describes.
	Relations map[string]*Relation
	// mutex guards concurrent access to both maps.
	mutex sync.RWMutex
}
|
||||
|
||||
// Concept represents a knowledge concept.
type Concept struct {
	// ID uniquely identifies the concept within the graph.
	ID string
	// Name is the human-readable label for the concept.
	Name string
	// Description explains what the concept means.
	Description string
	// Category groups related concepts together.
	Category string
	// Confidence scores how certain the system is about this concept;
	// presumably in [0, 1] — TODO confirm.
	Confidence float64
}
|
||||
|
||||
// Relation represents a relationship between concepts.
type Relation struct {
	// From is the ID of the source concept.
	From string
	// To is the ID of the target concept.
	To string
	// Type names the kind of relationship (e.g. causal, hierarchical);
	// the vocabulary is not defined here — TODO confirm.
	Type string
	// Strength scores how strong the relationship is;
	// presumably in [0, 1] — TODO confirm.
	Strength float64
	// Evidence lists supporting references for this relation.
	Evidence []string
}
|
||||
Reference in New Issue
Block a user