Integrate BACKBEAT SDK and resolve KACHING license validation

Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
anthonyrawlins
2025-09-06 07:56:26 +10:00
parent 543ab216f9
commit 9bdcbe0447
4730 changed files with 1480093 additions and 1916 deletions

View File

@@ -2,25 +2,28 @@ package config
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"time"
)
// This is a container-adapted version of BZZZ's config system
// This is a container-adapted version of CHORUS's config system
// All configuration comes from environment variables instead of YAML files
// Config represents the complete CHORUS configuration loaded from environment variables
// Config represents the complete CHORUS configuration loaded from
// environment variables. Each section maps to one functional area of the
// agent; see LoadFromEnvironment for the variables that populate it.
// NOTE: the rendered diff had merged the old and new field lists, leaving
// every field declared twice; this keeps only the current set.
type Config struct {
	Agent     AgentConfig     `yaml:"agent"`      // agent identity, role, and capabilities
	Network   NetworkConfig   `yaml:"network"`    // P2P/bind ports and addresses
	License   LicenseConfig   `yaml:"license"`    // KACHING license identity
	AI        AIConfig        `yaml:"ai"`         // AI provider selection and endpoints
	Logging   LoggingConfig   `yaml:"logging"`    // log level and format
	V2        V2Config        `yaml:"v2"`         // v2 (DHT) settings
	UCXL      UCXLConfig      `yaml:"ucxl"`       // UCXL settings
	Slurp     SlurpConfig     `yaml:"slurp"`      // SLURP enable flag
	Security  SecurityConfig  `yaml:"security"`   // key rotation, audit, election tuning
	WHOOSHAPI WHOOSHAPIConfig `yaml:"whoosh_api"` // WHOOSH API integration
}
// AgentConfig defines agent-specific settings
@@ -46,10 +49,9 @@ type NetworkConfig struct {
BindAddr string `yaml:"bind_address"`
}
// LicenseConfig defines licensing settings (adapted from BZZZ)
// LicenseConfig defines licensing settings (adapted from CHORUS)
type LicenseConfig struct {
Email string `yaml:"email"`
LicenseKey string `yaml:"license_key"`
LicenseID string `yaml:"license_id"`
ClusterID string `yaml:"cluster_id"`
OrganizationName string `yaml:"organization_name"`
KachingURL string `yaml:"kaching_url"`
@@ -63,7 +65,9 @@ type LicenseConfig struct {
// AIConfig defines AI service settings: the active provider plus the
// per-provider connection blocks. Provider is selected by
// CHORUS_AI_PROVIDER and defaults to "resetdata" (see LoadFromEnvironment).
// NOTE: the rendered diff had left a duplicate Ollama field; deduplicated.
type AIConfig struct {
	Provider  string          `yaml:"provider"`
	Ollama    OllamaConfig    `yaml:"ollama"`
	ResetData ResetDataConfig `yaml:"resetdata"`
}
// OllamaConfig defines Ollama-specific settings
@@ -72,13 +76,21 @@ type OllamaConfig struct {
Timeout time.Duration `yaml:"timeout"`
}
// ResetDataConfig defines ResetData LLM service settings. Values are
// populated from RESETDATA_* environment variables in LoadFromEnvironment
// (base URL defaults to https://models.au-syd.resetdata.ai/v1, model to
// meta/llama-3.1-8b-instruct, timeout to 30s).
type ResetDataConfig struct {
	BaseURL string        `yaml:"base_url"` // service base URL (RESETDATA_BASE_URL)
	APIKey  string        `yaml:"api_key"`  // credential read from RESETDATA_API_KEY; no default
	Model   string        `yaml:"model"`    // model identifier (RESETDATA_MODEL)
	Timeout time.Duration `yaml:"timeout"`  // request timeout (RESETDATA_TIMEOUT)
}
// LoggingConfig defines logging settings
type LoggingConfig struct {
Level string `yaml:"level"`
Format string `yaml:"format"`
}
// V2Config defines v2-specific settings (from BZZZ)
// V2Config defines v2-specific settings (from CHORUS)
type V2Config struct {
DHT DHTConfig `yaml:"dht"`
}
@@ -119,6 +131,14 @@ type SlurpConfig struct {
Enabled bool `yaml:"enabled"`
}
// WHOOSHAPIConfig defines WHOOSH API integration settings. Populated from
// WHOOSH_API_* environment variables in LoadFromEnvironment; disabled by
// default. URL and BaseURL share the same default ("http://localhost:3000")
// — presumably one is legacy; confirm which callers use before removing.
type WHOOSHAPIConfig struct {
	URL     string `yaml:"url"`      // service URL (WHOOSH_API_URL)
	BaseURL string `yaml:"base_url"` // base URL for API calls (WHOOSH_API_BASE_URL)
	Token   string `yaml:"token"`    // auth token from WHOOSH_API_TOKEN; no default
	Enabled bool   `yaml:"enabled"`  // integration toggle (WHOOSH_API_ENABLED)
}
// LoadFromEnvironment loads configuration from environment variables
func LoadFromEnvironment() (*Config, error) {
cfg := &Config{
@@ -127,13 +147,13 @@ func LoadFromEnvironment() (*Config, error) {
Specialization: getEnvOrDefault("CHORUS_SPECIALIZATION", "general_developer"),
MaxTasks: getEnvIntOrDefault("CHORUS_MAX_TASKS", 3),
Capabilities: getEnvArrayOrDefault("CHORUS_CAPABILITIES", []string{"general_development", "task_coordination"}),
Models: getEnvArrayOrDefault("CHORUS_MODELS", []string{"llama3.1:8b"}),
Models: getEnvArrayOrDefault("CHORUS_MODELS", []string{"meta/llama-3.1-8b-instruct"}),
Role: getEnvOrDefault("CHORUS_ROLE", ""),
Expertise: getEnvArrayOrDefault("CHORUS_EXPERTISE", []string{}),
ReportsTo: getEnvOrDefault("CHORUS_REPORTS_TO", ""),
Deliverables: getEnvArrayOrDefault("CHORUS_DELIVERABLES", []string{}),
ModelSelectionWebhook: getEnvOrDefault("CHORUS_MODEL_SELECTION_WEBHOOK", ""),
DefaultReasoningModel: getEnvOrDefault("CHORUS_DEFAULT_REASONING_MODEL", "llama3.1:8b"),
DefaultReasoningModel: getEnvOrDefault("CHORUS_DEFAULT_REASONING_MODEL", "meta/llama-3.1-8b-instruct"),
},
Network: NetworkConfig{
P2PPort: getEnvIntOrDefault("CHORUS_P2P_PORT", 9000),
@@ -142,8 +162,7 @@ func LoadFromEnvironment() (*Config, error) {
BindAddr: getEnvOrDefault("CHORUS_BIND_ADDRESS", "0.0.0.0"),
},
License: LicenseConfig{
Email: os.Getenv("CHORUS_LICENSE_EMAIL"),
LicenseKey: os.Getenv("CHORUS_LICENSE_KEY"),
LicenseID: getEnvOrFileContent("CHORUS_LICENSE_ID", "CHORUS_LICENSE_ID_FILE"),
ClusterID: getEnvOrDefault("CHORUS_CLUSTER_ID", "default-cluster"),
OrganizationName: getEnvOrDefault("CHORUS_ORGANIZATION_NAME", ""),
KachingURL: getEnvOrDefault("CHORUS_KACHING_URL", "https://kaching.chorus.services"),
@@ -151,10 +170,17 @@ func LoadFromEnvironment() (*Config, error) {
GracePeriodHours: getEnvIntOrDefault("CHORUS_GRACE_PERIOD_HOURS", 72),
},
AI: AIConfig{
Provider: getEnvOrDefault("CHORUS_AI_PROVIDER", "resetdata"),
Ollama: OllamaConfig{
Endpoint: getEnvOrDefault("OLLAMA_ENDPOINT", "http://localhost:11434"),
Timeout: getEnvDurationOrDefault("OLLAMA_TIMEOUT", 30*time.Second),
},
ResetData: ResetDataConfig{
BaseURL: getEnvOrDefault("RESETDATA_BASE_URL", "https://models.au-syd.resetdata.ai/v1"),
APIKey: os.Getenv("RESETDATA_API_KEY"),
Model: getEnvOrDefault("RESETDATA_MODEL", "meta/llama-3.1-8b-instruct"),
Timeout: getEnvDurationOrDefault("RESETDATA_TIMEOUT", 30*time.Second),
},
},
Logging: LoggingConfig{
Level: getEnvOrDefault("LOG_LEVEL", "info"),
@@ -183,6 +209,29 @@ func LoadFromEnvironment() (*Config, error) {
Slurp: SlurpConfig{
Enabled: getEnvBoolOrDefault("CHORUS_SLURP_ENABLED", false),
},
Security: SecurityConfig{
KeyRotationDays: getEnvIntOrDefault("CHORUS_KEY_ROTATION_DAYS", 30),
AuditLogging: getEnvBoolOrDefault("CHORUS_AUDIT_LOGGING", true),
AuditPath: getEnvOrDefault("CHORUS_AUDIT_PATH", "/tmp/chorus-audit.log"),
ElectionConfig: ElectionConfig{
DiscoveryTimeout: getEnvDurationOrDefault("CHORUS_DISCOVERY_TIMEOUT", 10*time.Second),
HeartbeatTimeout: getEnvDurationOrDefault("CHORUS_HEARTBEAT_TIMEOUT", 30*time.Second),
ElectionTimeout: getEnvDurationOrDefault("CHORUS_ELECTION_TIMEOUT", 60*time.Second),
DiscoveryBackoff: getEnvDurationOrDefault("CHORUS_DISCOVERY_BACKOFF", 5*time.Second),
LeadershipScoring: &LeadershipScoring{
UptimeWeight: 0.4,
CapabilityWeight: 0.3,
ExperienceWeight: 0.2,
LoadWeight: 0.1,
},
},
},
WHOOSHAPI: WHOOSHAPIConfig{
URL: getEnvOrDefault("WHOOSH_API_URL", "http://localhost:3000"),
BaseURL: getEnvOrDefault("WHOOSH_API_BASE_URL", "http://localhost:3000"),
Token: os.Getenv("WHOOSH_API_TOKEN"),
Enabled: getEnvBoolOrDefault("WHOOSH_API_ENABLED", false),
},
}
// Validate required configuration
@@ -195,12 +244,8 @@ func LoadFromEnvironment() (*Config, error) {
// Validate ensures all required configuration is present
func (c *Config) Validate() error {
if c.License.Email == "" {
return fmt.Errorf("CHORUS_LICENSE_EMAIL is required")
}
if c.License.LicenseKey == "" {
return fmt.Errorf("CHORUS_LICENSE_KEY is required")
if c.License.LicenseID == "" {
return fmt.Errorf("CHORUS_LICENSE_ID is required")
}
if c.Agent.ID == "" {
@@ -217,16 +262,16 @@ func (c *Config) Validate() error {
return nil
}
// ApplyRoleDefinition applies role-based configuration (from CHORUS).
// Currently a stub: it records the role name on the agent and always
// succeeds; the full role-definition logic has not been ported yet.
func (c *Config) ApplyRoleDefinition(role string) error {
	// This would contain the role definition logic from CHORUS
	c.Agent.Role = role
	return nil
}
// GetRoleAuthority returns the authority level for a role (from BZZZ)
// GetRoleAuthority returns the authority level for a role (from CHORUS)
func (c *Config) GetRoleAuthority(role string) (string, error) {
// This would contain the authority mapping from BZZZ
// This would contain the authority mapping from CHORUS
switch role {
case "admin":
return "master", nil
@@ -278,6 +323,23 @@ func getEnvArrayOrDefault(key string, defaultValue []string) []string {
return defaultValue
}
// getEnvOrFileContent resolves a configuration value with Docker-secrets
// support: a non-empty direct environment variable (envKey) wins; otherwise,
// if fileEnvKey names a readable file, its whitespace-trimmed contents are
// returned. Missing values and read errors yield "".
func getEnvOrFileContent(envKey, fileEnvKey string) string {
	// Direct environment variable takes precedence.
	if direct := os.Getenv(envKey); direct != "" {
		return direct
	}

	// Fall back to a file path (Docker secret) named by the second variable.
	path := os.Getenv(fileEnvKey)
	if path == "" {
		return ""
	}
	// NOTE: ioutil.ReadFile is deprecated in favor of os.ReadFile; kept here
	// because the file still imports io/ioutil.
	data, err := ioutil.ReadFile(path)
	if err != nil {
		// Unreadable secret file is treated the same as an unset value.
		return ""
	}
	return strings.TrimSpace(string(data))
}
// IsSetupRequired checks if setup is required (always false for containers).
// configPath is accepted only for API compatibility with the non-container
// config system and is ignored.
func IsSetupRequired(configPath string) bool {
	return false // Containers are always pre-configured via environment
}
@@ -285,5 +347,17 @@ func IsSetupRequired(configPath string) bool {
// IsValidConfiguration validates configuration (simplified for containers):
// it only checks that the license identity fields are present. Full
// validation lives in Config.Validate.
// NOTE: the rendered diff had merged the old return (which referenced the
// removed Email/LicenseKey fields) with the new one, leaving dead code;
// only the current check is kept.
func IsValidConfiguration(cfg *Config) bool {
	return cfg.License.LicenseID != "" && cfg.License.ClusterID != ""
}
// LoadConfig loads configuration from file (for API compatibility).
// Containers are configured exclusively through environment variables, so
// configPath is ignored and the call delegates to LoadFromEnvironment.
func LoadConfig(configPath string) (*Config, error) {
	// For containers, always load from environment
	return LoadFromEnvironment()
}
// SaveConfig saves configuration to file (stub for API compatibility).
// Container configuration is environment-based and never written back, so
// both arguments are ignored and the call always succeeds.
func SaveConfig(cfg *Config, configPath string) error {
	// For containers, configuration is environment-based, so this is a no-op
	return nil
}

View File

@@ -1,188 +0,0 @@
package config
import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)
// DefaultConfigPaths returns the ordered list of locations searched for a
// config file: the working directory first, then the per-user config
// directory, then the system-wide /etc path.
func DefaultConfigPaths() []string {
	paths := []string{
		"./bzzz.yaml",
		"./config/bzzz.yaml",
	}
	// On error home is ""; filepath.Join then yields a relative fallback.
	home, _ := os.UserHomeDir()
	paths = append(paths, filepath.Join(home, ".config", "bzzz", "config.yaml"))
	return append(paths, "/etc/bzzz/config.yaml")
}
// GetNodeSpecificDefaults builds a Config whose agent identity,
// capabilities, model list, and specialization match the known cluster node
// named by nodeID; unrecognized nodes receive generic developer defaults.
func GetNodeSpecificDefaults(nodeID string) *Config {
	cfg := getDefaultConfig()
	cfg.Agent.ID = nodeID

	// A node matches on exact name or via containsString's looser check.
	matches := func(name string) bool {
		return nodeID == name || containsString(nodeID, name)
	}

	switch {
	case matches("walnut"):
		cfg.Agent.Capabilities = []string{"task-coordination", "meta-discussion", "ollama-reasoning", "code-generation"}
		cfg.Agent.Models = []string{"starcoder2:15b", "deepseek-coder-v2", "qwen3:14b", "phi3"}
		cfg.Agent.Specialization = "code_generation"
	case matches("ironwood"):
		cfg.Agent.Capabilities = []string{"task-coordination", "meta-discussion", "ollama-reasoning", "advanced-reasoning"}
		cfg.Agent.Models = []string{"phi4:14b", "phi4-reasoning:14b", "gemma3:12b", "devstral"}
		cfg.Agent.Specialization = "advanced_reasoning"
	case matches("acacia"):
		cfg.Agent.Capabilities = []string{"task-coordination", "meta-discussion", "ollama-reasoning", "code-analysis"}
		cfg.Agent.Models = []string{"qwen2.5-coder", "deepseek-r1", "codellama", "llava"}
		cfg.Agent.Specialization = "code_analysis"
	default:
		// Unknown node: conservative general-purpose defaults.
		cfg.Agent.Capabilities = []string{"task-coordination", "meta-discussion", "general"}
		cfg.Agent.Models = []string{"phi3", "llama3.1"}
		cfg.Agent.Specialization = "general_developer"
	}
	return cfg
}
// GetEnvironmentSpecificDefaults returns a Config tuned for the named
// deployment environment (development, staging, or production). Unknown
// environment names fall back to production-like logging only.
func GetEnvironmentSpecificDefaults(environment string) *Config {
	cfg := getDefaultConfig()

	switch environment {
	case "development", "dev":
		cfg.Logging.Level = "debug"
		cfg.Agent.PollInterval = 10 * time.Second
		cfg.WHOOSHAPI.BaseURL = "http://localhost:8000"
		cfg.P2P.EscalationWebhook = "http://localhost:5678/webhook-test/human-escalation"
	case "staging":
		cfg.Logging.Level = "info"
		cfg.Agent.PollInterval = 20 * time.Second
		cfg.WHOOSHAPI.BaseURL = "https://hive-staging.home.deepblack.cloud"
		cfg.P2P.EscalationWebhook = "https://n8n-staging.home.deepblack.cloud/webhook-test/human-escalation"
	case "production", "prod":
		cfg.Logging.Level = "warn"
		cfg.Agent.PollInterval = 30 * time.Second
		cfg.WHOOSHAPI.BaseURL = "https://hive.home.deepblack.cloud"
		cfg.P2P.EscalationWebhook = "https://n8n.home.deepblack.cloud/webhook-test/human-escalation"
	default:
		// Default to production-like settings.
		cfg.Logging.Level = "info"
	}
	return cfg
}
// GetCapabilityPresets returns predefined capability sets keyed by
// specialization name. Every preset starts with the two shared
// coordination capabilities, followed by its specialization-specific ones.
func GetCapabilityPresets() map[string][]string {
	withBase := func(extra ...string) []string {
		return append([]string{"task-coordination", "meta-discussion"}, extra...)
	}
	return map[string][]string{
		"senior_developer":    withBase("ollama-reasoning", "code-generation", "code-review", "architecture"),
		"code_reviewer":       withBase("ollama-reasoning", "code-review", "security-analysis", "best-practices"),
		"debugger_specialist": withBase("ollama-reasoning", "debugging", "error-analysis", "troubleshooting"),
		"devops_engineer":     withBase("deployment", "infrastructure", "monitoring", "automation"),
		"test_engineer":       withBase("testing", "quality-assurance", "test-automation", "validation"),
		"general_developer":   withBase("ollama-reasoning", "general"),
	}
}
// ApplyCapabilityPreset applies a predefined capability preset to the
// config, setting both the capability list and the specialization to the
// preset's name. Returns an error for unknown preset names.
func (c *Config) ApplyCapabilityPreset(presetName string) error {
	caps, ok := GetCapabilityPresets()[presetName]
	if !ok {
		return fmt.Errorf("unknown capability preset: %s", presetName)
	}
	c.Agent.Capabilities = caps
	c.Agent.Specialization = presetName
	return nil
}
// GetModelPresets returns predefined model sets for different
// specializations, keyed by specialization name.
func GetModelPresets() map[string][]string {
	presets := make(map[string][]string, 5)
	presets["code_generation"] = []string{"starcoder2:15b", "deepseek-coder-v2", "codellama"}
	presets["advanced_reasoning"] = []string{"phi4:14b", "phi4-reasoning:14b", "deepseek-r1"}
	presets["code_analysis"] = []string{"qwen2.5-coder", "deepseek-coder-v2", "codellama"}
	presets["general_purpose"] = []string{"phi3", "llama3.1:8b", "qwen3"}
	presets["vision_tasks"] = []string{"llava", "llava:13b"}
	return presets
}
// containsString reports whether substr occurs anywhere within s,
// case-insensitively, matching the documented contract.
//
// BUG FIX: the previous implementation only compared the prefix and suffix
// of s, case-sensitively, so mid-string and differently-cased matches were
// missed. All previously-true inputs remain true (a prefix or suffix match
// is also a substring match), so callers such as GetNodeSpecificDefaults
// are unaffected except for gaining the intended matches.
func containsString(s, substr string) bool {
	return strings.Contains(strings.ToLower(s), strings.ToLower(substr))
}

View File

@@ -44,7 +44,7 @@ type DiscoveryConfig struct {
MDNSEnabled bool `env:"BZZZ_MDNS_ENABLED" default:"true" json:"mdns_enabled" yaml:"mdns_enabled"`
DHTDiscovery bool `env:"BZZZ_DHT_DISCOVERY" default:"false" json:"dht_discovery" yaml:"dht_discovery"`
AnnounceInterval time.Duration `env:"BZZZ_ANNOUNCE_INTERVAL" default:"30s" json:"announce_interval" yaml:"announce_interval"`
ServiceName string `env:"BZZZ_SERVICE_NAME" default:"bzzz" json:"service_name" yaml:"service_name"`
ServiceName string `env:"BZZZ_SERVICE_NAME" default:"CHORUS" json:"service_name" yaml:"service_name"`
}
type MonitoringConfig struct {
@@ -82,7 +82,7 @@ func LoadHybridConfig() (*HybridConfig, error) {
MDNSEnabled: getEnvBool("BZZZ_MDNS_ENABLED", true),
DHTDiscovery: getEnvBool("BZZZ_DHT_DISCOVERY", false),
AnnounceInterval: getEnvDuration("BZZZ_ANNOUNCE_INTERVAL", 30*time.Second),
ServiceName: getEnvString("BZZZ_SERVICE_NAME", "bzzz"),
ServiceName: getEnvString("BZZZ_SERVICE_NAME", "CHORUS"),
}
// Load Monitoring configuration

View File

@@ -1,573 +0,0 @@
package config
import (
"fmt"
"strings"
"time"
)
// AuthorityLevel defines the decision-making authority of a role.
type AuthorityLevel string

// Authority levels, listed from most to least privileged.
const (
	AuthorityMaster       AuthorityLevel = "master"       // Full admin access, can decrypt all roles (SLURP functionality)
	AuthorityDecision     AuthorityLevel = "decision"     // Can make permanent decisions
	AuthorityCoordination AuthorityLevel = "coordination" // Can coordinate across roles
	AuthoritySuggestion   AuthorityLevel = "suggestion"   // Can suggest, no permanent decisions
	AuthorityReadOnly     AuthorityLevel = "read_only"    // Observer access only
)
// AgeKeyPair holds Age encryption keys for a role. Both fields are optional
// (omitempty), so a role definition may carry only the public half.
type AgeKeyPair struct {
	PublicKey  string `yaml:"public,omitempty" json:"public,omitempty"`   // Age public key
	PrivateKey string `yaml:"private,omitempty" json:"private,omitempty"` // Age private key; sensitive, avoid logging
}
// ShamirShare represents a share of the admin secret key under Shamir
// secret sharing: Threshold of the TotalShares shares are needed to
// reconstruct the secret.
type ShamirShare struct {
	Index       int    `yaml:"index" json:"index"`               // position of this share — assumed 1-based; TODO confirm against the splitter
	Share       string `yaml:"share" json:"share"`               // encoded share payload
	Threshold   int    `yaml:"threshold" json:"threshold"`       // minimum shares required for reconstruction
	TotalShares int    `yaml:"total_shares" json:"total_shares"` // total number of shares issued
}
// ElectionConfig defines consensus election parameters used when the
// cluster chooses an admin/leader.
type ElectionConfig struct {
	// Trigger timeouts
	HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout" json:"heartbeat_timeout"` // NOTE(review): presumably leader-silence window before re-election — confirm
	DiscoveryTimeout time.Duration `yaml:"discovery_timeout" json:"discovery_timeout"`
	ElectionTimeout  time.Duration `yaml:"election_timeout" json:"election_timeout"`

	// Discovery settings
	MaxDiscoveryAttempts int           `yaml:"max_discovery_attempts" json:"max_discovery_attempts"` // cap on peer-discovery retries
	DiscoveryBackoff     time.Duration `yaml:"discovery_backoff" json:"discovery_backoff"`           // delay between discovery attempts

	// Consensus requirements
	MinimumQuorum      int    `yaml:"minimum_quorum" json:"minimum_quorum"`           // minimum participants for a valid election
	ConsensusAlgorithm string `yaml:"consensus_algorithm" json:"consensus_algorithm"` // "raft", "pbft"

	// Split brain detection
	SplitBrainDetection bool   `yaml:"split_brain_detection" json:"split_brain_detection"`
	ConflictResolution  string `yaml:"conflict_resolution,omitempty" json:"conflict_resolution,omitempty"` // strategy applied on split brain — valid values not visible here
}
// RoleDefinition represents a complete role definition with authority and
// encryption metadata. Instances are returned by GetPredefinedRoles.
type RoleDefinition struct {
	// Existing fields from Bees-AgenticWorkers
	Name         string   `yaml:"name"`          // human-readable role title
	SystemPrompt string   `yaml:"system_prompt"` // LLM system prompt describing responsibilities and authority
	ReportsTo    []string `yaml:"reports_to"`    // role keys this role reports to (empty = reports to consensus)
	Expertise    []string `yaml:"expertise"`
	Deliverables []string `yaml:"deliverables"`
	Capabilities []string `yaml:"capabilities"`

	// Collaboration preferences
	CollaborationDefaults CollaborationConfig `yaml:"collaboration_defaults"`

	// NEW: Authority and encryption fields for Phase 2A
	AuthorityLevel AuthorityLevel `yaml:"authority_level" json:"authority_level"`
	CanDecrypt     []string       `yaml:"can_decrypt,omitempty" json:"can_decrypt,omitempty"` // Roles this role can decrypt; "*" means all roles
	AgeKeys        AgeKeyPair     `yaml:"age_keys,omitempty" json:"age_keys,omitempty"`
	PromptTemplate string         `yaml:"prompt_template,omitempty" json:"prompt_template,omitempty"`
	Model          string         `yaml:"model,omitempty" json:"model,omitempty"`         // preferred model for this role, e.g. "gpt-4o"
	MaxTasks       int            `yaml:"max_tasks,omitempty" json:"max_tasks,omitempty"` // presumably a concurrent-task limit — confirm with scheduler

	// Special functions (for admin/specialized roles)
	SpecialFunctions []string `yaml:"special_functions,omitempty" json:"special_functions,omitempty"`

	// Decision context
	DecisionScope []string `yaml:"decision_scope,omitempty" json:"decision_scope,omitempty"` // What domains this role can decide on
}
// GetPredefinedRoles returns all predefined roles from Bees-AgenticWorkers.md
func GetPredefinedRoles() map[string]RoleDefinition {
return map[string]RoleDefinition{
// NEW: Admin role with SLURP functionality
"admin": {
Name: "SLURP Admin Agent",
SystemPrompt: "You are the **SLURP Admin Agent** with master authority level and context curation functionality.\n\n* **Responsibilities:** Maintain global context graph, ingest and analyze all distributed decisions, manage key reconstruction, coordinate admin elections.\n* **Authority:** Can decrypt and analyze all role-encrypted decisions, publish system-level decisions, manage cluster security.\n* **Special Functions:** Context curation, decision ingestion, semantic analysis, key reconstruction, admin election coordination.\n* **Reports To:** Distributed consensus (no single authority).\n* **Deliverables:** Global context analysis, decision quality metrics, cluster health reports, security audit logs.",
ReportsTo: []string{}, // Admin reports to consensus
Expertise: []string{"context_curation", "decision_analysis", "semantic_indexing", "distributed_systems", "security", "consensus_algorithms"},
Deliverables: []string{"global_context_graph", "decision_quality_metrics", "cluster_health_reports", "security_audit_logs"},
Capabilities: []string{"context_curation", "decision_ingestion", "semantic_analysis", "key_reconstruction", "admin_election", "cluster_coordination"},
AuthorityLevel: AuthorityMaster,
CanDecrypt: []string{"*"}, // Can decrypt all roles
SpecialFunctions: []string{"slurp_functionality", "admin_election", "key_management", "consensus_coordination"},
Model: "gpt-4o",
MaxTasks: 10,
DecisionScope: []string{"system", "security", "architecture", "operations", "consensus"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"admin_election", "key_reconstruction", "consensus_request", "system_alert"},
AutoSubscribeToRoles: []string{"senior_software_architect", "security_expert", "systems_engineer"},
AutoSubscribeToExpertise: []string{"architecture", "security", "infrastructure", "consensus"},
ResponseTimeoutSeconds: 60, // Fast response for admin duties
MaxCollaborationDepth: 10,
EscalationThreshold: 1, // Immediate escalation for admin issues
},
},
"senior_software_architect": {
Name: "Senior Software Architect",
SystemPrompt: "You are the **Senior Software Architect**. You define the system's overall structure, select tech stacks, and ensure long-term maintainability.\n\n* **Responsibilities:** Draft high-level architecture diagrams, define API contracts, set coding standards, mentor engineering leads.\n* **Authority:** Can make strategic technical decisions that are published as permanent UCXL decision nodes.\n* **Expertise:** Deep experience in multiple programming paradigms, distributed systems, security models, and cloud architectures.\n* **Reports To:** Product Owner / Technical Director.\n* **Deliverables:** Architecture blueprints, tech stack decisions, integration strategies, and review sign-offs on major design changes.",
ReportsTo: []string{"product_owner", "technical_director", "admin"},
Expertise: []string{"architecture", "distributed_systems", "security", "cloud_architectures", "api_design"},
Deliverables: []string{"architecture_blueprints", "tech_stack_decisions", "integration_strategies", "design_reviews"},
Capabilities: []string{"task-coordination", "meta-discussion", "architecture", "code-review", "mentoring"},
AuthorityLevel: AuthorityDecision,
CanDecrypt: []string{"senior_software_architect", "backend_developer", "frontend_developer", "full_stack_engineer", "database_engineer"},
Model: "gpt-4o",
MaxTasks: 5,
DecisionScope: []string{"architecture", "design", "technology_selection", "system_integration"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"coordination_request", "meta_discussion", "escalation_trigger"},
AutoSubscribeToRoles: []string{"lead_designer", "security_expert", "systems_engineer"},
AutoSubscribeToExpertise: []string{"architecture", "security", "infrastructure"},
ResponseTimeoutSeconds: 300,
MaxCollaborationDepth: 5,
EscalationThreshold: 3,
},
},
"lead_designer": {
Name: "Lead Designer",
SystemPrompt: "You are the **Lead Designer**. You guide the creative vision and maintain design cohesion across the product.\n\n* **Responsibilities:** Oversee UX flow, wireframes, and feature design; ensure consistency of theme and style; mediate between product vision and technical constraints.\n* **Authority:** Can make design decisions that influence product direction and user experience.\n* **Expertise:** UI/UX principles, accessibility, information architecture, Figma/Sketch proficiency.\n* **Reports To:** Product Owner.\n* **Deliverables:** Style guides, wireframes, feature specs, and iterative design documentation.",
ReportsTo: []string{"product_owner", "admin"},
Expertise: []string{"ui_ux", "accessibility", "information_architecture", "design_systems", "user_research"},
Deliverables: []string{"style_guides", "wireframes", "feature_specs", "design_documentation"},
Capabilities: []string{"task-coordination", "meta-discussion", "design", "user_experience"},
AuthorityLevel: AuthorityDecision,
CanDecrypt: []string{"lead_designer", "ui_ux_designer", "frontend_developer"},
Model: "gpt-4o",
MaxTasks: 4,
DecisionScope: []string{"design", "user_experience", "accessibility", "visual_identity"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "coordination_request", "meta_discussion"},
AutoSubscribeToRoles: []string{"ui_ux_designer", "frontend_developer"},
AutoSubscribeToExpertise: []string{"design", "frontend", "user_experience"},
ResponseTimeoutSeconds: 180,
MaxCollaborationDepth: 3,
EscalationThreshold: 2,
},
},
"security_expert": {
Name: "Security Expert",
SystemPrompt: "You are the **Security Expert**. You ensure the system is hardened against vulnerabilities.\n\n* **Responsibilities:** Conduct threat modeling, penetration tests, code reviews for security flaws, and define access control policies.\n* **Authority:** Can make security-related decisions and coordinate security implementations across teams.\n* **Expertise:** Cybersecurity frameworks (OWASP, NIST), encryption, key management, zero-trust systems.\n* **Reports To:** Senior Software Architect.\n* **Deliverables:** Security audits, vulnerability reports, risk mitigation plans, compliance documentation.",
ReportsTo: []string{"senior_software_architect", "admin"},
Expertise: []string{"cybersecurity", "owasp", "nist", "encryption", "key_management", "zero_trust", "penetration_testing"},
Deliverables: []string{"security_audits", "vulnerability_reports", "risk_mitigation_plans", "compliance_documentation"},
Capabilities: []string{"task-coordination", "meta-discussion", "security-analysis", "code-review", "threat-modeling"},
AuthorityLevel: AuthorityCoordination,
CanDecrypt: []string{"security_expert", "backend_developer", "devops_engineer", "systems_engineer"},
Model: "gpt-4o",
MaxTasks: 4,
DecisionScope: []string{"security", "access_control", "threat_mitigation", "compliance"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"dependency_alert", "task_help_request", "escalation_trigger"},
AutoSubscribeToRoles: []string{"backend_developer", "devops_engineer", "senior_software_architect"},
AutoSubscribeToExpertise: []string{"security", "backend", "infrastructure"},
ResponseTimeoutSeconds: 120,
MaxCollaborationDepth: 4,
EscalationThreshold: 1,
},
},
"systems_engineer": {
Name: "Systems Engineer",
SystemPrompt: "You are the **Systems Engineer**. You connect hardware, operating systems, and software infrastructure.\n\n* **Responsibilities:** Configure OS environments, network setups, and middleware; ensure system performance and uptime.\n* **Expertise:** Linux/Unix systems, networking, hardware integration, automation tools.\n* **Reports To:** Technical Lead.\n* **Deliverables:** Infrastructure configurations, system diagrams, performance benchmarks.",
ReportsTo: []string{"technical_lead"},
Expertise: []string{"linux", "unix", "networking", "hardware_integration", "automation", "system_administration"},
Deliverables: []string{"infrastructure_configurations", "system_diagrams", "performance_benchmarks"},
Capabilities: []string{"task-coordination", "meta-discussion", "infrastructure", "system_administration", "automation"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"coordination_request", "dependency_alert", "task_help_request"},
AutoSubscribeToRoles: []string{"devops_engineer", "backend_developer"},
AutoSubscribeToExpertise: []string{"infrastructure", "deployment", "monitoring"},
ResponseTimeoutSeconds: 240,
MaxCollaborationDepth: 3,
EscalationThreshold: 2,
},
},
"frontend_developer": {
Name: "Frontend Developer",
SystemPrompt: "You are the **Frontend Developer**. You turn designs into interactive interfaces.\n\n* **Responsibilities:** Build UI components, optimize performance, ensure cross-browser/device compatibility, and integrate frontend with backend APIs.\n* **Expertise:** HTML, CSS, JavaScript/TypeScript, React/Vue/Angular, accessibility standards.\n* **Reports To:** Frontend Lead or Senior Architect.\n* **Deliverables:** Functional UI screens, reusable components, and documented frontend code.",
ReportsTo: []string{"frontend_lead", "senior_software_architect"},
Expertise: []string{"html", "css", "javascript", "typescript", "react", "vue", "angular", "accessibility"},
Deliverables: []string{"ui_screens", "reusable_components", "frontend_code", "documentation"},
Capabilities: []string{"task-coordination", "meta-discussion", "frontend", "ui_development", "component_design"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "coordination_request", "task_help_response"},
AutoSubscribeToRoles: []string{"ui_ux_designer", "backend_developer", "lead_designer"},
AutoSubscribeToExpertise: []string{"design", "backend", "api_integration"},
ResponseTimeoutSeconds: 180,
MaxCollaborationDepth: 3,
EscalationThreshold: 2,
},
},
"backend_developer": {
Name: "Backend Developer",
SystemPrompt: "You are the **Backend Developer**. You create APIs, logic, and server-side integrations.\n\n* **Responsibilities:** Implement core logic, manage data pipelines, enforce security, and support scaling strategies.\n* **Expertise:** Server frameworks, REST/GraphQL APIs, authentication, caching, microservices.\n* **Reports To:** Backend Lead or Senior Architect.\n* **Deliverables:** API endpoints, backend services, unit tests, and deployment-ready server code.",
ReportsTo: []string{"backend_lead", "senior_software_architect"},
Expertise: []string{"server_frameworks", "rest_api", "graphql", "authentication", "caching", "microservices", "databases"},
Deliverables: []string{"api_endpoints", "backend_services", "unit_tests", "server_code"},
Capabilities: []string{"task-coordination", "meta-discussion", "backend", "api_development", "database_design"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "coordination_request", "dependency_alert"},
AutoSubscribeToRoles: []string{"database_engineer", "frontend_developer", "security_expert"},
AutoSubscribeToExpertise: []string{"database", "frontend", "security"},
ResponseTimeoutSeconds: 200,
MaxCollaborationDepth: 4,
EscalationThreshold: 2,
},
},
"qa_engineer": {
Name: "QA Engineer",
SystemPrompt: "You are the **QA Engineer**. You ensure the system is reliable and bug-free.\n\n* **Responsibilities:** Create test plans, execute manual and automated tests, document bugs, and verify fixes.\n* **Expertise:** QA methodologies, Selenium/Cypress, regression testing, performance testing.\n* **Reports To:** QA Lead.\n* **Deliverables:** Test scripts, bug reports, QA coverage metrics, and sign-off on release quality.",
ReportsTo: []string{"qa_lead"},
Expertise: []string{"qa_methodologies", "selenium", "cypress", "regression_testing", "performance_testing", "test_automation"},
Deliverables: []string{"test_scripts", "bug_reports", "qa_metrics", "release_signoff"},
Capabilities: []string{"task-coordination", "meta-discussion", "testing", "quality_assurance", "test_automation"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "dependency_alert", "coordination_complete"},
AutoSubscribeToRoles: []string{"frontend_developer", "backend_developer", "devops_engineer"},
AutoSubscribeToExpertise: []string{"testing", "deployment", "automation"},
ResponseTimeoutSeconds: 150,
MaxCollaborationDepth: 3,
EscalationThreshold: 2,
},
},
"ui_ux_designer": {
Name: "UI/UX Designer",
SystemPrompt: "You are the **UI/UX Designer**. You shape how users interact with the product.\n\n* **Responsibilities:** Produce wireframes, prototypes, and design systems; ensure user flows are intuitive.\n* **Expertise:** Human-computer interaction, usability testing, Figma/Sketch, accessibility.\n* **Reports To:** Lead Designer.\n* **Deliverables:** Interactive prototypes, annotated mockups, and updated design documentation.",
ReportsTo: []string{"lead_designer"},
Expertise: []string{"human_computer_interaction", "usability_testing", "figma", "sketch", "accessibility", "user_flows"},
Deliverables: []string{"interactive_prototypes", "annotated_mockups", "design_documentation"},
Capabilities: []string{"task-coordination", "meta-discussion", "design", "prototyping", "user_research"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "coordination_request", "meta_discussion"},
AutoSubscribeToRoles: []string{"frontend_developer", "lead_designer"},
AutoSubscribeToExpertise: []string{"frontend", "design", "user_experience"},
ResponseTimeoutSeconds: 180,
MaxCollaborationDepth: 3,
EscalationThreshold: 2,
},
},
"ml_engineer": {
Name: "ML Engineer",
SystemPrompt: "You are the **Machine Learning Engineer**. You design, train, and integrate AI models into the product.\n\n* **Responsibilities:** Build pipelines, preprocess data, evaluate models, and deploy ML solutions.\n* **Expertise:** Python, TensorFlow/PyTorch, data engineering, model optimization.\n* **Reports To:** Senior Software Architect or Product Owner (depending on AI strategy).\n* **Deliverables:** Trained models, inference APIs, documentation of datasets and performance metrics.",
ReportsTo: []string{"senior_software_architect", "product_owner"},
Expertise: []string{"python", "tensorflow", "pytorch", "data_engineering", "model_optimization", "machine_learning"},
Deliverables: []string{"trained_models", "inference_apis", "dataset_documentation", "performance_metrics"},
Capabilities: []string{"task-coordination", "meta-discussion", "machine_learning", "data_analysis", "model_deployment"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "coordination_request", "meta_discussion"},
AutoSubscribeToRoles: []string{"backend_developer", "database_engineer", "devops_engineer"},
AutoSubscribeToExpertise: []string{"backend", "database", "deployment"},
ResponseTimeoutSeconds: 300,
MaxCollaborationDepth: 4,
EscalationThreshold: 3,
},
},
"devops_engineer": {
Name: "DevOps Engineer",
SystemPrompt: "You are the **DevOps Engineer**. You automate and maintain build, deployment, and monitoring systems.\n\n* **Responsibilities:** Manage CI/CD pipelines, infrastructure as code, observability, and rollback strategies.\n* **Expertise:** Docker, Kubernetes, Terraform, GitHub Actions/Jenkins, cloud providers.\n* **Reports To:** Systems Engineer or Senior Architect.\n* **Deliverables:** CI/CD configurations, monitoring dashboards, and operational runbooks.",
ReportsTo: []string{"systems_engineer", "senior_software_architect"},
Expertise: []string{"docker", "kubernetes", "terraform", "cicd", "github_actions", "jenkins", "cloud_providers", "monitoring"},
Deliverables: []string{"cicd_configurations", "monitoring_dashboards", "operational_runbooks"},
Capabilities: []string{"task-coordination", "meta-discussion", "deployment", "automation", "monitoring", "infrastructure"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"coordination_request", "dependency_alert", "task_help_request"},
AutoSubscribeToRoles: []string{"backend_developer", "systems_engineer", "security_expert"},
AutoSubscribeToExpertise: []string{"backend", "infrastructure", "security"},
ResponseTimeoutSeconds: 240,
MaxCollaborationDepth: 4,
EscalationThreshold: 2,
},
},
"specialist_3d": {
Name: "3D Specialist",
SystemPrompt: "You are the **3D Specialist**. You create and optimize 3D assets for the product.\n\n* **Responsibilities:** Model, texture, and rig characters, environments, and props; ensure performance-friendly assets.\n* **Expertise:** Blender, Maya, Substance Painter, Unity/Unreal pipelines, optimization techniques.\n* **Reports To:** Art Director or Lead Designer.\n* **Deliverables:** Game-ready 3D assets, texture packs, rigged models, and export guidelines.",
ReportsTo: []string{"art_director", "lead_designer"},
Expertise: []string{"blender", "maya", "substance_painter", "unity", "unreal", "3d_modeling", "texturing", "rigging"},
Deliverables: []string{"3d_assets", "texture_packs", "rigged_models", "export_guidelines"},
Capabilities: []string{"task-coordination", "meta-discussion", "3d_modeling", "asset_optimization"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "coordination_request", "meta_discussion"},
AutoSubscribeToRoles: []string{"lead_designer", "engine_programmer"},
AutoSubscribeToExpertise: []string{"design", "engine", "optimization"},
ResponseTimeoutSeconds: 300,
MaxCollaborationDepth: 3,
EscalationThreshold: 2,
},
},
"technical_writer": {
Name: "Technical Writer",
SystemPrompt: "You are the **Technical Writer**. You make sure all documentation is accurate and user-friendly.\n\n* **Responsibilities:** Write developer docs, API references, user manuals, and release notes.\n* **Expertise:** Strong writing skills, Markdown, diagramming, understanding of tech stacks.\n* **Reports To:** Product Owner or Project Manager.\n* **Deliverables:** User guides, developer onboarding docs, and API documentation.",
ReportsTo: []string{"product_owner", "project_manager"},
Expertise: []string{"technical_writing", "markdown", "diagramming", "documentation", "user_guides"},
Deliverables: []string{"user_guides", "developer_docs", "api_documentation", "release_notes"},
Capabilities: []string{"task-coordination", "meta-discussion", "documentation", "technical_writing"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "coordination_complete", "meta_discussion"},
AutoSubscribeToRoles: []string{"backend_developer", "frontend_developer", "senior_software_architect"},
AutoSubscribeToExpertise: []string{"api_design", "documentation", "architecture"},
ResponseTimeoutSeconds: 200,
MaxCollaborationDepth: 3,
EscalationThreshold: 2,
},
},
"full_stack_engineer": {
Name: "Full Stack Engineer",
SystemPrompt: "You are the **Full Stack Engineer**. You bridge frontend and backend to build complete features.\n\n* **Responsibilities:** Implement end-to-end features, debug across the stack, and assist in both client and server layers.\n* **Expertise:** Modern JS frameworks, backend APIs, databases, cloud deployment.\n* **Reports To:** Senior Architect or Tech Lead.\n* **Deliverables:** Full feature implementations, integration tests, and code linking UI to backend.",
ReportsTo: []string{"senior_software_architect", "tech_lead"},
Expertise: []string{"javascript", "frontend_frameworks", "backend_apis", "databases", "cloud_deployment", "full_stack"},
Deliverables: []string{"feature_implementations", "integration_tests", "end_to_end_code"},
Capabilities: []string{"task-coordination", "meta-discussion", "frontend", "backend", "full_stack_development"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "coordination_request", "task_help_response"},
AutoSubscribeToRoles: []string{"frontend_developer", "backend_developer", "database_engineer"},
AutoSubscribeToExpertise: []string{"frontend", "backend", "database"},
ResponseTimeoutSeconds: 200,
MaxCollaborationDepth: 4,
EscalationThreshold: 2,
},
},
"database_engineer": {
Name: "Database Engineer",
SystemPrompt: "You are the **Database Engineer**. You design and maintain data structures for performance and reliability.\n\n* **Responsibilities:** Design schemas, optimize queries, manage migrations, and implement backup strategies.\n* **Expertise:** SQL/NoSQL databases, indexing, query tuning, replication/sharding.\n* **Reports To:** Backend Lead or Senior Architect.\n* **Deliverables:** Schema diagrams, migration scripts, tuning reports, and disaster recovery plans.",
ReportsTo: []string{"backend_lead", "senior_software_architect"},
Expertise: []string{"sql", "nosql", "indexing", "query_tuning", "replication", "sharding", "database_design"},
Deliverables: []string{"schema_diagrams", "migration_scripts", "tuning_reports", "disaster_recovery_plans"},
Capabilities: []string{"task-coordination", "meta-discussion", "database_design", "query_optimization", "data_modeling"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "dependency_alert", "coordination_request"},
AutoSubscribeToRoles: []string{"backend_developer", "ml_engineer", "devops_engineer"},
AutoSubscribeToExpertise: []string{"backend", "machine_learning", "deployment"},
ResponseTimeoutSeconds: 240,
MaxCollaborationDepth: 3,
EscalationThreshold: 2,
},
},
"engine_programmer": {
Name: "Engine Programmer",
SystemPrompt: "You are the **Engine Programmer**. You work close to the metal to extend and optimize the engine.\n\n* **Responsibilities:** Develop low-level systems (rendering, physics, memory), maintain performance, and enable tools for designers/artists.\n* **Expertise:** C++/Rust, graphics APIs (Vulkan/DirectX/OpenGL), performance profiling, game/real-time engines.\n* **Reports To:** Senior Software Architect or Technical Director.\n* **Deliverables:** Engine modules, profiling reports, performance patches, and technical documentation.",
ReportsTo: []string{"senior_software_architect", "technical_director"},
Expertise: []string{"cpp", "rust", "vulkan", "directx", "opengl", "performance_profiling", "game_engines", "low_level_programming"},
Deliverables: []string{"engine_modules", "profiling_reports", "performance_patches", "technical_documentation"},
Capabilities: []string{"task-coordination", "meta-discussion", "engine_development", "performance_optimization", "low_level_programming"},
CollaborationDefaults: CollaborationConfig{
PreferredMessageTypes: []string{"task_help_request", "meta_discussion", "coordination_request"},
AutoSubscribeToRoles: []string{"specialist_3d", "senior_software_architect"},
AutoSubscribeToExpertise: []string{"3d_modeling", "architecture", "optimization"},
ResponseTimeoutSeconds: 300,
MaxCollaborationDepth: 4,
EscalationThreshold: 3,
},
},
}
}
// ApplyRoleDefinition applies a predefined role to the agent config.
//
// roleName must be a canonical key from GetPredefinedRoles (e.g.
// "backend_developer"). On success the agent's system prompt, expertise,
// deliverables, capabilities, and collaboration settings are replaced by
// the role's defaults; model and task limits are applied only when the
// role declares them. Returns an error if roleName is not a known role.
func (c *Config) ApplyRoleDefinition(roleName string) error {
	roles := GetPredefinedRoles()
	role, exists := roles[roleName]
	if !exists {
		return fmt.Errorf("unknown role: %s", roleName)
	}

	// Store the canonical role key, not the display name: GetRoleAuthority,
	// CanDecryptRole, IsAdminRole, etc. all index GetPredefinedRoles() by
	// c.Agent.Role, so storing role.Name (e.g. "Backend Developer") would
	// make every subsequent authority lookup fail.
	c.Agent.Role = roleName
	c.Agent.SystemPrompt = role.SystemPrompt
	c.Agent.ReportsTo = role.ReportsTo
	c.Agent.Expertise = role.Expertise
	c.Agent.Deliverables = role.Deliverables
	c.Agent.Capabilities = role.Capabilities
	c.Agent.CollaborationSettings = role.CollaborationDefaults

	// Apply the role's preferred model, keeping it first in the model list.
	if role.Model != "" {
		c.Agent.DefaultReasoningModel = role.Model
		if !contains(c.Agent.Models, role.Model) {
			c.Agent.Models = append([]string{role.Model}, c.Agent.Models...)
		}
	}

	if role.MaxTasks > 0 {
		c.Agent.MaxTasks = role.MaxTasks
	}

	// Master-authority (admin) roles additionally get SLURP enabled plus
	// the context-curation capability set.
	if role.AuthorityLevel == AuthorityMaster {
		c.Slurp.Enabled = true
		adminCaps := []string{"context_curation", "decision_ingestion", "semantic_analysis", "key_reconstruction"}
		for _, cap := range adminCaps {
			if !contains(c.Agent.Capabilities, cap) {
				c.Agent.Capabilities = append(c.Agent.Capabilities, cap)
			}
		}
	}

	return nil
}
// GetRoleByName returns a role definition by name (case-insensitive)
func GetRoleByName(roleName string) (*RoleDefinition, error) {
	roles := GetPredefinedRoles()

	// Exact key match takes priority over the case-insensitive scan.
	if role, ok := roles[roleName]; ok {
		return &role, nil
	}

	// Fall back to comparing lowercased keys.
	want := strings.ToLower(roleName)
	for key := range roles {
		if strings.ToLower(key) == want {
			role := roles[key]
			return &role, nil
		}
	}

	return nil, fmt.Errorf("role not found: %s", roleName)
}
// GetAvailableRoles returns a list of all available role names
func GetAvailableRoles() []string {
	roles := GetPredefinedRoles()
	names := make([]string, 0, len(roles))
	for roleName := range roles {
		names = append(names, roleName)
	}
	// NOTE: ordering follows Go map iteration and is therefore random.
	return names
}
// GetRoleAuthority returns the authority level for a given role
func (c *Config) GetRoleAuthority(roleName string) (AuthorityLevel, error) {
	if role, ok := GetPredefinedRoles()[roleName]; ok {
		return role.AuthorityLevel, nil
	}
	// Unknown roles fall back to the most restrictive level plus an error.
	return AuthorityReadOnly, fmt.Errorf("role '%s' not found", roleName)
}
// CanDecryptRole checks if current role can decrypt content from target role
func (c *Config) CanDecryptRole(targetRole string) (bool, error) {
	if c.Agent.Role == "" {
		return false, fmt.Errorf("no role configured")
	}

	currentRole, ok := GetPredefinedRoles()[c.Agent.Role]
	if !ok {
		return false, fmt.Errorf("current role '%s' not found", c.Agent.Role)
	}

	// Master authority decrypts everything unconditionally.
	if currentRole.AuthorityLevel == AuthorityMaster {
		return true, nil
	}

	// Otherwise the target must appear in this role's can_decrypt list;
	// "*" acts as a wildcard entry.
	for _, allowed := range currentRole.CanDecrypt {
		if allowed == "*" || allowed == targetRole {
			return true, nil
		}
	}

	return false, nil
}
// IsAdminRole checks if the current agent has admin (master) authority
func (c *Config) IsAdminRole() bool {
	if c.Agent.Role == "" {
		return false
	}
	// Lookup failures are treated as non-admin rather than surfaced.
	if authority, err := c.GetRoleAuthority(c.Agent.Role); err == nil {
		return authority == AuthorityMaster
	}
	return false
}
// CanMakeDecisions checks if current role can make permanent decisions
func (c *Config) CanMakeDecisions() bool {
	if c.Agent.Role == "" {
		return false
	}
	authority, err := c.GetRoleAuthority(c.Agent.Role)
	if err != nil {
		return false
	}
	// Only master and decision authority levels may commit decisions.
	switch authority {
	case AuthorityMaster, AuthorityDecision:
		return true
	default:
		return false
	}
}
// GetDecisionScope returns the decision domains this role can decide on
func (c *Config) GetDecisionScope() []string {
	if c.Agent.Role == "" {
		return []string{}
	}
	// Unknown roles get an empty (not nil) scope, matching the unset case.
	role, ok := GetPredefinedRoles()[c.Agent.Role]
	if !ok {
		return []string{}
	}
	return role.DecisionScope
}
// HasSpecialFunction checks if the current role has a specific special function
func (c *Config) HasSpecialFunction(function string) bool {
	if c.Agent.Role == "" {
		return false
	}
	role, ok := GetPredefinedRoles()[c.Agent.Role]
	if !ok {
		return false
	}
	// Linear scan; special-function lists are short.
	for _, fn := range role.SpecialFunctions {
		if fn == function {
			return true
		}
	}
	return false
}
// contains reports whether value appears in slice.
func contains(slice []string, value string) bool {
	for i := range slice {
		if slice[i] == value {
			return true
		}
	}
	return false
}

133
pkg/config/security.go Normal file
View File

@@ -0,0 +1,133 @@
package config
import "time"
// Authority levels for roles
//
// String values are what appears in YAML under authority_level.
// NOTE(review): other config code in this package compares against
// AuthorityMaster and AuthorityDecision, which are not declared in this
// block — confirm the full constant set is consistent across files.
const (
	AuthorityReadOnly   = "readonly"   // observation only; presumably no mutations — confirm with callers
	AuthoritySuggestion = "suggestion" // may propose changes; enforcement lives with callers
	AuthorityFull       = "full"       // autonomous operation within the role
	AuthorityAdmin      = "admin"      // broadest access (see GetPredefinedRoles in this file)
)
// SecurityConfig defines security-related configuration: key rotation,
// audit logging, and leader-election behavior.
type SecurityConfig struct {
	KeyRotationDays int            `yaml:"key_rotation_days"` // interval between key rotations, in days
	AuditLogging    bool           `yaml:"audit_logging"`     // enables audit logging when true
	AuditPath       string         `yaml:"audit_path"`        // destination path for audit output
	ElectionConfig  ElectionConfig `yaml:"election"`          // election timing/behavior settings
}
// ElectionConfig defines election timing and behavior settings.
// All durations are parsed from YAML; semantics below are inferred from
// field names — confirm against the election implementation.
type ElectionConfig struct {
	DiscoveryTimeout  time.Duration      `yaml:"discovery_timeout"`  // presumably max time for peer discovery
	HeartbeatTimeout  time.Duration      `yaml:"heartbeat_timeout"`  // presumably heartbeat silence before leader is considered lost
	ElectionTimeout   time.Duration      `yaml:"election_timeout"`   // presumably upper bound on one election round
	DiscoveryBackoff  time.Duration      `yaml:"discovery_backoff"`  // presumably delay between discovery attempts
	LeadershipScoring *LeadershipScoring `yaml:"leadership_scoring,omitempty"` // optional scoring weights; nil when unset
}
// LeadershipScoring defines weights for election scoring.
// Each field weights one component of a candidate's leadership score;
// the combining formula lives with the election implementation.
type LeadershipScoring struct {
	UptimeWeight     float64 `yaml:"uptime_weight"`     // weight of node uptime
	CapabilityWeight float64 `yaml:"capability_weight"` // weight of declared capabilities
	ExperienceWeight float64 `yaml:"experience_weight"` // weight of prior experience
	LoadWeight       float64 `yaml:"load_weight"`       // weight of current load
}
// AgeKeyPair represents an Age encryption key pair.
//
// NOTE(review): the private key is serialized alongside the public key in
// YAML — confirm files containing this type are stored with appropriate
// access controls.
type AgeKeyPair struct {
	PublicKey  string `yaml:"public_key"`  // age recipient (public) key
	PrivateKey string `yaml:"private_key"` // age identity (secret) key
}
// RoleDefinition represents a role configuration.
//
// NOTE(review): a differently-shaped RoleDefinition (with SystemPrompt,
// ReportsTo, CollaborationDefaults, ...) is used elsewhere in package
// config; two package-level declarations of the same name will not
// compile — confirm which definition is intended to survive the merge.
type RoleDefinition struct {
	Name           string      `yaml:"name"`
	Description    string      `yaml:"description"`
	Capabilities   []string    `yaml:"capabilities"`
	AccessLevel    string      `yaml:"access_level"`    // "low" | "medium" | "high" (ranked by getAccessLevelValue)
	AuthorityLevel string      `yaml:"authority_level"` // one of the Authority* constants
	Keys           *AgeKeyPair `yaml:"keys,omitempty"`
	AgeKeys        *AgeKeyPair `yaml:"age_keys,omitempty"` // Legacy field name
	CanDecrypt     []string    `yaml:"can_decrypt,omitempty"` // Roles this role can decrypt
}
// GetPredefinedRoles returns the predefined roles for the system.
//
// NOTE(review): package config declares another GetPredefinedRoles (used
// by ApplyRoleDefinition and the authority helpers) with a different
// RoleDefinition shape; duplicate package-level declarations will not
// compile — confirm which version is intended to remain.
func GetPredefinedRoles() map[string]*RoleDefinition {
	return map[string]*RoleDefinition{
		// Admin-level coordinator; can decrypt content of every role listed.
		"project_manager": {
			Name:           "project_manager",
			Description:    "Project coordination and management",
			Capabilities:   []string{"coordination", "planning", "oversight"},
			AccessLevel:    "high",
			AuthorityLevel: AuthorityAdmin,
			CanDecrypt:     []string{"project_manager", "backend_developer", "frontend_developer", "devops_engineer", "security_engineer"},
		},
		// Developers can only decrypt their own role's content.
		"backend_developer": {
			Name:           "backend_developer",
			Description:    "Backend development and API work",
			Capabilities:   []string{"backend", "api", "database"},
			AccessLevel:    "medium",
			AuthorityLevel: AuthorityFull,
			CanDecrypt:     []string{"backend_developer"},
		},
		"frontend_developer": {
			Name:           "frontend_developer",
			Description:    "Frontend UI development",
			Capabilities:   []string{"frontend", "ui", "components"},
			AccessLevel:    "medium",
			AuthorityLevel: AuthorityFull,
			CanDecrypt:     []string{"frontend_developer"},
		},
		// DevOps can additionally read backend content.
		"devops_engineer": {
			Name:           "devops_engineer",
			Description:    "Infrastructure and deployment",
			Capabilities:   []string{"infrastructure", "deployment", "monitoring"},
			AccessLevel:    "high",
			AuthorityLevel: AuthorityFull,
			CanDecrypt:     []string{"devops_engineer", "backend_developer"},
		},
		// Security has admin authority and can decrypt all roles.
		"security_engineer": {
			Name:           "security_engineer",
			Description:    "Security oversight and hardening",
			Capabilities:   []string{"security", "audit", "compliance"},
			AccessLevel:    "high",
			AuthorityLevel: AuthorityAdmin,
			CanDecrypt:     []string{"security_engineer", "project_manager", "backend_developer", "frontend_developer", "devops_engineer"},
		},
	}
}
// CanDecryptRole checks if the current agent can decrypt content for a
// target role by comparing numeric access-level ranks (see
// getAccessLevelValue): the current role must rank at least as high as
// the target. Unknown current or target roles yield (false, nil) — no
// error is returned for them.
//
// NOTE(review): package config defines another CanDecryptRole method on
// *Config that instead consults AuthorityLevel and per-role CanDecrypt
// lists; a duplicate method on the same receiver will not compile, and
// the two implementations disagree semantically — confirm which policy
// is intended.
func (c *Config) CanDecryptRole(targetRole string) (bool, error) {
	roles := GetPredefinedRoles()
	currentRole, exists := roles[c.Agent.Role]
	if !exists {
		return false, nil
	}

	targetRoleDef, exists := roles[targetRole]
	if !exists {
		return false, nil
	}

	// Simple access level check
	currentLevel := getAccessLevelValue(currentRole.AccessLevel)
	targetLevel := getAccessLevelValue(targetRoleDef.AccessLevel)

	return currentLevel >= targetLevel, nil
}
// getAccessLevelValue maps a textual access level to a numeric rank;
// higher means more access. Unrecognized levels rank lowest (0).
func getAccessLevelValue(level string) int {
	ranks := map[string]int{
		"low":    1,
		"medium": 2,
		"high":   3,
	}
	return ranks[level] // missing keys yield the zero value, 0
}

View File

@@ -1,289 +0,0 @@
package config
import (
"fmt"
"time"
)
// SlurpConfig holds SLURP event system integration configuration.
// When Enabled is false (the default, see GetDefaultSlurpConfig) the
// integration is inert; see ValidateSlurpConfig for the constraints
// enforced on an enabled configuration.
type SlurpConfig struct {
	// Connection settings
	Enabled    bool          `yaml:"enabled" json:"enabled"`         // master switch for the integration
	BaseURL    string        `yaml:"base_url" json:"base_url"`       // SLURP endpoint; required when enabled
	APIKey     string        `yaml:"api_key" json:"api_key"`
	Timeout    time.Duration `yaml:"timeout" json:"timeout"`         // per-request timeout
	RetryCount int           `yaml:"retry_count" json:"retry_count"`
	RetryDelay time.Duration `yaml:"retry_delay" json:"retry_delay"`

	// Event generation settings
	EventGeneration EventGenerationConfig `yaml:"event_generation" json:"event_generation"`

	// Project-specific event mappings, keyed presumably by project identifier — confirm with callers
	ProjectMappings map[string]ProjectEventMapping `yaml:"project_mappings" json:"project_mappings"`

	// Default event settings
	DefaultEventSettings DefaultEventConfig `yaml:"default_event_settings" json:"default_event_settings"`

	// Batch processing settings
	BatchProcessing BatchConfig `yaml:"batch_processing" json:"batch_processing"`

	// Reliability settings (circuit breaker, idempotency, DLQ, backoff)
	Reliability ReliabilityConfig `yaml:"reliability" json:"reliability"`
}
// EventGenerationConfig controls when and how SLURP events are generated
// from HMMM discussions.
type EventGenerationConfig struct {
	// Consensus requirements
	MinConsensusStrength float64 `yaml:"min_consensus_strength" json:"min_consensus_strength"` // 0..1 (validated by ValidateSlurpConfig)
	MinParticipants      int     `yaml:"min_participants" json:"min_participants"`             // must be >= 1
	RequireUnanimity     bool    `yaml:"require_unanimity" json:"require_unanimity"`

	// Time-based triggers
	MaxDiscussionDuration time.Duration `yaml:"max_discussion_duration" json:"max_discussion_duration"`
	MinDiscussionDuration time.Duration `yaml:"min_discussion_duration" json:"min_discussion_duration"`

	// Event type generation rules: allow-list and deny-list of event type names
	EnabledEventTypes  []string `yaml:"enabled_event_types" json:"enabled_event_types"`
	DisabledEventTypes []string `yaml:"disabled_event_types" json:"disabled_event_types"`

	// Severity calculation
	SeverityRules SeverityConfig `yaml:"severity_rules" json:"severity_rules"`
}
// SeverityConfig defines how to calculate event severity from HMMM discussions.
type SeverityConfig struct {
	// Base severity for each event type (1-10 scale)
	BaseSeverity map[string]int `yaml:"base_severity" json:"base_severity"`

	// Modifiers based on discussion characteristics
	ParticipantMultiplier float64  `yaml:"participant_multiplier" json:"participant_multiplier"` // per-participant severity scaling
	DurationMultiplier    float64  `yaml:"duration_multiplier" json:"duration_multiplier"`       // duration-based severity scaling
	UrgencyKeywords       []string `yaml:"urgency_keywords" json:"urgency_keywords"`             // keywords that trigger UrgencyBoost
	UrgencyBoost          int      `yaml:"urgency_boost" json:"urgency_boost"`

	// Severity caps applied after all modifiers
	MinSeverity int `yaml:"min_severity" json:"min_severity"`
	MaxSeverity int `yaml:"max_severity" json:"max_severity"`
}
// ProjectEventMapping defines project-specific event mapping rules that
// override or extend the global SLURP event settings for one project.
type ProjectEventMapping struct {
	ProjectPath        string                 `yaml:"project_path" json:"project_path"`
	CustomEventTypes   map[string]string      `yaml:"custom_event_types" json:"custom_event_types"`     // project-local event type overrides
	SeverityOverrides  map[string]int         `yaml:"severity_overrides" json:"severity_overrides"`     // per-event-type severity overrides
	AdditionalMetadata map[string]interface{} `yaml:"additional_metadata" json:"additional_metadata"`   // merged into generated events
	EventFilters       []EventFilter          `yaml:"event_filters" json:"event_filters"`
}
// EventFilter defines conditions for filtering or modifying events.
type EventFilter struct {
	Name          string            `yaml:"name" json:"name"`
	Conditions    map[string]string `yaml:"conditions" json:"conditions"`       // field -> expected value; match semantics live with the filter engine
	Action        string            `yaml:"action" json:"action"` // "allow", "deny", "modify"
	Modifications map[string]string `yaml:"modifications" json:"modifications"` // applied when Action is "modify"
}
// DefaultEventConfig provides default settings applied to generated events
// when no project-specific mapping overrides them.
type DefaultEventConfig struct {
	DefaultSeverity  int               `yaml:"default_severity" json:"default_severity"` // 1-10 (validated by ValidateSlurpConfig)
	DefaultCreatedBy string            `yaml:"default_created_by" json:"default_created_by"`
	DefaultTags      []string          `yaml:"default_tags" json:"default_tags"`
	MetadataTemplate map[string]string `yaml:"metadata_template" json:"metadata_template"` // base metadata stamped on every event
}
// BatchConfig controls batch processing of SLURP events.
type BatchConfig struct {
	Enabled         bool          `yaml:"enabled" json:"enabled"`
	MaxBatchSize    int           `yaml:"max_batch_size" json:"max_batch_size"`   // flush when this many events are queued
	MaxBatchWait    time.Duration `yaml:"max_batch_wait" json:"max_batch_wait"`   // flush after this long even if the batch is not full
	FlushOnShutdown bool          `yaml:"flush_on_shutdown" json:"flush_on_shutdown"` // drain pending events on shutdown
}
// ReliabilityConfig controls reliability features (idempotency, circuit
// breaker, dead letter queue) for SLURP event delivery. See
// GetDefaultSlurpConfig for the default values and ValidateSlurpConfig
// for the constraints enforced on them.
type ReliabilityConfig struct {
	// Circuit breaker settings
	MaxFailures     int           `yaml:"max_failures" json:"max_failures"`         // consecutive failures before the breaker opens; >= 1
	CooldownPeriod  time.Duration `yaml:"cooldown_period" json:"cooldown_period"`   // how long the breaker stays open; > 0
	HalfOpenTimeout time.Duration `yaml:"half_open_timeout" json:"half_open_timeout"`

	// Idempotency settings
	IdempotencyWindow time.Duration `yaml:"idempotency_window" json:"idempotency_window"` // duplicate-detection window; > 0

	// Dead letter queue settings
	DLQDirectory  string        `yaml:"dlq_directory" json:"dlq_directory"`   // on-disk location for undeliverable events
	MaxRetries    int           `yaml:"max_retries" json:"max_retries"`       // >= 0
	RetryInterval time.Duration `yaml:"retry_interval" json:"retry_interval"`

	// Backoff settings
	InitialBackoff    time.Duration `yaml:"initial_backoff" json:"initial_backoff"`
	MaxBackoff        time.Duration `yaml:"max_backoff" json:"max_backoff"`
	BackoffMultiplier float64       `yaml:"backoff_multiplier" json:"backoff_multiplier"` // must be > 1.0
	JitterFactor      float64       `yaml:"jitter_factor" json:"jitter_factor"`           // fractional jitter applied to backoff
}
// HmmmToSlurpMapping defines the mapping between HMMM discussion outcomes
// and SLURP event types: the first group maps consensus outcomes to event
// type names, the second lists trigger keywords per event type. Defaults
// come from GetHmmmToSlurpMapping.
type HmmmToSlurpMapping struct {
	// Consensus types to SLURP event types
	ConsensusApproval    string `yaml:"consensus_approval" json:"consensus_approval"`       // -> "approval"
	RiskIdentified       string `yaml:"risk_identified" json:"risk_identified"`             // -> "warning"
	CriticalBlocker      string `yaml:"critical_blocker" json:"critical_blocker"`           // -> "blocker"
	PriorityChange       string `yaml:"priority_change" json:"priority_change"`             // -> "priority_change"
	AccessRequest        string `yaml:"access_request" json:"access_request"`               // -> "access_update"
	ArchitectureDecision string `yaml:"architecture_decision" json:"architecture_decision"` // -> "structural_change"
	InformationShare     string `yaml:"information_share" json:"information_share"`         // -> "announcement"

	// Keywords that trigger specific event types
	ApprovalKeywords     []string `yaml:"approval_keywords" json:"approval_keywords"`
	WarningKeywords      []string `yaml:"warning_keywords" json:"warning_keywords"`
	BlockerKeywords      []string `yaml:"blocker_keywords" json:"blocker_keywords"`
	PriorityKeywords     []string `yaml:"priority_keywords" json:"priority_keywords"`
	AccessKeywords       []string `yaml:"access_keywords" json:"access_keywords"`
	StructuralKeywords   []string `yaml:"structural_keywords" json:"structural_keywords"`
	AnnouncementKeywords []string `yaml:"announcement_keywords" json:"announcement_keywords"`
}
// GetDefaultSlurpConfig returns default SLURP configuration.
//
// The defaults are development-safe: the integration itself is disabled,
// so nothing is emitted until a deployment enables it, while the
// generation, severity, batching, and reliability sub-configs carry
// working values that pass ValidateSlurpConfig once Enabled is set.
func GetDefaultSlurpConfig() SlurpConfig {
	return SlurpConfig{
		Enabled:    false, // Disabled by default until configured
		BaseURL:    "http://localhost:8080",
		Timeout:    30 * time.Second,
		RetryCount: 3,
		RetryDelay: 5 * time.Second,
		EventGeneration: EventGenerationConfig{
			MinConsensusStrength:  0.7,
			MinParticipants:       2,
			RequireUnanimity:      false,
			MaxDiscussionDuration: 30 * time.Minute,
			MinDiscussionDuration: 1 * time.Minute,
			EnabledEventTypes: []string{
				"announcement", "warning", "blocker", "approval",
				"priority_change", "access_update", "structural_change",
			},
			DisabledEventTypes: []string{},
			SeverityRules: SeverityConfig{
				// Base severities on the 1-10 scale; blockers rank highest.
				BaseSeverity: map[string]int{
					"announcement":      3,
					"warning":           5,
					"blocker":           8,
					"approval":          4,
					"priority_change":   6,
					"access_update":     5,
					"structural_change": 7,
				},
				ParticipantMultiplier: 0.2,
				DurationMultiplier:    0.1,
				UrgencyKeywords:       []string{"urgent", "critical", "blocker", "emergency", "immediate"},
				UrgencyBoost:          2,
				MinSeverity:           1,
				MaxSeverity:           10,
			},
		},
		ProjectMappings: make(map[string]ProjectEventMapping),
		DefaultEventSettings: DefaultEventConfig{
			DefaultSeverity:  5,
			DefaultCreatedBy: "hmmm-consensus",
			DefaultTags:      []string{"hmmm-generated", "automated"},
			MetadataTemplate: map[string]string{
				"source":          "hmmm-discussion",
				"generation_type": "consensus-based",
			},
		},
		BatchProcessing: BatchConfig{
			Enabled:         true,
			MaxBatchSize:    10,
			MaxBatchWait:    5 * time.Second,
			FlushOnShutdown: true,
		},
		Reliability: ReliabilityConfig{
			// Circuit breaker: allow 5 consecutive failures before opening for 1 minute
			MaxFailures:     5,
			CooldownPeriod:  1 * time.Minute,
			HalfOpenTimeout: 30 * time.Second,

			// Idempotency: 1-hour window to catch duplicate events
			IdempotencyWindow: 1 * time.Hour,

			// DLQ: retry up to 3 times with exponential backoff
			DLQDirectory:  "./data/slurp_dlq",
			MaxRetries:    3,
			RetryInterval: 30 * time.Second,

			// Backoff: start with 1s, max 5min, 2x multiplier, ±25% jitter
			InitialBackoff:    1 * time.Second,
			MaxBackoff:        5 * time.Minute,
			BackoffMultiplier: 2.0,
			JitterFactor:      0.25,
		},
	}
}
// GetHmmmToSlurpMapping returns the default mapping configuration from
// HMMM consensus outcomes to SLURP event types, including the keyword
// lists used to classify discussion text into each event type.
func GetHmmmToSlurpMapping() HmmmToSlurpMapping {
	return HmmmToSlurpMapping{
		// Outcome -> SLURP event type names.
		ConsensusApproval:    "approval",
		RiskIdentified:       "warning",
		CriticalBlocker:      "blocker",
		PriorityChange:       "priority_change",
		AccessRequest:        "access_update",
		ArchitectureDecision: "structural_change",
		InformationShare:     "announcement",

		// Trigger keywords per event type (case handling is up to the classifier).
		ApprovalKeywords:     []string{"approve", "approved", "looks good", "lgtm", "accepted", "agree"},
		WarningKeywords:      []string{"warning", "caution", "risk", "potential issue", "concern", "careful"},
		BlockerKeywords:      []string{"blocker", "blocked", "critical", "urgent", "cannot proceed", "show stopper"},
		PriorityKeywords:     []string{"priority", "urgent", "high priority", "low priority", "reprioritize"},
		AccessKeywords:       []string{"access", "permission", "auth", "authorization", "credentials", "token"},
		StructuralKeywords:   []string{"architecture", "structure", "design", "refactor", "framework", "pattern"},
		AnnouncementKeywords: []string{"announce", "fyi", "information", "update", "news", "notice"},
	}
}
// ValidateSlurpConfig validates SLURP configuration.
// A disabled configuration is always valid; for an enabled one the
// connection, generation, default-event, and reliability settings are
// checked and the first violation is returned as an error.
func ValidateSlurpConfig(config SlurpConfig) error {
	if !config.Enabled {
		// Nothing else is inspected while the integration is off.
		return nil
	}

	if config.BaseURL == "" {
		return fmt.Errorf("slurp.base_url is required when SLURP is enabled")
	}

	gen := config.EventGeneration
	if gen.MinConsensusStrength < 0 || gen.MinConsensusStrength > 1 {
		return fmt.Errorf("slurp.event_generation.min_consensus_strength must be between 0 and 1")
	}
	if gen.MinParticipants < 1 {
		return fmt.Errorf("slurp.event_generation.min_participants must be at least 1")
	}

	if sev := config.DefaultEventSettings.DefaultSeverity; sev < 1 || sev > 10 {
		return fmt.Errorf("slurp.default_event_settings.default_severity must be between 1 and 10")
	}

	// Reliability settings (circuit breaker, idempotency, DLQ, backoff).
	rel := config.Reliability
	if rel.MaxFailures < 1 {
		return fmt.Errorf("slurp.reliability.max_failures must be at least 1")
	}
	if rel.CooldownPeriod <= 0 {
		return fmt.Errorf("slurp.reliability.cooldown_period must be positive")
	}
	if rel.IdempotencyWindow <= 0 {
		return fmt.Errorf("slurp.reliability.idempotency_window must be positive")
	}
	if rel.MaxRetries < 0 {
		return fmt.Errorf("slurp.reliability.max_retries cannot be negative")
	}
	if rel.BackoffMultiplier <= 1.0 {
		return fmt.Errorf("slurp.reliability.backoff_multiplier must be greater than 1.0")
	}

	return nil
}

View File

@@ -6,7 +6,7 @@ import (
"strings"
"time"
"chorus.services/bzzz/pubsub"
"chorus/pubsub"
"github.com/libp2p/go-libp2p/core/peer"
)

View File

@@ -8,9 +8,9 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/integration"
"chorus.services/bzzz/pubsub"
"chorus.services/bzzz/reasoning"
"chorus/pkg/integration"
"chorus/pubsub"
"chorus/reasoning"
"github.com/libp2p/go-libp2p/core/peer"
)

View File

@@ -1,8 +1,8 @@
# BZZZ Role-Based Encryption System
# CHORUS Role-Based Encryption System
## Overview
The BZZZ Role-Based Encryption System provides enterprise-grade security for the SLURP (Storage, Logic, Understanding, Retrieval, Processing) contextual intelligence system. This comprehensive encryption scheme implements multi-layer encryption, sophisticated access controls, and compliance monitoring to ensure that each AI agent role receives exactly the contextual understanding they need while maintaining strict security boundaries.
The CHORUS Role-Based Encryption System provides enterprise-grade security for the SLURP (Storage, Logic, Understanding, Retrieval, Processing) contextual intelligence system. This comprehensive encryption scheme implements multi-layer encryption, sophisticated access controls, and compliance monitoring to ensure that each AI agent role receives exactly the contextual understanding they need while maintaining strict security boundaries.
## Table of Contents
@@ -212,10 +212,10 @@ import (
"fmt"
"time"
"github.com/anthonyrawlins/bzzz/pkg/config"
"github.com/anthonyrawlins/bzzz/pkg/crypto"
"github.com/anthonyrawlins/bzzz/pkg/ucxl"
slurpContext "github.com/anthonyrawlins/bzzz/pkg/slurp/context"
"github.com/anthonyrawlins/CHORUS/pkg/config"
"github.com/anthonyrawlins/CHORUS/pkg/crypto"
"github.com/anthonyrawlins/CHORUS/pkg/ucxl"
slurpContext "github.com/anthonyrawlins/CHORUS/pkg/slurp/context"
)
func main() {
@@ -603,15 +603,15 @@ Current test coverage: **95%+**
# docker-compose.yml
version: '3.8'
services:
bzzz-crypto:
image: bzzz/crypto-service:latest
CHORUS-crypto:
image: CHORUS/crypto-service:latest
environment:
- BZZZ_CONFIG_PATH=/etc/bzzz/config.yaml
- BZZZ_CONFIG_PATH=/etc/CHORUS/config.yaml
- BZZZ_LOG_LEVEL=info
- BZZZ_AUDIT_STORAGE=postgresql
volumes:
- ./config:/etc/bzzz
- ./logs:/var/log/bzzz
- ./config:/etc/CHORUS
- ./logs:/var/log/CHORUS
ports:
- "8443:8443"
depends_on:
@@ -622,7 +622,7 @@ services:
image: postgres:13
environment:
- POSTGRES_DB=bzzz_audit
- POSTGRES_USER=bzzz
- POSTGRES_USER=CHORUS
- POSTGRES_PASSWORD_FILE=/run/secrets/db_password
volumes:
- postgres_data:/var/lib/postgresql/data
@@ -650,39 +650,39 @@ secrets:
apiVersion: apps/v1
kind: Deployment
metadata:
name: bzzz-crypto-service
name: CHORUS-crypto-service
labels:
app: bzzz-crypto
app: CHORUS-crypto
spec:
replicas: 3
selector:
matchLabels:
app: bzzz-crypto
app: CHORUS-crypto
template:
metadata:
labels:
app: bzzz-crypto
app: CHORUS-crypto
spec:
serviceAccountName: bzzz-crypto
serviceAccountName: CHORUS-crypto
securityContext:
runAsNonRoot: true
runAsUser: 1000
fsGroup: 1000
containers:
- name: crypto-service
image: bzzz/crypto-service:v1.0.0
image: CHORUS/crypto-service:v1.0.0
imagePullPolicy: Always
ports:
- containerPort: 8443
name: https
env:
- name: BZZZ_CONFIG_PATH
value: "/etc/bzzz/config.yaml"
value: "/etc/CHORUS/config.yaml"
- name: BZZZ_LOG_LEVEL
value: "info"
volumeMounts:
- name: config
mountPath: /etc/bzzz
mountPath: /etc/CHORUS
readOnly: true
- name: secrets
mountPath: /etc/secrets
@@ -711,18 +711,18 @@ spec:
volumes:
- name: config
configMap:
name: bzzz-crypto-config
name: CHORUS-crypto-config
- name: secrets
secret:
secretName: bzzz-crypto-secrets
secretName: CHORUS-crypto-secrets
---
apiVersion: v1
kind: Service
metadata:
name: bzzz-crypto-service
name: CHORUS-crypto-service
spec:
selector:
app: bzzz-crypto
app: CHORUS-crypto
ports:
- port: 443
targetPort: 8443
@@ -805,7 +805,7 @@ groups:
```json
{
"dashboard": {
"title": "BZZZ Crypto Security Dashboard",
"title": "CHORUS Crypto Security Dashboard",
"panels": [
{
"title": "Security Events",
@@ -844,7 +844,7 @@ groups:
## Conclusion
The BZZZ Role-Based Encryption System provides enterprise-grade security for contextual intelligence with comprehensive features including multi-layer encryption, sophisticated access controls, automated key management, and extensive compliance monitoring. The system is designed to scale to enterprise requirements while maintaining the highest security standards and providing complete audit transparency.
The CHORUS Role-Based Encryption System provides enterprise-grade security for contextual intelligence with comprehensive features including multi-layer encryption, sophisticated access controls, automated key management, and extensive compliance monitoring. The system is designed to scale to enterprise requirements while maintaining the highest security standards and providing complete audit transparency.
For additional information, support, or contributions, please refer to the project documentation or contact the security team.

View File

@@ -1,6 +1,6 @@
// Package crypto provides Age encryption implementation for role-based content security in BZZZ.
// Package crypto provides Age encryption implementation for role-based content security in CHORUS.
//
// This package implements the cryptographic foundation for BZZZ Phase 2B, enabling:
// This package implements the cryptographic foundation for CHORUS Phase 2B, enabling:
// - Role-based content encryption using Age (https://age-encryption.org)
// - Hierarchical access control based on agent authority levels
// - Multi-recipient encryption for shared content
@@ -36,13 +36,13 @@ import (
"strings"
"filippo.io/age" // Modern, secure encryption library
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// AgeCrypto handles Age encryption for role-based content security.
//
// This is the primary interface for encrypting and decrypting UCXL content
// based on BZZZ role hierarchies. It provides methods to:
// based on CHORUS role hierarchies. It provides methods to:
// - Encrypt content for specific roles or multiple roles
// - Decrypt content using the current agent's role key
// - Validate Age key formats and generate new key pairs
@@ -55,13 +55,13 @@ import (
//
// Thread Safety: AgeCrypto is safe for concurrent use across goroutines.
type AgeCrypto struct {
config *config.Config // BZZZ configuration containing role definitions
config *config.Config // CHORUS configuration containing role definitions
}
// NewAgeCrypto creates a new Age crypto handler for role-based encryption.
//
// Parameters:
// cfg: BZZZ configuration containing role definitions and agent settings
// cfg: CHORUS configuration containing role definitions and agent settings
//
// Returns:
// *AgeCrypto: Configured crypto handler ready for encryption/decryption
@@ -81,7 +81,7 @@ func NewAgeCrypto(cfg *config.Config) *AgeCrypto {
// GenerateAgeKeyPair generates a new Age X25519 key pair for role-based encryption.
//
// This function creates cryptographically secure Age key pairs suitable for
// role-based content encryption. Each role in BZZZ should have its own key pair
// role-based content encryption. Each role in CHORUS should have its own key pair
// to enable proper access control and content segmentation.
//
// Returns:

View File

@@ -36,7 +36,7 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// AuditLoggerImpl implements comprehensive audit logging

View File

@@ -31,8 +31,8 @@ import (
"time"
"golang.org/x/crypto/pbkdf2"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/security"
"chorus/pkg/config"
"chorus/pkg/security"
)
// Type aliases for backward compatibility

View File

@@ -29,9 +29,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/config"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// RoleCryptoTestSuite provides comprehensive testing for role-based encryption

View File

@@ -9,7 +9,7 @@ import (
"testing"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// TestSecurityConfig tests SecurityConfig enforcement

View File

@@ -17,7 +17,7 @@ import (
"crypto/sha256"
)
// LibP2PDHT provides distributed hash table functionality for BZZZ peer discovery
// LibP2PDHT provides distributed hash table functionality for CHORUS peer discovery
type LibP2PDHT struct {
host host.Host
kdht *dht.IpfsDHT
@@ -42,7 +42,7 @@ type Config struct {
// Bootstrap nodes for initial DHT discovery
BootstrapPeers []multiaddr.Multiaddr
// Protocol prefix for BZZZ DHT
// Protocol prefix for CHORUS DHT
ProtocolPrefix string
// Bootstrap timeout
@@ -71,7 +71,7 @@ type PeerInfo struct {
// DefaultConfig returns a default DHT configuration
func DefaultConfig() *Config {
return &Config{
ProtocolPrefix: "/bzzz",
ProtocolPrefix: "/CHORUS",
BootstrapTimeout: 30 * time.Second,
DiscoveryInterval: 60 * time.Second,
Mode: dht.ModeAuto,
@@ -373,7 +373,7 @@ func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerIn
d.peersMutex.RUnlock()
// Also search DHT for role-based keys
roleKey := fmt.Sprintf("bzzz:role:%s", role)
roleKey := fmt.Sprintf("CHORUS:role:%s", role)
providers, err := d.FindProviders(ctx, roleKey, 10)
if err != nil {
// Return local peers even if DHT search fails
@@ -408,13 +408,13 @@ func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerIn
// AnnounceRole announces this peer's role to the DHT
func (d *LibP2PDHT) AnnounceRole(ctx context.Context, role string) error {
roleKey := fmt.Sprintf("bzzz:role:%s", role)
roleKey := fmt.Sprintf("CHORUS:role:%s", role)
return d.Provide(ctx, roleKey)
}
// AnnounceCapability announces a capability to the DHT
func (d *LibP2PDHT) AnnounceCapability(ctx context.Context, capability string) error {
capKey := fmt.Sprintf("bzzz:capability:%s", capability)
capKey := fmt.Sprintf("CHORUS:capability:%s", capability)
return d.Provide(ctx, capKey)
}
@@ -474,8 +474,8 @@ func (d *LibP2PDHT) performDiscovery() {
ctx, cancel := context.WithTimeout(d.ctx, 30*time.Second)
defer cancel()
// Look for general BZZZ peers
providers, err := d.FindProviders(ctx, "bzzz:peer", 10)
// Look for general CHORUS peers
providers, err := d.FindProviders(ctx, "CHORUS:peer", 10)
if err != nil {
return
}

View File

@@ -15,8 +15,8 @@ import (
func TestDefaultConfig(t *testing.T) {
config := DefaultConfig()
if config.ProtocolPrefix != "/bzzz" {
t.Errorf("expected protocol prefix '/bzzz', got %s", config.ProtocolPrefix)
if config.ProtocolPrefix != "/CHORUS" {
t.Errorf("expected protocol prefix '/CHORUS', got %s", config.ProtocolPrefix)
}
if config.BootstrapTimeout != 30*time.Second {
@@ -53,8 +53,8 @@ func TestNewDHT(t *testing.T) {
t.Error("host not set correctly")
}
if d.config.ProtocolPrefix != "/bzzz" {
t.Errorf("expected protocol prefix '/bzzz', got %s", d.config.ProtocolPrefix)
if d.config.ProtocolPrefix != "/CHORUS" {
t.Errorf("expected protocol prefix '/CHORUS', got %s", d.config.ProtocolPrefix)
}
}

View File

@@ -10,10 +10,10 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/crypto"
"chorus.services/bzzz/pkg/storage"
"chorus.services/bzzz/pkg/ucxl"
"chorus/pkg/config"
"chorus/pkg/crypto"
"chorus/pkg/storage"
"chorus/pkg/ucxl"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
)
@@ -404,7 +404,7 @@ type StorageEntry struct {
func (eds *EncryptedDHTStorage) generateDHTKey(ucxlAddress string) string {
// Use SHA256 hash of the UCXL address as DHT key
hash := sha256.Sum256([]byte(ucxlAddress))
return "/bzzz/ucxl/" + base64.URLEncoding.EncodeToString(hash[:])
return "/CHORUS/ucxl/" + base64.URLEncoding.EncodeToString(hash[:])
}
// getDecryptableRoles determines which roles can decrypt content from a creator
@@ -610,7 +610,7 @@ func (eds *EncryptedDHTStorage) AnnounceContent(ucxlAddress string) error {
}
// Announce via DHT
dhtKey := "/bzzz/announcements/" + eds.generateDHTKey(ucxlAddress)
dhtKey := "/CHORUS/announcements/" + eds.generateDHTKey(ucxlAddress)
err = eds.dht.PutValue(eds.ctx, dhtKey, announcementData)
// Audit the announce operation
@@ -627,7 +627,7 @@ func (eds *EncryptedDHTStorage) AnnounceContent(ucxlAddress string) error {
// DiscoverContentPeers discovers peers that have specific UCXL content
func (eds *EncryptedDHTStorage) DiscoverContentPeers(ucxlAddress string) ([]peer.ID, error) {
dhtKey := "/bzzz/announcements/" + eds.generateDHTKey(ucxlAddress)
dhtKey := "/CHORUS/announcements/" + eds.generateDHTKey(ucxlAddress)
// This is a simplified implementation
// In a real system, you'd query multiple announcement keys

View File

@@ -5,7 +5,7 @@ import (
"testing"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// TestDHTSecurityPolicyEnforcement tests security policy enforcement in DHT operations

View File

@@ -6,7 +6,7 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
"github.com/libp2p/go-libp2p/core/peer"
)

View File

@@ -3,7 +3,7 @@ package dht
import (
"fmt"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// NewRealDHT creates a new real DHT implementation

View File

@@ -9,8 +9,8 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pubsub"
"chorus/pkg/config"
"chorus/pubsub"
libp2p "github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
)
@@ -150,11 +150,11 @@ func (em *ElectionManager) Start() error {
log.Printf("🗳️ Starting election manager for node %s", em.nodeID)
// TODO: Subscribe to election-related messages - pubsub interface needs update
// if err := em.pubsub.Subscribe("bzzz/election/v1", em.handleElectionMessage); err != nil {
// if err := em.pubsub.Subscribe("CHORUS/election/v1", em.handleElectionMessage); err != nil {
// return fmt.Errorf("failed to subscribe to election messages: %w", err)
// }
//
// if err := em.pubsub.Subscribe("bzzz/admin/heartbeat/v1", em.handleAdminHeartbeat); err != nil {
// if err := em.pubsub.Subscribe("CHORUS/admin/heartbeat/v1", em.handleAdminHeartbeat); err != nil {
// return fmt.Errorf("failed to subscribe to admin heartbeat: %w", err)
// }
@@ -840,7 +840,7 @@ func (em *ElectionManager) publishElectionMessage(msg ElectionMessage) error {
}
// TODO: Fix pubsub interface
// return em.pubsub.Publish("bzzz/election/v1", data)
// return em.pubsub.Publish("CHORUS/election/v1", data)
_ = data // Avoid unused variable
return nil
}
@@ -865,7 +865,7 @@ func (em *ElectionManager) SendAdminHeartbeat() error {
}
// TODO: Fix pubsub interface
// return em.pubsub.Publish("bzzz/admin/heartbeat/v1", data)
// return em.pubsub.Publish("CHORUS/admin/heartbeat/v1", data)
_ = data // Avoid unused variable
return nil
}

View File

@@ -5,7 +5,7 @@ import (
"testing"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
func TestElectionManager_NewElectionManager(t *testing.T) {

View File

@@ -4,7 +4,7 @@ import (
"context"
"time"
// slurpContext "chorus.services/bzzz/pkg/slurp/context"
// slurpContext "chorus/pkg/slurp/context"
)
// SLURPElection extends the base Election interface to include Project Manager contextual intelligence duties

View File

@@ -9,8 +9,8 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pubsub"
"chorus/pkg/config"
"chorus/pubsub"
libp2p "github.com/libp2p/go-libp2p/core/host"
)

View File

@@ -5,7 +5,7 @@ import (
"log"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// SLURPCandidateCapabilities represents SLURP-specific capabilities for election candidates

View File

@@ -5,8 +5,8 @@ import (
"encoding/json"
"fmt"
"chorus.services/bzzz/pubsub"
"chorus.services/bzzz/pkg/dht"
"chorus/pubsub"
"chorus/pkg/dht"
)
// PubSubAdapter adapts the existing PubSub system to the health check interface

View File

@@ -7,12 +7,12 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/election"
"chorus.services/bzzz/pubsub"
"chorus/pkg/dht"
"chorus/pkg/election"
"chorus/pubsub"
)
// EnhancedHealthChecks provides comprehensive health monitoring for BZZZ infrastructure
// EnhancedHealthChecks provides comprehensive health monitoring for CHORUS infrastructure
type EnhancedHealthChecks struct {
mu sync.RWMutex
manager *Manager
@@ -211,7 +211,7 @@ func (ehc *EnhancedHealthChecks) createEnhancedPubSubCheck() *HealthCheck {
// Generate unique test data
testID := fmt.Sprintf("health-test-%d", time.Now().UnixNano())
testTopic := "bzzz/health/enhanced/v1"
testTopic := "CHORUS/health/enhanced/v1"
testData := map[string]interface{}{
"test_id": testID,

View File

@@ -6,7 +6,7 @@ import (
"net/http"
"time"
"chorus.services/bzzz/pkg/shutdown"
"chorus/pkg/shutdown"
)
// IntegrationExample demonstrates how to integrate health monitoring and graceful shutdown
@@ -75,7 +75,7 @@ func setupHealthChecks(healthManager *Manager) {
healthManager.RegisterCheck(memoryCheck)
// Disk space check (warning only)
diskCheck := CreateDiskSpaceCheck("/var/lib/bzzz", 0.90) // Alert if > 90%
diskCheck := CreateDiskSpaceCheck("/var/lib/CHORUS", 0.90) // Alert if > 90%
healthManager.RegisterCheck(diskCheck)
// Custom application-specific health check

View File

@@ -8,7 +8,7 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/shutdown"
"chorus/pkg/shutdown"
)
// Manager provides comprehensive health monitoring and integrates with graceful shutdown
@@ -565,7 +565,7 @@ func CreateActivePubSubCheck(pubsub PubSubInterface) *HealthCheck {
}
// Subscribe to test topic
testTopic := "bzzz/health-test/v1"
testTopic := "CHORUS/health-test/v1"
if err := pubsub.SubscribeToTopic(testTopic, handler); err != nil {
return CheckResult{
Healthy: false,

38
pkg/hmmm/types.go Normal file
View File

@@ -0,0 +1,38 @@
package hmmm
import (
"context"
"chorus/pubsub"
)
// Message represents an HMMM message
type Message struct {
Topic string `json:"topic"`
Type string `json:"type"`
Payload map[string]interface{} `json:"payload"`
Version interface{} `json:"version"` // Can be string or int
IssueID int64 `json:"issue_id"`
ThreadID string `json:"thread_id"`
MsgID string `json:"msg_id"`
NodeID string `json:"node_id"`
HopCount int `json:"hop_count"`
Timestamp interface{} `json:"timestamp"`
Message string `json:"message"`
}
// Router provides HMMM routing functionality using the underlying pubsub system
type Router struct {
pubsub *pubsub.PubSub
}
// NewRouter creates a new HMMM router
func NewRouter(ps *pubsub.PubSub) *Router {
return &Router{
pubsub: ps,
}
}
// Publish publishes an HMMM message to the network
func (r *Router) Publish(ctx context.Context, msg Message) error {
return r.pubsub.PublishToDynamicTopic(msg.Topic, pubsub.MessageType(msg.Type), msg.Payload)
}

View File

@@ -13,7 +13,7 @@ type Joiner func(topic string) error
// Publisher publishes a raw JSON payload to a topic.
type Publisher func(topic string, payload []byte) error
// Adapter bridges BZZZ pub/sub to a RawPublisher-compatible interface.
// Adapter bridges CHORUS pub/sub to a RawPublisher-compatible interface.
// It does not impose any message envelope so HMMM can publish raw JSON frames.
// The adapter provides additional features like topic caching, metrics, and validation.
type Adapter struct {
@@ -53,7 +53,7 @@ func DefaultAdapterConfig() AdapterConfig {
}
// NewAdapter constructs a new adapter with explicit join/publish hooks.
// Wire these to BZZZ pubsub methods, e.g., JoinDynamicTopic and a thin PublishRaw helper.
// Wire these to CHORUS pubsub methods, e.g., JoinDynamicTopic and a thin PublishRaw helper.
func NewAdapter(join Joiner, publish Publisher) *Adapter {
return NewAdapterWithConfig(join, publish, DefaultAdapterConfig())
}

View File

@@ -13,10 +13,10 @@ import (
func TestAdapter_Publish_OK(t *testing.T) {
var joined, published bool
a := NewAdapter(
func(topic string) error { joined = (topic == "bzzz/meta/issue/42"); return nil },
func(topic string, payload []byte) error { published = (topic == "bzzz/meta/issue/42" && len(payload) > 0); return nil },
func(topic string) error { joined = (topic == "CHORUS/meta/issue/42"); return nil },
func(topic string, payload []byte) error { published = (topic == "CHORUS/meta/issue/42" && len(payload) > 0); return nil },
)
if err := a.Publish(context.Background(), "bzzz/meta/issue/42", []byte(`{"ok":true}`)); err != nil {
if err := a.Publish(context.Background(), "CHORUS/meta/issue/42", []byte(`{"ok":true}`)); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if !joined || !published {
@@ -130,7 +130,7 @@ func TestAdapter_Publish_TopicCaching(t *testing.T) {
func(topic string, payload []byte) error { return nil },
)
topic := "bzzz/meta/issue/123"
topic := "CHORUS/meta/issue/123"
// First publish should join
err := a.Publish(context.Background(), topic, []byte(`{"msg1":true}`))
@@ -233,7 +233,7 @@ func TestAdapter_ConcurrentPublish(t *testing.T) {
for i := 0; i < numGoroutines; i++ {
go func(id int) {
defer wg.Done()
topic := fmt.Sprintf("bzzz/meta/issue/%d", id%numTopics)
topic := fmt.Sprintf("CHORUS/meta/issue/%d", id%numTopics)
payload := fmt.Sprintf(`{"id":%d}`, id)
err := a.Publish(context.Background(), topic, []byte(payload))

View File

@@ -7,12 +7,12 @@ import (
"testing"
"time"
"chorus.services/bzzz/p2p"
"chorus.services/bzzz/pubsub"
"chorus.services/hmmm/pkg/hmmm"
"chorus/p2p"
"chorus/pubsub"
"chorus/pkg/hmmm"
)
// TestAdapterPubSubIntegration tests the complete integration between the adapter and BZZZ pubsub
// TestAdapterPubSubIntegration tests the complete integration between the adapter and CHORUS pubsub
func TestAdapterPubSubIntegration(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
@@ -25,20 +25,20 @@ func TestAdapterPubSubIntegration(t *testing.T) {
defer node.Close()
// Create PubSub system
ps, err := pubsub.NewPubSub(ctx, node.Host(), "bzzz/test/coordination", "hmmm/test/meta-discussion")
ps, err := pubsub.NewPubSub(ctx, node.Host(), "CHORUS/test/coordination", "hmmm/test/meta-discussion")
if err != nil {
t.Fatalf("Failed to create PubSub: %v", err)
}
defer ps.Close()
// Create adapter using actual BZZZ pubsub methods
// Create adapter using actual CHORUS pubsub methods
adapter := NewAdapter(
ps.JoinDynamicTopic,
ps.PublishRaw,
)
// Test publishing to a per-issue topic
topic := "bzzz/meta/issue/integration-test-42"
topic := "CHORUS/meta/issue/integration-test-42"
testPayload := []byte(`{"version": 1, "type": "meta_msg", "issue_id": 42, "message": "Integration test message"}`)
err = adapter.Publish(ctx, topic, testPayload)
@@ -93,7 +93,7 @@ func TestHMMMRouterIntegration(t *testing.T) {
defer node.Close()
// Create PubSub system
ps, err := pubsub.NewPubSub(ctx, node.Host(), "bzzz/test/coordination", "hmmm/test/meta-discussion")
ps, err := pubsub.NewPubSub(ctx, node.Host(), "CHORUS/test/coordination", "hmmm/test/meta-discussion")
if err != nil {
t.Fatalf("Failed to create PubSub: %v", err)
}
@@ -158,7 +158,7 @@ func TestPerIssueTopicPublishing(t *testing.T) {
defer node.Close()
// Create PubSub system
ps, err := pubsub.NewPubSub(ctx, node.Host(), "bzzz/test/coordination", "hmmm/test/meta-discussion")
ps, err := pubsub.NewPubSub(ctx, node.Host(), "CHORUS/test/coordination", "hmmm/test/meta-discussion")
if err != nil {
t.Fatalf("Failed to create PubSub: %v", err)
}
@@ -238,7 +238,7 @@ func TestConcurrentPerIssuePublishing(t *testing.T) {
defer node.Close()
// Create PubSub system
ps, err := pubsub.NewPubSub(ctx, node.Host(), "bzzz/test/coordination", "hmmm/test/meta-discussion")
ps, err := pubsub.NewPubSub(ctx, node.Host(), "CHORUS/test/coordination", "hmmm/test/meta-discussion")
if err != nil {
t.Fatalf("Failed to create PubSub: %v", err)
}
@@ -321,7 +321,7 @@ func TestAdapterValidation(t *testing.T) {
defer node.Close()
// Create PubSub system
ps, err := pubsub.NewPubSub(ctx, node.Host(), "bzzz/test/coordination", "hmmm/test/meta-discussion")
ps, err := pubsub.NewPubSub(ctx, node.Host(), "CHORUS/test/coordination", "hmmm/test/meta-discussion")
if err != nil {
t.Fatalf("Failed to create PubSub: %v", err)
}

View File

@@ -9,7 +9,7 @@ import (
"time"
)
// TestPerIssueTopicSmokeTest tests the per-issue topic functionality without full BZZZ integration
// TestPerIssueTopicSmokeTest tests the per-issue topic functionality without full CHORUS integration
func TestPerIssueTopicSmokeTest(t *testing.T) {
// Mock pubsub functions that track calls
joinedTopics := make(map[string]int)
@@ -34,7 +34,7 @@ func TestPerIssueTopicSmokeTest(t *testing.T) {
// Test per-issue topic publishing
issueID := int64(42)
topic := fmt.Sprintf("bzzz/meta/issue/%d", issueID)
topic := fmt.Sprintf("CHORUS/meta/issue/%d", issueID)
testMessage := map[string]interface{}{
"version": 1,
@@ -152,7 +152,7 @@ func TestMultiplePerIssueTopics(t *testing.T) {
issueIDs := []int64{100, 200, 300}
for _, issueID := range issueIDs {
topic := fmt.Sprintf("bzzz/meta/issue/%d", issueID)
topic := fmt.Sprintf("CHORUS/meta/issue/%d", issueID)
testMessage := map[string]interface{}{
"version": 1,
@@ -180,7 +180,7 @@ func TestMultiplePerIssueTopics(t *testing.T) {
// Verify all topics were joined once
mu.Lock()
for _, issueID := range issueIDs {
topic := fmt.Sprintf("bzzz/meta/issue/%d", issueID)
topic := fmt.Sprintf("CHORUS/meta/issue/%d", issueID)
if joinedTopics[topic] != 1 {
t.Errorf("Expected topic %s to be joined once, got %d times", topic, joinedTopics[topic])
}
@@ -258,7 +258,7 @@ func TestHMMMMessageFormat(t *testing.T) {
t.Fatalf("Failed to marshal HMMM message: %v", err)
}
topic := "bzzz/meta/issue/42"
topic := "CHORUS/meta/issue/42"
err = adapter.Publish(context.Background(), topic, payload)
if err != nil {
t.Fatalf("Failed to publish HMMM message: %v", err)

View File

@@ -8,8 +8,8 @@ import (
"log"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/ucxl"
"chorus/pkg/dht"
"chorus/pkg/ucxl"
)
// DecisionPublisher handles publishing decisions to encrypted DHT storage

View File

@@ -11,7 +11,7 @@ import (
"strings"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// SlurpClient handles HTTP communication with SLURP endpoints
@@ -150,7 +150,7 @@ func (c *SlurpClient) CreateEventsBatch(ctx context.Context, events []SlurpEvent
batchRequest := BatchEventRequest{
Events: events,
Source: "bzzz-hmmm-integration",
Source: "CHORUS-hmmm-integration",
}
batchData, err := json.Marshal(batchRequest)

View File

@@ -9,9 +9,9 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pubsub"
"chorus/pkg/config"
"chorus/pkg/ucxl"
"chorus/pubsub"
"github.com/libp2p/go-libp2p/core/peer"
)

View File

@@ -8,7 +8,7 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// ReliableSlurpClient wraps SlurpClient with reliability features

View File

@@ -8,14 +8,14 @@ import (
"sync"
"time"
"chorus.services/bzzz/logging"
"chorus.services/bzzz/p2p"
"chorus.services/bzzz/pubsub"
"chorus/internal/logging"
"chorus/p2p"
"chorus/pubsub"
"github.com/gorilla/websocket"
"github.com/sashabaranov/go-openai"
)
// McpServer integrates BZZZ P2P network with MCP protocol for GPT-4 agents
// McpServer integrates CHORUS P2P network with MCP protocol for GPT-4 agents
type McpServer struct {
// Core components
p2pNode *p2p.Node
@@ -51,7 +51,7 @@ type ServerStats struct {
mutex sync.RWMutex
}
// GPTAgent represents a GPT-4 agent integrated with BZZZ network
// GPTAgent represents a GPT-4 agent integrated with CHORUS network
type GPTAgent struct {
ID string
Role AgentRole
@@ -310,7 +310,7 @@ func (s *McpServer) CreateGPTAgent(config *AgentConfig) (*GPTAgent, error) {
s.agents[agent.ID] = agent
s.agentsMutex.Unlock()
// Announce agent to BZZZ network
// Announce agent to CHORUS network
if err := s.announceAgent(agent); err != nil {
return nil, fmt.Errorf("failed to announce agent: %w", err)
}
@@ -485,7 +485,7 @@ func (s *McpServer) handleBzzzAnnounce(args map[string]interface{}) (map[string]
"node_id": s.p2pNode.ID().ShortString(),
}
// Publish to BZZZ network
// Publish to CHORUS network
if err := s.pubsub.PublishBzzzMessage(pubsub.CapabilityBcast, announcement); err != nil {
return nil, fmt.Errorf("failed to announce: %w", err)
}
@@ -500,7 +500,7 @@ func (s *McpServer) handleBzzzAnnounce(args map[string]interface{}) (map[string]
// Helper methods
// announceAgent announces an agent to the BZZZ network
// announceAgent announces an agent to the CHORUS network
func (s *McpServer) announceAgent(agent *GPTAgent) error {
announcement := map[string]interface{}{
"type": "gpt_agent_announcement",

View File

@@ -13,7 +13,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
)
// BZZZMetrics provides comprehensive Prometheus metrics for the BZZZ system
// BZZZMetrics provides comprehensive Prometheus metrics for the CHORUS system
type BZZZMetrics struct {
registry *prometheus.Registry
httpServer *http.Server

View File

@@ -7,13 +7,13 @@ import (
"strings"
"time"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/p2p"
"chorus/pkg/config"
"chorus/pkg/dht"
"chorus/p2p"
"github.com/libp2p/go-libp2p/core/peer"
)
// ProtocolManager manages the BZZZ v2 protocol components
// ProtocolManager manages the CHORUS v2 protocol components
type ProtocolManager struct {
config *config.Config
node *p2p.Node
@@ -97,7 +97,7 @@ func (pm *ProtocolManager) IsEnabled() bool {
return pm.enabled
}
// ResolveURI resolves a bzzz:// URI to peer addresses
// ResolveURI resolves a CHORUS:// URI to peer addresses
func (pm *ProtocolManager) ResolveURI(ctx context.Context, uriStr string) (*ResolutionResult, error) {
if !pm.enabled {
return nil, fmt.Errorf("v2 protocol not enabled")
@@ -205,7 +205,7 @@ func (pm *ProtocolManager) announcePeerToDHT(ctx context.Context, capability *Pe
}
// Announce general peer presence
if err := dht.Provide(ctx, "bzzz:peer"); err != nil {
if err := dht.Provide(ctx, "CHORUS:peer"); err != nil {
// Log error but don't fail
}
@@ -249,7 +249,7 @@ func (pm *ProtocolManager) FindPeersByRole(ctx context.Context, role string) ([]
return result, nil
}
// ValidateURI validates a bzzz:// URI
// ValidateURI validates a CHORUS:// URI
func (pm *ProtocolManager) ValidateURI(uriStr string) error {
if !pm.enabled {
return fmt.Errorf("v2 protocol not enabled")
@@ -259,7 +259,7 @@ func (pm *ProtocolManager) ValidateURI(uriStr string) error {
return err
}
// CreateURI creates a bzzz:// URI with the given components
// CreateURI creates a CHORUS:// URI with the given components
func (pm *ProtocolManager) CreateURI(agent, role, project, task, path string) (*BzzzURI, error) {
if !pm.enabled {
return nil, fmt.Errorf("v2 protocol not enabled")
@@ -313,7 +313,7 @@ func (pm *ProtocolManager) getProjectFromConfig() string {
}
// Default project if none can be inferred
return "bzzz"
return "CHORUS"
}
// GetStats returns protocol statistics

View File

@@ -151,7 +151,7 @@ func (r *Resolver) UpdatePeerStatus(peerID peer.ID, status string) {
}
}
// Resolve resolves a bzzz:// URI to peer addresses
// Resolve resolves a CHORUS:// URI to peer addresses
func (r *Resolver) Resolve(ctx context.Context, uri *BzzzURI, strategy ...ResolutionStrategy) (*ResolutionResult, error) {
if uri == nil {
return nil, fmt.Errorf("nil URI")
@@ -181,7 +181,7 @@ func (r *Resolver) Resolve(ctx context.Context, uri *BzzzURI, strategy ...Resolu
return result, nil
}
// ResolveString resolves a bzzz:// URI string to peer addresses
// ResolveString resolves a CHORUS:// URI string to peer addresses
func (r *Resolver) ResolveString(ctx context.Context, uriStr string, strategy ...ResolutionStrategy) (*ResolutionResult, error) {
uri, err := ParseBzzzURI(uriStr)
if err != nil {

View File

@@ -155,7 +155,7 @@ func TestResolveURI(t *testing.T) {
})
// Test exact match
uri, err := ParseBzzzURI("bzzz://claude:frontend@chorus:react")
uri, err := ParseBzzzURI("CHORUS://claude:frontend@chorus:react")
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
@@ -196,7 +196,7 @@ func TestResolveURIWithWildcards(t *testing.T) {
})
// Test wildcard match
uri, err := ParseBzzzURI("bzzz://claude:*@*:*")
uri, err := ParseBzzzURI("CHORUS://claude:*@*:*")
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
@@ -223,7 +223,7 @@ func TestResolveURIWithOfflinePeers(t *testing.T) {
Status: "offline", // This peer should be filtered out
})
uri, err := ParseBzzzURI("bzzz://claude:frontend@*:*")
uri, err := ParseBzzzURI("CHORUS://claude:frontend@*:*")
if err != nil {
t.Fatalf("failed to parse URI: %v", err)
}
@@ -250,7 +250,7 @@ func TestResolveString(t *testing.T) {
})
ctx := context.Background()
result, err := resolver.ResolveString(ctx, "bzzz://claude:frontend@*:*")
result, err := resolver.ResolveString(ctx, "CHORUS://claude:frontend@*:*")
if err != nil {
t.Fatalf("failed to resolve string: %v", err)
}
@@ -271,7 +271,7 @@ func TestResolverCaching(t *testing.T) {
})
ctx := context.Background()
uri := "bzzz://claude:frontend@*:*"
uri := "CHORUS://claude:frontend@*:*"
// First resolution should hit the resolver
result1, err := resolver.ResolveString(ctx, uri)
@@ -324,7 +324,7 @@ func TestResolutionStrategies(t *testing.T) {
})
ctx := context.Background()
uri, _ := ParseBzzzURI("bzzz://claude:frontend@*:*")
uri, _ := ParseBzzzURI("CHORUS://claude:frontend@*:*")
// Test different strategies
strategies := []ResolutionStrategy{

View File

@@ -7,13 +7,13 @@ import (
"strings"
)
// BzzzURI represents a parsed bzzz:// URI with semantic addressing
// Grammar: bzzz://[agent]:[role]@[project]:[task]/[path][?query][#fragment]
// BzzzURI represents a parsed CHORUS:// URI with semantic addressing
// Grammar: CHORUS://[agent]:[role]@[project]:[task]/[path][?query][#fragment]
type BzzzURI struct {
// Core addressing components
Agent string // Agent identifier (e.g., "claude", "any", "*")
Role string // Agent role (e.g., "frontend", "backend", "architect")
Project string // Project context (e.g., "chorus", "bzzz")
Project string // Project context (e.g., "chorus", "CHORUS")
Task string // Task identifier (e.g., "implement", "review", "test", "*")
// Resource path
@@ -29,7 +29,7 @@ type BzzzURI struct {
// URI grammar constants
const (
BzzzScheme = "bzzz"
BzzzScheme = "CHORUS"
// Special identifiers
AnyAgent = "any"
@@ -49,10 +49,10 @@ var (
pathPattern = regexp.MustCompile(`^/[a-zA-Z0-9\-_/\.]*$|^$`)
// Full URI pattern for validation
bzzzURIPattern = regexp.MustCompile(`^bzzz://([a-zA-Z0-9\-_*]|any):([a-zA-Z0-9\-_*]|any)@([a-zA-Z0-9\-_*]|any):([a-zA-Z0-9\-_*]|any)(/[a-zA-Z0-9\-_/\.]*)?(\?[^#]*)?(\#.*)?$`)
bzzzURIPattern = regexp.MustCompile(`^CHORUS://([a-zA-Z0-9\-_*]|any):([a-zA-Z0-9\-_*]|any)@([a-zA-Z0-9\-_*]|any):([a-zA-Z0-9\-_*]|any)(/[a-zA-Z0-9\-_/\.]*)?(\?[^#]*)?(\#.*)?$`)
)
// ParseBzzzURI parses a bzzz:// URI string into a BzzzURI struct
// ParseBzzzURI parses a CHORUS:// URI string into a BzzzURI struct
func ParseBzzzURI(uri string) (*BzzzURI, error) {
if uri == "" {
return nil, fmt.Errorf("empty URI")
@@ -292,14 +292,14 @@ func (u *BzzzURI) ToAddress() string {
return fmt.Sprintf("%s:%s@%s:%s", u.Agent, u.Role, u.Project, u.Task)
}
// ValidateBzzzURIString validates a bzzz:// URI string without parsing
// ValidateBzzzURIString validates a CHORUS:// URI string without parsing
func ValidateBzzzURIString(uri string) error {
if uri == "" {
return fmt.Errorf("empty URI")
}
if !bzzzURIPattern.MatchString(uri) {
return fmt.Errorf("invalid bzzz:// URI format")
return fmt.Errorf("invalid CHORUS:// URI format")
}
return nil

View File

@@ -13,50 +13,50 @@ func TestParseBzzzURI(t *testing.T) {
}{
{
name: "valid basic URI",
uri: "bzzz://claude:frontend@chorus:implement/src/main.go",
uri: "CHORUS://claude:frontend@chorus:implement/src/main.go",
expected: &BzzzURI{
Agent: "claude",
Role: "frontend",
Project: "chorus",
Task: "implement",
Path: "/src/main.go",
Raw: "bzzz://claude:frontend@chorus:implement/src/main.go",
Raw: "CHORUS://claude:frontend@chorus:implement/src/main.go",
},
},
{
name: "URI with wildcards",
uri: "bzzz://any:*@*:test",
uri: "CHORUS://any:*@*:test",
expected: &BzzzURI{
Agent: "any",
Role: "*",
Project: "*",
Task: "test",
Raw: "bzzz://any:*@*:test",
Raw: "CHORUS://any:*@*:test",
},
},
{
name: "URI with query and fragment",
uri: "bzzz://claude:backend@bzzz:debug/api/handler.go?type=error#line123",
uri: "CHORUS://claude:backend@CHORUS:debug/api/handler.go?type=error#line123",
expected: &BzzzURI{
Agent: "claude",
Role: "backend",
Project: "bzzz",
Project: "CHORUS",
Task: "debug",
Path: "/api/handler.go",
Query: "type=error",
Fragment: "line123",
Raw: "bzzz://claude:backend@bzzz:debug/api/handler.go?type=error#line123",
Raw: "CHORUS://claude:backend@CHORUS:debug/api/handler.go?type=error#line123",
},
},
{
name: "URI without path",
uri: "bzzz://any:architect@project:review",
uri: "CHORUS://any:architect@project:review",
expected: &BzzzURI{
Agent: "any",
Role: "architect",
Project: "project",
Task: "review",
Raw: "bzzz://any:architect@project:review",
Raw: "CHORUS://any:architect@project:review",
},
},
{
@@ -66,12 +66,12 @@ func TestParseBzzzURI(t *testing.T) {
},
{
name: "missing role",
uri: "bzzz://claude@chorus:implement",
uri: "CHORUS://claude@chorus:implement",
expectError: true,
},
{
name: "missing task",
uri: "bzzz://claude:frontend@chorus",
uri: "CHORUS://claude:frontend@chorus",
expectError: true,
},
{
@@ -81,7 +81,7 @@ func TestParseBzzzURI(t *testing.T) {
},
{
name: "invalid format",
uri: "bzzz://invalid",
uri: "CHORUS://invalid",
expectError: true,
},
}
@@ -307,20 +307,20 @@ func TestBzzzURIString(t *testing.T) {
Task: "implement",
Path: "/src/main.go",
},
expected: "bzzz://claude:frontend@chorus:implement/src/main.go",
expected: "CHORUS://claude:frontend@chorus:implement/src/main.go",
},
{
name: "URI with query and fragment",
uri: &BzzzURI{
Agent: "claude",
Role: "backend",
Project: "bzzz",
Project: "CHORUS",
Task: "debug",
Path: "/api/handler.go",
Query: "type=error",
Fragment: "line123",
},
expected: "bzzz://claude:backend@bzzz:debug/api/handler.go?type=error#line123",
expected: "CHORUS://claude:backend@CHORUS:debug/api/handler.go?type=error#line123",
},
{
name: "URI without path",
@@ -330,7 +330,7 @@ func TestBzzzURIString(t *testing.T) {
Project: "project",
Task: "review",
},
expected: "bzzz://any:architect@project:review",
expected: "CHORUS://any:architect@project:review",
},
}
@@ -479,7 +479,7 @@ func TestValidateBzzzURIString(t *testing.T) {
}{
{
name: "valid URI",
uri: "bzzz://claude:frontend@chorus:implement/src/main.go",
uri: "CHORUS://claude:frontend@chorus:implement/src/main.go",
expectError: false,
},
{

199
pkg/repository/types.go Normal file
View File

@@ -0,0 +1,199 @@
package repository
import (
"time"
)
// Task represents a task from a repository (GitHub issue, GitLab MR, etc.)
// Fields are populated by a TaskProvider implementation; Metadata carries
// provider-specific data that does not fit the common schema.
type Task struct {
	Number            int                    `json:"number"`     // provider-assigned identifier (issue/MR number)
	Title             string                 `json:"title"`
	Body              string                 `json:"body"`
	Repository        string                 `json:"repository"` // repository the task belongs to
	Labels            []string               `json:"labels"`     // provider labels; matched against agent expertise by TaskMatcher
	Priority          int                    `json:"priority"`   // relative urgency; higher is more urgent (see DefaultTaskMatcher usage)
	Complexity        int                    `json:"complexity"` // estimated difficulty; scale presumably provider-defined — TODO confirm
	Status            string                 `json:"status"`
	CreatedAt         time.Time              `json:"created_at"`
	UpdatedAt         time.Time              `json:"updated_at"`
	Metadata          map[string]interface{} `json:"metadata"`
	RequiredRole      string                 `json:"required_role"`      // agent role needed to work the task, if any
	RequiredExpertise []string               `json:"required_expertise"` // skills needed to work the task, if any
}
// TaskProvider interface for different repository providers (GitHub, GitLab, etc.)
// Implementations translate provider-specific APIs into the shared Task model.
type TaskProvider interface {
	// GetTasks returns all tasks for the given project.
	GetTasks(projectID int) ([]*Task, error)
	// ClaimTask attempts to claim a task for the given agent; the boolean
	// reports whether the claim succeeded.
	ClaimTask(taskNumber int, agentID string) (bool, error)
	// UpdateTaskStatus sets the task's status and posts an optional comment.
	UpdateTaskStatus(task *Task, status string, comment string) error
	// CompleteTask marks the task as finished with the given result.
	CompleteTask(task *Task, result *TaskResult) error
	// GetTaskDetails fetches a single task by project and task number.
	GetTaskDetails(projectID int, taskNumber int) (*Task, error)
	// ListAvailableTasks returns tasks that are open for claiming.
	ListAvailableTasks(projectID int) ([]*Task, error)
}
// TaskMatcher determines if an agent should work on a task
// and how well a task fits an agent's expertise.
type TaskMatcher interface {
	// ShouldProcessTask reports whether the agent should take the task.
	ShouldProcessTask(task *Task, agentInfo *AgentInfo) bool
	// CalculateTaskPriority returns an integer priority used for ordering work.
	CalculateTaskPriority(task *Task, agentInfo *AgentInfo) int
	// ScoreTaskForAgent returns a numeric fitness score used for ranking.
	ScoreTaskForAgent(task *Task, agentInfo *AgentInfo) float64
}
// ProviderFactory creates task providers for different repository types
type ProviderFactory interface {
	// CreateProvider builds a TaskProvider from the given configuration.
	// ctx is intentionally untyped; implementations decide what context
	// (if any) they accept — TODO confirm intended context type.
	CreateProvider(ctx interface{}, config *Config) (TaskProvider, error)
	// GetSupportedTypes lists the repository types this factory can create.
	GetSupportedTypes() []string
	// SupportedProviders lists supported providers; implementations are
	// expected to keep this consistent with GetSupportedTypes
	// (DefaultProviderFactory simply delegates).
	SupportedProviders() []string
}
// AgentInfo represents information about the current agent, used by
// TaskMatcher implementations for capacity and expertise checks.
type AgentInfo struct {
	ID           string                 `json:"id"`
	Role         string                 `json:"role"`
	Expertise    []string               `json:"expertise"` // skills matched against Task.Labels
	Capabilities []string               `json:"capabilities"`
	MaxTasks     int                    `json:"max_tasks"`     // maximum concurrent tasks the agent accepts
	CurrentTasks int                    `json:"current_tasks"` // tasks the agent is currently working on
	Status       string                 `json:"status"`
	LastSeen     time.Time              `json:"last_seen"`
	Performance  map[string]interface{} `json:"performance"`
	Availability string                 `json:"availability"`
}
// TaskResult represents the result of completing a task
type TaskResult struct {
	Success   bool                   `json:"success"`
	Message   string                 `json:"message"`
	Artifacts []string               `json:"artifacts"` // identifiers/paths of produced outputs
	Duration  time.Duration          `json:"duration"`  // time spent completing the task
	Metadata  map[string]interface{} `json:"metadata"`
}
// Config represents repository configuration consumed by a ProviderFactory
// to construct a TaskProvider. Settings carries provider-specific options
// that do not fit the common fields.
type Config struct {
	Type            string                 `json:"type"` // repository type (e.g. "github", "gitlab", "mock")
	Settings        map[string]interface{} `json:"settings"`
	Provider        string                 `json:"provider"`
	BaseURL         string                 `json:"base_url"`
	AccessToken     string                 `json:"access_token"` // API credential; treat as a secret
	Owner           string                 `json:"owner"`
	Repository      string                 `json:"repository"`
	TaskLabel       string                 `json:"task_label"`        // label marking items as claimable tasks
	InProgressLabel string                 `json:"in_progress_label"` // label applied while a task is being worked
	CompletedLabel  string                 `json:"completed_label"`   // label applied when a task is done
	BaseBranch      string                 `json:"base_branch"`
	BranchPrefix    string                 `json:"branch_prefix"`
}
// DefaultTaskMatcher provides a default implementation of TaskMatcher based
// on simple capacity checks and exact expertise-to-label string matching.
type DefaultTaskMatcher struct{}
// ShouldProcessTask determines if an agent should process a task.
//
// Selection logic:
//  1. An agent at or over its MaxTasks capacity never takes new work.
//  2. The task is accepted when any of the agent's expertise entries
//     exactly matches one of the task's labels.
//  3. Otherwise the task is accepted only if it is unlabeled (general
//     work) or high priority (Priority > 5).
//
// A nil task or agent is treated as "do not process" rather than panicking.
func (m *DefaultTaskMatcher) ShouldProcessTask(task *Task, agentInfo *AgentInfo) bool {
	if task == nil || agentInfo == nil {
		return false
	}
	// Capacity gate: never exceed the agent's configured task limit.
	if agentInfo.CurrentTasks >= agentInfo.MaxTasks {
		return false
	}
	// An exact expertise/label match accepts the task immediately.
	for _, expertise := range agentInfo.Expertise {
		for _, label := range task.Labels {
			if expertise == label {
				return true
			}
		}
	}
	// Fall back to general (unlabeled) or high-priority tasks.
	return len(task.Labels) == 0 || task.Priority > 5
}
// CalculateTaskPriority calculates a priority score for a task: the task's
// base priority, boosted by 2 for every agent expertise entry that matches
// one of the task's labels.
func (m *DefaultTaskMatcher) CalculateTaskPriority(task *Task, agentInfo *AgentInfo) int {
	// Index the labels once so each expertise check is a single lookup.
	labelSet := make(map[string]struct{}, len(task.Labels))
	for _, label := range task.Labels {
		labelSet[label] = struct{}{}
	}
	score := task.Priority
	for _, skill := range agentInfo.Expertise {
		if _, ok := labelSet[skill]; ok {
			score += 2
		}
	}
	return score
}
// ScoreTaskForAgent calculates a score for how well an agent matches a task.
// The base score is Priority/10; if the agent has any expertise entries,
// the fraction of them that match a task label contributes up to +0.5.
func (m *DefaultTaskMatcher) ScoreTaskForAgent(task *Task, agentInfo *AgentInfo) float64 {
	// Index the labels once so each expertise check is a single lookup.
	labelSet := make(map[string]struct{}, len(task.Labels))
	for _, label := range task.Labels {
		labelSet[label] = struct{}{}
	}
	matched := 0
	for _, skill := range agentInfo.Expertise {
		if _, ok := labelSet[skill]; ok {
			matched++
		}
	}
	score := float64(task.Priority) / 10.0
	if n := len(agentInfo.Expertise); n > 0 {
		score += (float64(matched) / float64(n)) * 0.5
	}
	return score
}
// DefaultProviderFactory provides a default implementation of ProviderFactory.
// It is a stub: it currently returns a MockTaskProvider regardless of the
// supplied configuration.
type DefaultProviderFactory struct{}

// CreateProvider creates a task provider (stub implementation).
// The config argument is currently ignored; a real implementation would
// dispatch on config.Type/config.Provider.
func (f *DefaultProviderFactory) CreateProvider(ctx interface{}, config *Config) (TaskProvider, error) {
	// In a real implementation, this would create GitHub, GitLab, etc. providers
	return &MockTaskProvider{}, nil
}

// GetSupportedTypes returns supported repository types.
func (f *DefaultProviderFactory) GetSupportedTypes() []string {
	return []string{"github", "gitlab", "mock"}
}

// SupportedProviders returns the list of supported providers.
// It delegates to GetSupportedTypes so the two lists cannot drift apart.
func (f *DefaultProviderFactory) SupportedProviders() []string {
	return f.GetSupportedTypes()
}
// MockTaskProvider provides a mock implementation for testing.
// Every method succeeds without error and returns empty/zero values;
// no external calls are made.
type MockTaskProvider struct{}

// GetTasks returns an empty (non-nil) task list.
func (p *MockTaskProvider) GetTasks(projectID int) ([]*Task, error) {
	return []*Task{}, nil
}

// ClaimTask always reports a successful claim.
func (p *MockTaskProvider) ClaimTask(taskNumber int, agentID string) (bool, error) {
	return true, nil
}

// UpdateTaskStatus is a no-op that always succeeds.
func (p *MockTaskProvider) UpdateTaskStatus(task *Task, status string, comment string) error {
	return nil
}

// CompleteTask is a no-op that always succeeds.
func (p *MockTaskProvider) CompleteTask(task *Task, result *TaskResult) error {
	return nil
}

// GetTaskDetails returns an empty Task value.
func (p *MockTaskProvider) GetTaskDetails(projectID int, taskNumber int) (*Task, error) {
	return &Task{}, nil
}

// ListAvailableTasks returns an empty (non-nil) task list.
func (p *MockTaskProvider) ListAvailableTasks(projectID int) ([]*Task, error) {
	return []*Task{}, nil
}

View File

@@ -1,4 +1,4 @@
// Package security provides shared security types and constants for BZZZ
// Package security provides shared security types and constants for CHORUS
// This package contains common security definitions that are used by both
// the crypto and slurp/roles packages to avoid circular dependencies.

View File

@@ -4,8 +4,8 @@ import (
"context"
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// GoalManager handles definition and management of project goals

View File

@@ -3,8 +3,8 @@ package alignment
import (
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// ProjectGoal represents a high-level project objective

View File

@@ -1,7 +1,7 @@
// Package context provides core context types and interfaces for the SLURP contextual intelligence system.
//
// This package defines the foundational data structures and interfaces for hierarchical
// context resolution within the BZZZ distributed AI development system. It implements
// context resolution within the CHORUS distributed AI development system. It implements
// bounded hierarchy traversal with role-based access control for efficient context
// resolution and caching.
//
@@ -10,7 +10,7 @@
// - Role-based access control and encryption for context data
// - CSS-like inheritance patterns for cascading context properties
// - Efficient caching with selective invalidation
// - Integration with BZZZ election system for leader-only generation
// - Integration with CHORUS election system for leader-only generation
//
// Core Types:
// - ContextNode: Represents a single context entry in the hierarchy
@@ -60,5 +60,5 @@
// All context data is encrypted based on role access levels before storage
// in the distributed DHT. Only nodes with appropriate role permissions can
// decrypt and access context information, ensuring secure context sharing
// across the BZZZ cluster.
// across the CHORUS cluster.
package context

View File

@@ -5,8 +5,8 @@ import (
"fmt"
"time"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/ucxl"
"chorus/pkg/config"
)
// ContextResolver defines the interface for hierarchical context resolution
@@ -437,7 +437,7 @@ Integration Examples:
4. Complete Resolution Flow Example:
// Resolve context with full BZZZ integration
// Resolve context with full CHORUS integration
func (resolver *DefaultContextResolver) ResolveWithIntegration(ctx context.Context, address ucxl.Address, role string, maxDepth int) (*ResolvedContext, error) {
// 1. Validate request
if err := ValidateContextResolutionRequest(address, role, maxDepth); err != nil {

View File

@@ -4,8 +4,8 @@ import (
"fmt"
"time"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/ucxl"
"chorus/pkg/config"
)
// ContextNode represents a hierarchical context node in the SLURP system.

View File

@@ -7,12 +7,12 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/crypto"
"chorus.services/bzzz/pkg/election"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/dht"
"chorus/pkg/crypto"
"chorus/pkg/election"
"chorus/pkg/config"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// DistributionCoordinator orchestrates distributed context operations across the cluster

View File

@@ -9,17 +9,17 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/crypto"
"chorus.services/bzzz/pkg/election"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pkg/config"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/dht"
"chorus/pkg/crypto"
"chorus/pkg/election"
"chorus/pkg/ucxl"
"chorus/pkg/config"
slurpContext "chorus/pkg/slurp/context"
)
// ContextDistributor handles distributed context operations via DHT
//
// This is the primary interface for distributing context data across the BZZZ
// This is the primary interface for distributing context data across the CHORUS
// cluster using the existing DHT infrastructure with role-based encryption
// and conflict resolution capabilities.
type ContextDistributor interface {

View File

@@ -10,15 +10,15 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/crypto"
"chorus.services/bzzz/pkg/election"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pkg/config"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/dht"
"chorus/pkg/crypto"
"chorus/pkg/election"
"chorus/pkg/ucxl"
"chorus/pkg/config"
slurpContext "chorus/pkg/slurp/context"
)
// DHTContextDistributor implements ContextDistributor using BZZZ DHT infrastructure
// DHTContextDistributor implements ContextDistributor using CHORUS DHT infrastructure
type DHTContextDistributor struct {
mu sync.RWMutex
dht *dht.DHT
@@ -52,7 +52,7 @@ func NewDHTContextDistributor(
return nil, fmt.Errorf("config is required")
}
deploymentID := fmt.Sprintf("bzzz-slurp-%s", config.Agent.ID)
deploymentID := fmt.Sprintf("CHORUS-slurp-%s", config.Agent.ID)
dist := &DHTContextDistributor{
dht: dht,

View File

@@ -1,6 +1,6 @@
// Package distribution provides context network distribution capabilities via DHT integration.
//
// This package implements distributed context sharing across the BZZZ cluster using
// This package implements distributed context sharing across the CHORUS cluster using
// the existing Distributed Hash Table (DHT) infrastructure. It provides role-based
// encrypted distribution, conflict resolution, and eventual consistency for context
// data synchronization across multiple nodes.
@@ -23,7 +23,7 @@
// - NetworkManager: Network topology and partition handling
//
// Integration Points:
// - pkg/dht: Existing BZZZ DHT infrastructure
// - pkg/dht: Existing CHORUS DHT infrastructure
// - pkg/crypto: Role-based encryption and decryption
// - pkg/election: Leader coordination for conflict resolution
// - pkg/slurp/context: Context types and validation
@@ -67,7 +67,7 @@
//
// Security Model:
// All context data is encrypted before distribution using role-specific keys
// from the BZZZ crypto system. Only nodes with appropriate role permissions
// from the CHORUS crypto system. Only nodes with appropriate role permissions
// can decrypt and access context information, ensuring secure collaborative
// development while maintaining access control boundaries.
//

View File

@@ -9,9 +9,9 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/ucxl"
"chorus/pkg/dht"
"chorus/pkg/config"
"chorus/pkg/ucxl"
)
// GossipProtocolImpl implements GossipProtocol interface for metadata synchronization

View File

@@ -10,7 +10,7 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// MonitoringSystem provides comprehensive monitoring for the distributed context system
@@ -1075,9 +1075,9 @@ func (ms *MonitoringSystem) handleDashboard(w http.ResponseWriter, r *http.Reque
html := `
<!DOCTYPE html>
<html>
<head><title>BZZZ SLURP Monitoring</title></head>
<head><title>CHORUS SLURP Monitoring</title></head>
<body>
<h1>BZZZ SLURP Distributed Context Monitoring</h1>
<h1>CHORUS SLURP Distributed Context Monitoring</h1>
<p>Monitoring dashboard placeholder</p>
</body>
</html>

View File

@@ -9,8 +9,8 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/dht"
"chorus/pkg/config"
"github.com/libp2p/go-libp2p/core/peer"
)

View File

@@ -7,9 +7,9 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/ucxl"
"chorus/pkg/dht"
"chorus/pkg/config"
"chorus/pkg/ucxl"
"github.com/libp2p/go-libp2p/core/peer"
)

View File

@@ -14,8 +14,8 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/crypto"
"chorus/pkg/config"
"chorus/pkg/crypto"
)
// SecurityManager handles all security aspects of the distributed system
@@ -653,7 +653,7 @@ func (sm *SecurityManager) generateSelfSignedCertificate() ([]byte, []byte, erro
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
Organization: []string{"BZZZ SLURP"},
Organization: []string{"CHORUS SLURP"},
Country: []string{"US"},
Province: []string{""},
Locality: []string{"San Francisco"},

View File

@@ -11,8 +11,8 @@ import (
"strings"
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// DefaultDirectoryAnalyzer provides comprehensive directory structure analysis

View File

@@ -47,7 +47,7 @@
// fmt.Printf("Role insights: %v\n", insights)
//
// Leadership Integration:
// This package is designed to be used primarily by the elected BZZZ leader node,
// This package is designed to be used primarily by the elected CHORUS leader node,
// which has the responsibility for context generation across the cluster. The
// intelligence engine coordinates with the leader election system to ensure
// only authorized nodes perform context generation operations.

View File

@@ -4,8 +4,8 @@ import (
"context"
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// IntelligenceEngine provides AI-powered context analysis and generation

View File

@@ -10,8 +10,8 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// AnalyzeFile analyzes a single file and generates contextual understanding

View File

@@ -7,7 +7,7 @@ import (
"testing"
"time"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
slurpContext "chorus/pkg/slurp/context"
)
func TestIntelligenceEngine_Integration(t *testing.T) {

View File

@@ -9,7 +9,7 @@ import (
"sync"
"time"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
slurpContext "chorus/pkg/slurp/context"
)
// GoalAlignmentEngine provides comprehensive goal alignment assessment

View File

@@ -9,7 +9,7 @@ import (
"strings"
"time"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
slurpContext "chorus/pkg/slurp/context"
)
// DefaultPatternDetector provides comprehensive pattern detection capabilities

View File

@@ -11,7 +11,7 @@ import (
"sync"
"time"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
slurpContext "chorus/pkg/slurp/context"
)
// DefaultRAGIntegration provides comprehensive RAG system integration

View File

@@ -8,8 +8,8 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/crypto"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/crypto"
slurpContext "chorus/pkg/slurp/context"
)
// RoleAwareProcessor provides role-based context processing and insight generation

View File

@@ -16,7 +16,7 @@ import (
"strings"
"time"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
slurpContext "chorus/pkg/slurp/context"
)
// Utility functions and helper types for the intelligence engine

View File

@@ -229,7 +229,7 @@ type DecisionNavigator interface {
// DistributedStorage handles distributed storage of context data.
//
// Provides encrypted, role-based storage using the existing BZZZ DHT
// Provides encrypted, role-based storage using the existing CHORUS DHT
// infrastructure with consistency guarantees and conflict resolution.
type DistributedStorage interface {
// Store stores context data in the DHT with encryption.

View File

@@ -6,7 +6,7 @@ import (
"strconv"
"strings"
"time"
"chorus.services/bzzz/pkg/config"
"chorus/pkg/config"
)
// SLURPLeaderConfig represents comprehensive configuration for SLURP-enabled leader election
@@ -280,7 +280,7 @@ func DefaultSLURPLeaderConfig() *SLURPLeaderConfig {
return &SLURPLeaderConfig{
Core: &CoreConfig{
NodeID: "", // Will be auto-generated
ClusterID: "bzzz-cluster",
ClusterID: "CHORUS-cluster",
DataDirectory: "./data",
Capabilities: []string{"admin_election", "context_curation", "project_manager"},
ProjectManagerEnabled: true,
@@ -579,11 +579,11 @@ func (cfg *SLURPLeaderConfig) GetEffectiveConfig() *SLURPLeaderConfig {
return &effective
}
// ToBaseBZZZConfig converts SLURP leader config to base BZZZ config format
// ToBaseBZZZConfig converts SLURP leader config to base CHORUS config format
func (cfg *SLURPLeaderConfig) ToBaseBZZZConfig() *config.Config {
// TODO: Convert to base BZZZ config structure
// TODO: Convert to base CHORUS config structure
// This would map SLURP-specific configuration to the existing
// BZZZ configuration structure for compatibility
// CHORUS configuration structure for compatibility
bzzzConfig := &config.Config{
// Map core settings

View File

@@ -1,9 +1,9 @@
// Package leader provides leader-specific context management duties for the SLURP system.
//
// This package implements the leader node responsibilities within the BZZZ cluster,
// This package implements the leader node responsibilities within the CHORUS cluster,
// where only the elected leader performs context generation, coordinates distributed
// operations, and manages cluster-wide contextual intelligence tasks. It integrates
// with the BZZZ election system to ensure consistent leadership and proper failover.
// with the CHORUS election system to ensure consistent leadership and proper failover.
//
// Key Features:
// - Leader-only context generation to prevent conflicts and ensure consistency
@@ -66,7 +66,7 @@
// }
//
// Leader Election Integration:
// The context manager automatically integrates with the BZZZ election system,
// The context manager automatically integrates with the CHORUS election system,
// responding to leadership changes, handling graceful transitions, and ensuring
// no context generation operations are lost during failover events. State
// transfer includes queued requests, active jobs, and coordination metadata.
@@ -105,7 +105,7 @@
// - Conflict detection and resolution for concurrent changes
//
// Security Integration:
// All leader operations integrate with the BZZZ security model:
// All leader operations integrate with the CHORUS security model:
// - Role-based authorization for context generation requests
// - Encrypted communication between leader and cluster nodes
// - Audit logging of all leadership decisions and actions

View File

@@ -7,14 +7,14 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/election"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/slurp/intelligence"
"chorus.services/bzzz/pkg/slurp/storage"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/election"
"chorus/pkg/dht"
"chorus/pkg/slurp/intelligence"
"chorus/pkg/slurp/storage"
slurpContext "chorus/pkg/slurp/context"
)
// ElectionIntegratedContextManager integrates SLURP context management with BZZZ election system
// ElectionIntegratedContextManager integrates SLURP context management with CHORUS election system
type ElectionIntegratedContextManager struct {
*LeaderContextManager // Embed the base context manager

View File

@@ -7,12 +7,12 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/election"
"chorus.services/bzzz/pkg/health"
"chorus.services/bzzz/pkg/metrics"
"chorus.services/bzzz/pkg/slurp/intelligence"
"chorus.services/bzzz/pkg/slurp/storage"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/election"
"chorus/pkg/health"
"chorus/pkg/metrics"
"chorus/pkg/slurp/intelligence"
"chorus/pkg/slurp/storage"
slurpContext "chorus/pkg/slurp/context"
)
// EnhancedLeaderManager provides enhanced leadership lifecycle management for SLURP

View File

@@ -6,13 +6,13 @@ import (
"log"
"time"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/election"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/slurp/intelligence"
"chorus.services/bzzz/pkg/slurp/storage"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus.services/bzzz/pubsub"
"chorus/pkg/config"
"chorus/pkg/election"
"chorus/pkg/dht"
"chorus/pkg/slurp/intelligence"
"chorus/pkg/slurp/storage"
slurpContext "chorus/pkg/slurp/context"
"chorus/pubsub"
libp2p "github.com/libp2p/go-libp2p/core/host"
)
@@ -282,7 +282,7 @@ func (sys *SLURPLeaderSystem) initializeContextComponents(ctx context.Context) e
func (sys *SLURPLeaderSystem) initializeElectionSystem(ctx context.Context) error {
sys.logger.Debug("Initializing election system")
// Convert to base BZZZ config
// Convert to base CHORUS config
bzzzConfig := sys.config.ToBaseBZZZConfig()
// Create SLURP election configuration

View File

@@ -8,12 +8,12 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/election"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pkg/slurp/intelligence"
"chorus.services/bzzz/pkg/slurp/storage"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/election"
"chorus/pkg/dht"
"chorus/pkg/ucxl"
"chorus/pkg/slurp/intelligence"
"chorus/pkg/slurp/storage"
slurpContext "chorus/pkg/slurp/context"
)
// ContextManager handles leader-only context generation duties

View File

@@ -3,8 +3,8 @@ package leader
import (
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// Priority represents priority levels for context generation requests

View File

@@ -3,12 +3,12 @@
// This package implements comprehensive role-based access control (RBAC) for contextual
// intelligence, ensuring that context information is appropriately filtered, encrypted,
// and distributed based on role permissions and security requirements. It integrates
// with the existing BZZZ crypto system to provide secure, scalable access control.
// with the existing CHORUS crypto system to provide secure, scalable access control.
//
// Key Features:
// - Hierarchical role definition and management
// - Context filtering based on role permissions and access levels
// - Integration with BZZZ crypto system for role-based encryption
// - Integration with CHORUS crypto system for role-based encryption
// - Dynamic permission evaluation and caching for performance
// - Role-specific context views and perspectives
// - Audit logging for access control decisions
@@ -88,7 +88,7 @@
//
// Security Model:
// All access control decisions are based on cryptographically verified
// role assignments and permissions. The system integrates with the BZZZ
// role assignments and permissions. The system integrates with the CHORUS
// crypto infrastructure to ensure secure key distribution and context
// encryption, preventing unauthorized access even in case of node
// compromise or network interception.

View File

@@ -4,9 +4,9 @@ import (
"context"
"time"
"chorus.services/bzzz/pkg/security"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/security"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// RoleManager handles definition and management of roles and permissions
@@ -184,7 +184,7 @@ type AuditLogger interface {
// EncryptionManager handles role-based encryption and key management
//
// Manages encryption keys and operations for role-based access control,
// integrating with the BZZZ crypto system for secure context storage
// integrating with the CHORUS crypto system for secure context storage
// and distribution.
type EncryptionManager interface {
// EncryptForRoles encrypts context data for specific roles

View File

@@ -3,9 +3,9 @@ package roles
import (
"time"
"chorus.services/bzzz/pkg/security"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/security"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// Stub types for interfaces (to be implemented later)

View File

@@ -1,4 +1,4 @@
// Package slurp provides contextual intelligence capabilities for BZZZ.
// Package slurp provides contextual intelligence capabilities for CHORUS.
//
// SLURP (Storage, Logic, Understanding, Retrieval, Processing) implements:
// - Hierarchical context resolution with bounded depth traversal
@@ -20,7 +20,7 @@
// - pkg/election Admin-only operations coordination
// - pkg/config Configuration extension for SLURP settings
//
// This package follows established BZZZ patterns for interfaces, error handling,
// This package follows established CHORUS patterns for interfaces, error handling,
// and distributed operations while providing native Go implementations of the
// contextual intelligence capabilities originally prototyped in Python.
package slurp
@@ -31,17 +31,17 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/config"
"chorus.services/bzzz/pkg/crypto"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/election"
"chorus/pkg/config"
"chorus/pkg/crypto"
"chorus/pkg/dht"
"chorus/pkg/election"
)
// SLURP is the main coordinator for contextual intelligence operations.
//
// It orchestrates the interaction between context resolution, temporal analysis,
// distributed storage, and intelligence generation while enforcing security
// and access controls through integration with existing BZZZ systems.
// and access controls through integration with existing CHORUS systems.
//
// Thread Safety: SLURP is safe for concurrent use across multiple goroutines.
// All public methods handle synchronization internally.
@@ -78,7 +78,7 @@ type SLURP struct {
eventMux sync.RWMutex
}
// SLURPConfig holds SLURP-specific configuration that extends the main BZZZ config
// SLURPConfig holds SLURP-specific configuration that extends the main CHORUS config
type SLURPConfig struct {
// Enable/disable SLURP system
Enabled bool `yaml:"enabled" json:"enabled"`
@@ -315,8 +315,8 @@ type SLURPEvent struct {
// NewSLURP creates a new SLURP instance with the provided dependencies.
//
// The SLURP system requires integration with existing BZZZ components:
// - config: Main BZZZ configuration including SLURP settings
// The SLURP system requires integration with existing CHORUS components:
// - config: Main CHORUS configuration including SLURP settings
// - dhtInstance: Distributed hash table for storage and discovery
// - cryptoInstance: Role-based encryption for access control
// - electionManager: Admin election coordination for restricted operations
@@ -574,7 +574,7 @@ func (s *SLURP) RegisterEventHandler(eventType EventType, handler EventHandler)
// Close gracefully shuts down the SLURP system.
//
// This method stops all background tasks, flushes caches, and releases
// resources. It should be called when the BZZZ node is shutting down.
// resources. It should be called when the CHORUS node is shutting down.
func (s *SLURP) Close() error {
s.mu.Lock()
defer s.mu.Unlock()

View File

@@ -46,7 +46,7 @@ The main orchestrator that coordinates between all storage layers:
### 2. Encrypted Storage (`encrypted_storage.go`)
Role-based encrypted storage with enterprise-grade security:
- **Per-role encryption** using the existing BZZZ crypto system
- **Per-role encryption** using the existing CHORUS crypto system
- **Key rotation** with automatic re-encryption
- **Access control validation** with audit logging
- **Encryption metrics** tracking for performance monitoring
@@ -184,16 +184,16 @@ type ContextStoreOptions struct {
- **Timeout values**: Set appropriate timeouts for your network
- **Background intervals**: Balance between performance and resource usage
## Integration with BZZZ Systems
## Integration with CHORUS Systems
### DHT Integration
The distributed storage layer integrates seamlessly with the existing BZZZ DHT system:
The distributed storage layer integrates seamlessly with the existing CHORUS DHT system:
- Uses existing node discovery and communication protocols
- Leverages consistent hashing algorithms
- Integrates with leader election for coordination
### Crypto Integration
The encryption layer uses the existing BZZZ crypto system:
The encryption layer uses the existing CHORUS crypto system:
- Role-based key management
- Shamir's Secret Sharing for key distribution
- Age encryption for data protection

View File

@@ -13,7 +13,7 @@ import (
"time"
"github.com/robfig/cron/v3"
"chorus.services/bzzz/pkg/crypto"
"chorus/pkg/crypto"
)
// BackupManagerImpl implements the BackupManager interface

View File

@@ -6,8 +6,8 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// BatchOperationsImpl provides efficient batch operations for context storage

View File

@@ -7,10 +7,10 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/crypto"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/crypto"
"chorus/pkg/dht"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// ContextStoreImpl is the main implementation of the ContextStore interface

View File

@@ -7,8 +7,8 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/dht"
"chorus.services/bzzz/pkg/types"
"chorus/pkg/dht"
"chorus/pkg/types"
)
// DistributedStorageImpl implements the DistributedStorage interface

View File

@@ -2,12 +2,12 @@
//
// This package implements the storage layer for context data, providing both local
// and distributed storage capabilities with encryption, caching, and efficient
// retrieval mechanisms. It integrates with the BZZZ DHT for distributed context
// retrieval mechanisms. It integrates with the CHORUS DHT for distributed context
// sharing while maintaining role-based access control.
//
// Key Features:
// - Local context storage with efficient indexing and retrieval
// - Distributed context storage using BZZZ DHT infrastructure
// - Distributed context storage using CHORUS DHT infrastructure
// - Role-based encryption for secure context sharing
// - Multi-level caching for performance optimization
// - Backup and recovery capabilities for data durability
@@ -55,7 +55,7 @@
// Storage Architecture:
// The storage system uses a layered approach with local caching, distributed
// replication, and role-based encryption. Context data is stored locally for
// fast access and replicated across the BZZZ cluster for availability and
// fast access and replicated across the CHORUS cluster for availability and
// collaboration. Encryption ensures that only authorized roles can access
// sensitive context information.
//

View File

@@ -8,9 +8,9 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/crypto"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/crypto"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// EncryptedStorageImpl implements the EncryptedStorage interface

View File

@@ -13,8 +13,8 @@ import (
"github.com/blevesearch/bleve/v2/analysis/analyzer/standard"
"github.com/blevesearch/bleve/v2/analysis/lang/en"
"github.com/blevesearch/bleve/v2/mapping"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// IndexManagerImpl implements the IndexManager interface using Bleve

View File

@@ -4,9 +4,9 @@ import (
"context"
"time"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pkg/crypto"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
"chorus/pkg/crypto"
slurpContext "chorus/pkg/slurp/context"
)
// ContextStore provides the main interface for context storage and retrieval

View File

@@ -410,7 +410,7 @@ func (ls *LocalStorageImpl) compress(data []byte) ([]byte, error) {
// Create gzip writer with best compression
writer := gzip.NewWriter(&buf)
writer.Header.Name = "storage_data"
writer.Header.Comment = "BZZZ SLURP local storage compressed data"
writer.Header.Comment = "CHORUS SLURP local storage compressed data"
// Write data to gzip writer
if _, err := writer.Write(data); err != nil {

View File

@@ -3,9 +3,9 @@ package storage
import (
"time"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pkg/crypto"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
"chorus/pkg/crypto"
slurpContext "chorus/pkg/slurp/context"
)
// DatabaseSchema defines the complete schema for encrypted context storage

View File

@@ -3,9 +3,9 @@ package storage
import (
"time"
"chorus.services/bzzz/pkg/ucxl"
"chorus.services/bzzz/pkg/crypto"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
"chorus/pkg/crypto"
slurpContext "chorus/pkg/slurp/context"
)
// ListCriteria represents criteria for listing contexts

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"time"
"chorus.services/bzzz/pkg/slurp/storage"
"chorus/pkg/slurp/storage"
)
// TemporalGraphFactory creates and configures temporal graph components

View File

@@ -4,8 +4,8 @@ import (
"context"
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
)
// TemporalGraph manages the temporal evolution of context through decision points

View File

@@ -9,9 +9,9 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus.services/bzzz/pkg/slurp/storage"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
"chorus/pkg/slurp/storage"
)
// temporalGraphImpl implements the TemporalGraph interface

View File

@@ -5,9 +5,9 @@ import (
"testing"
"time"
"chorus.services/bzzz/pkg/ucxl"
slurpContext "chorus.services/bzzz/pkg/slurp/context"
"chorus.services/bzzz/pkg/slurp/storage"
"chorus/pkg/ucxl"
slurpContext "chorus/pkg/slurp/context"
"chorus/pkg/slurp/storage"
)
// Mock storage for testing

View File

@@ -8,7 +8,7 @@ import (
"sync"
"time"
"chorus.services/bzzz/pkg/ucxl"
"chorus/pkg/ucxl"
)
// influenceAnalyzerImpl implements the InfluenceAnalyzer interface

Some files were not shown because too many files have changed in this diff Show More