Resolve import cycles and migrate to chorus.services module path

This comprehensive refactoring addresses critical architectural issues:

IMPORT CYCLE RESOLUTION:
• pkg/crypto ↔ pkg/slurp/roles: Moved the shared access-level definitions into a new pkg/security/access_levels.go (see the sketch after this list)
• pkg/ucxl → pkg/dht: Created pkg/storage/interfaces.go so pkg/ucxl depends on storage interfaces instead of importing pkg/dht directly
• pkg/slurp/leader → pkg/election → pkg/slurp/storage: Moved the shared types into pkg/election/interfaces.go (sketched after the diff below)
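
In each case the fix follows the same pattern: move the shared declarations into a leaf package that both sides of the former cycle can import. A minimal sketch of the kind of content pkg/security/access_levels.go now carries (the file itself is not shown in this diff, so the names below are assumptions):

    // pkg/security/access_levels.go — illustrative only; the actual contents
    // are not shown in this commit.
    package security

    // AccessLevel is the shared definition that pkg/crypto and pkg/slurp/roles
    // previously reached by importing each other.
    type AccessLevel int

    const (
        AccessReadOnly AccessLevel = iota
        AccessReadWrite
        AccessAdmin
    )

Because pkg/security imports neither of the original packages, the edge that closed the loop disappears.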

MODULE PATH MIGRATION:
• Changed the module path from github.com/anthonyrawlins/bzzz to chorus.services/bzzz (see the go.mod sketch after this list)
• Updated all import statements across 115+ files
• Keeps the package APIs unchanged while removing the dependency on a personal GitHub account in the module path
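
The mechanical part of the migration is the go.mod module directive plus the import rewrite; roughly (the Go toolchain directive is omitted because it is not shown here):

    // go.mod, before:
    module github.com/anthonyrawlins/bzzz

    // go.mod, after:
    module chorus.services/bzzz

Every import prefix then changes the same way, which is what most of the 115-file diff consists of.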

TYPE SYSTEM IMPROVEMENTS:
• Resolved duplicate type declarations in the crypto package
• Added missing type definitions (RoleStatus, TimeRestrictions, KeyStatus, KeyRotationResult); a hypothetical sketch follows this list
• Segregated interfaces into leaf packages to prevent future cycles
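
None of these declarations appear in the hunks below, so the following is only a hypothetical sketch of the shape such definitions typically take (package location, field names, and field types are all assumptions):

    // Hypothetical sketch — the commit does not show where these types landed.
    package crypto

    import "time"

    // KeyStatus describes the lifecycle state of a managed key.
    type KeyStatus string

    const (
        KeyStatusActive  KeyStatus = "active"
        KeyStatusRevoked KeyStatus = "revoked"
    )

    // KeyRotationResult summarizes one completed rotation.
    type KeyRotationResult struct {
        OldKeyID  string
        NewKeyID  string
        RotatedAt time.Time
        Status    KeyStatus
    }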

ARCHITECTURAL BENEFITS:
• Build now progresses past structural issues to normal dependency resolution
• Cleaner separation of concerns between packages
• Eliminates circular dependencies that prevented compilation
• Establishes foundation for scalable codebase growth

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: anthonyrawlins
Date:   2025-08-17 10:04:25 +10:00
Parent: e9252ccddc
Commit: d96c931a29

115 files changed, 1010 insertions(+), 534 deletions(-)


@@ -9,9 +9,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/anthonyrawlins/bzzz/pkg/config"
-	"github.com/anthonyrawlins/bzzz/pkg/slurp/leader"
-	"github.com/anthonyrawlins/bzzz/pubsub"
+	"chorus.services/bzzz/pkg/config"
+	"chorus.services/bzzz/pubsub"
 
 	libp2p "github.com/libp2p/go-libp2p/core/host"
 )
@@ -21,7 +20,7 @@ type SLURPElectionManager struct {
 	// SLURP-specific state
 	contextMu sync.RWMutex
-	contextManager leader.ContextManager
+	contextManager ContextManager
 	slurpConfig *SLURPElectionConfig
 	contextCallbacks *ContextLeadershipCallbacks
@@ -75,7 +74,7 @@ func NewSLURPElectionManager(
 }
 
 // RegisterContextManager registers a SLURP context manager for leader duties
-func (sem *SLURPElectionManager) RegisterContextManager(manager leader.ContextManager) error {
+func (sem *SLURPElectionManager) RegisterContextManager(manager ContextManager) error {
 	sem.contextMu.Lock()
 	defer sem.contextMu.Unlock()
 
@@ -102,7 +101,7 @@ func (sem *SLURPElectionManager) IsContextLeader() bool {
 }
 
 // GetContextManager returns the registered context manager (if leader)
-func (sem *SLURPElectionManager) GetContextManager() (leader.ContextManager, error) {
+func (sem *SLURPElectionManager) GetContextManager() (ContextManager, error) {
 	sem.contextMu.RLock()
 	defer sem.contextMu.RUnlock()
 
@@ -175,7 +174,7 @@ func (sem *SLURPElectionManager) TransferContextLeadership(ctx context.Context,
 }
 
 // GetContextLeaderInfo returns information about current context leader
-func (sem *SLURPElectionManager) GetContextLeaderInfo() (*leader.LeaderInfo, error) {
+func (sem *SLURPElectionManager) GetContextLeaderInfo() (*LeaderInfo, error) {
 	sem.contextMu.RLock()
 	defer sem.contextMu.RUnlock()
 
@@ -184,7 +183,7 @@ func (sem *SLURPElectionManager) GetContextLeaderInfo() (*leader.LeaderInfo, err
 		return nil, fmt.Errorf("no current leader")
 	}
 
-	info := &leader.LeaderInfo{
+	info := &LeaderInfo{
 		NodeID: leaderID,
 		Term: sem.contextTerm,
 		ElectedAt: time.Now(), // TODO: Track actual election time
@@ -342,14 +341,14 @@ func (sem *SLURPElectionManager) StopContextGeneration(ctx context.Context) erro
 }
 
 // GetContextGenerationStatus returns status of context operations
-func (sem *SLURPElectionManager) GetContextGenerationStatus() (*leader.GenerationStatus, error) {
+func (sem *SLURPElectionManager) GetContextGenerationStatus() (*GenerationStatus, error) {
 	sem.contextMu.RLock()
 	manager := sem.contextManager
 	isLeader := sem.isContextLeader
 	sem.contextMu.RUnlock()
 
 	if manager == nil {
-		return &leader.GenerationStatus{
+		return &GenerationStatus{
 			IsLeader: false,
 			LeaderID: sem.GetCurrentAdmin(),
 			LastUpdate: time.Now(),
@@ -369,7 +368,7 @@ func (sem *SLURPElectionManager) GetContextGenerationStatus() (*leader.Generatio
 }
 
 // RequestContextGeneration queues a context generation request
-func (sem *SLURPElectionManager) RequestContextGeneration(req *leader.ContextGenerationRequest) error {
+func (sem *SLURPElectionManager) RequestContextGeneration(req *ContextGenerationRequest) error {
 	sem.contextMu.RLock()
 	manager := sem.contextManager
 	isLeader := sem.isContextLeader
@@ -422,15 +421,15 @@ func (sem *SLURPElectionManager) PrepareContextFailover(ctx context.Context) (*C
 	if sem.contextManager != nil {
 		// Get queued requests (if supported)
 		// TODO: Add interface method to get queued requests
-		state.QueuedRequests = []*leader.ContextGenerationRequest{}
+		state.QueuedRequests = []*ContextGenerationRequest{}
 
 		// Get active jobs (if supported)
 		// TODO: Add interface method to get active jobs
-		state.ActiveJobs = make(map[string]*leader.ContextGenerationJob)
+		state.ActiveJobs = make(map[string]*ContextGenerationJob)
 
 		// Get manager configuration
 		// TODO: Add interface method to get configuration
-		state.ManagerConfig = leader.DefaultManagerConfig()
+		state.ManagerConfig = DefaultManagerConfig()
 	}
 
 	// Get cluster health snapshot
@@ -743,7 +742,7 @@ func (chm *ContextHealthMonitor) GetClusterHealth() *ContextClusterHealth {
 }
 
 // UpdateGenerationStatus updates health based on generation status
-func (chm *ContextHealthMonitor) UpdateGenerationStatus(status *leader.GenerationStatus) {
+func (chm *ContextHealthMonitor) UpdateGenerationStatus(status *GenerationStatus) {
 	chm.mu.Lock()
 	defer chm.mu.Unlock()
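
The repeated change above — leader.LeaderInfo becoming LeaderInfo, leader.GenerationStatus becoming GenerationStatus, and so on — works because those types now live alongside the election manager in pkg/election/interfaces.go. A rough sketch of that file; only the type names and the struct fields visible in the hunks come from the commit, while the field types and everything else are assumptions:

    // pkg/election/interfaces.go — sketch; see caveats above.
    package election

    import "time"

    // LeaderInfo describes the currently elected context leader.
    type LeaderInfo struct {
        NodeID    string
        Term      int64
        ElectedAt time.Time
    }

    // GenerationStatus reports the state of context generation on the leader.
    type GenerationStatus struct {
        IsLeader   bool
        LeaderID   string
        LastUpdate time.Time
    }

    // ContextManager, ContextGenerationRequest, ContextGenerationJob and
    // DefaultManagerConfig move here as well, which removes the
    // pkg/slurp/leader import from this package entirely.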