chore: align slurp config and scaffolding

This commit is contained in:
anthonyrawlins
2025-09-27 21:03:12 +10:00
parent acc4361463
commit 4a77862289
47 changed files with 5133 additions and 4274 deletions

View File

@@ -7,39 +7,39 @@ import (
"sync"
"time"
"chorus/pkg/dht"
"chorus/pkg/config"
"chorus/pkg/dht"
"chorus/pkg/ucxl"
"github.com/libp2p/go-libp2p/core/peer"
)
// ReplicationManagerImpl implements ReplicationManager interface.
// All mutable state (replicationMap, stats, running) is guarded by mu;
// repair and rebalance work is handed off through the two channels.
type ReplicationManagerImpl struct {
	mu             sync.RWMutex
	dht            *dht.DHT
	config         *config.Config
	replicationMap map[string]*ReplicationStatus // keyed by UCXL address string
	repairQueue    chan *RepairRequest
	rebalanceQueue chan *RebalanceRequest
	consistentHash ConsistentHashing
	policy         *ReplicationPolicy
	stats          *ReplicationStatistics
	running        bool // true while background workers are active
}
// RepairRequest represents a repair request queued for a replica whose
// health has degraded below policy.
type RepairRequest struct {
	Address     ucxl.Address // content address whose replicas need repair
	RequestedBy string       // identifier of the requesting component/peer
	Priority    Priority
	RequestTime time.Time // when the request was enqueued
}
// RebalanceRequest represents a rebalance request
type RebalanceRequest struct {
Reason string
RequestedBy string
RequestTime time.Time
Reason string
RequestedBy string
RequestTime time.Time
}
// NewReplicationManagerImpl creates a new replication manager implementation
@@ -220,10 +220,10 @@ func (rm *ReplicationManagerImpl) BalanceReplicas(ctx context.Context) (*Rebalan
start := time.Now()
result := &RebalanceResult{
RebalanceTime: 0,
RebalanceTime: 0,
RebalanceSuccessful: false,
Errors: []string{},
RebalancedAt: time.Now(),
Errors: []string{},
RebalancedAt: time.Now(),
}
// Get current cluster topology
@@ -462,9 +462,9 @@ func (rm *ReplicationManagerImpl) discoverReplicas(ctx context.Context, address
// For now, we'll simulate some replicas
peers := rm.dht.GetConnectedPeers()
if len(peers) > 0 {
status.CurrentReplicas = min(len(peers), rm.policy.DefaultFactor)
status.CurrentReplicas = minInt(len(peers), rm.policy.DefaultFactor)
status.HealthyReplicas = status.CurrentReplicas
for i, peer := range peers {
if i >= status.CurrentReplicas {
break
@@ -478,9 +478,9 @@ func (rm *ReplicationManagerImpl) determineOverallHealth(status *ReplicationStat
if status.HealthyReplicas == 0 {
return HealthFailed
}
healthRatio := float64(status.HealthyReplicas) / float64(status.DesiredReplicas)
if healthRatio >= 1.0 {
return HealthHealthy
} else if healthRatio >= 0.7 {
@@ -579,7 +579,7 @@ func (rm *ReplicationManagerImpl) calculateIdealDistribution(peers []peer.ID) ma
func (rm *ReplicationManagerImpl) getCurrentDistribution(ctx context.Context) map[string]map[string]int {
// Returns current distribution: address -> node -> replica count
distribution := make(map[string]map[string]int)
rm.mu.RLock()
for addr, status := range rm.replicationMap {
distribution[addr] = make(map[string]int)
@@ -588,7 +588,7 @@ func (rm *ReplicationManagerImpl) getCurrentDistribution(ctx context.Context) ma
}
}
rm.mu.RUnlock()
return distribution
}
@@ -630,17 +630,17 @@ func (rm *ReplicationManagerImpl) isNodeOverloaded(nodeID string) bool {
// RebalanceMove represents a single replica move operation produced by the
// rebalancer: relocate one replica of Address from FromNode to ToNode.
type RebalanceMove struct {
	Address  ucxl.Address `json:"address"`
	FromNode string       `json:"from_node"`
	ToNode   string       `json:"to_node"`
	Priority Priority     `json:"priority"`
	Reason   string       `json:"reason"` // why this move was scheduled
}
// Utility functions

// minInt returns the smaller of a and b. It is named minInt rather than min
// to avoid clashing with the built-in min introduced in Go 1.21.
func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}