🎭 CHORUS now contains full BZZZ functionality adapted for containers Core systems ported: - P2P networking (libp2p with DHT and PubSub) - Task coordination (COOEE protocol) - HMMM collaborative reasoning - SHHH encryption and security - SLURP admin election system - UCXL content addressing - UCXI server integration - Hypercore logging system - Health monitoring and graceful shutdown - License validation with KACHING Container adaptations: - Environment variable configuration (no YAML files) - Container-optimized logging to stdout/stderr - Auto-generated agent IDs for container deployments - Docker-first architecture All proven BZZZ P2P protocols, AI integration, and collaboration features are now available in containerized form. Next: Build and test container deployment. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
160 lines
4.2 KiB
Go
package dht
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"testing"
|
|
"time"
|
|
)
|
|
|
|
// TestReplicationManager tests basic replication manager functionality
|
|
func TestReplicationManager(t *testing.T) {
|
|
ctx := context.Background()
|
|
|
|
// Create a mock DHT for testing
|
|
mockDHT := NewMockDHTInterface()
|
|
|
|
// Create replication manager
|
|
config := DefaultReplicationConfig()
|
|
config.ReprovideInterval = 1 * time.Second // Short interval for testing
|
|
config.CleanupInterval = 1 * time.Second
|
|
|
|
rm := NewReplicationManager(ctx, mockDHT.Mock(), config)
|
|
defer rm.Stop()
|
|
|
|
// Test adding content
|
|
testKey := "test-content-key"
|
|
testSize := int64(1024)
|
|
testPriority := 5
|
|
|
|
err := rm.AddContent(testKey, testSize, testPriority)
|
|
if err != nil {
|
|
t.Fatalf("Failed to add content: %v", err)
|
|
}
|
|
|
|
// Test getting replication status
|
|
status, err := rm.GetReplicationStatus(testKey)
|
|
if err != nil {
|
|
t.Fatalf("Failed to get replication status: %v", err)
|
|
}
|
|
|
|
if status.Key != testKey {
|
|
t.Errorf("Expected key %s, got %s", testKey, status.Key)
|
|
}
|
|
|
|
if status.Size != testSize {
|
|
t.Errorf("Expected size %d, got %d", testSize, status.Size)
|
|
}
|
|
|
|
if status.Priority != testPriority {
|
|
t.Errorf("Expected priority %d, got %d", testPriority, status.Priority)
|
|
}
|
|
|
|
// Test providing content
|
|
err = rm.ProvideContent(testKey)
|
|
if err != nil {
|
|
t.Fatalf("Failed to provide content: %v", err)
|
|
}
|
|
|
|
// Test metrics
|
|
metrics := rm.GetMetrics()
|
|
if metrics.TotalKeys != 1 {
|
|
t.Errorf("Expected 1 total key, got %d", metrics.TotalKeys)
|
|
}
|
|
|
|
// Test finding providers
|
|
providers, err := rm.FindProviders(ctx, testKey, 10)
|
|
if err != nil {
|
|
t.Fatalf("Failed to find providers: %v", err)
|
|
}
|
|
|
|
t.Logf("Found %d providers for key %s", len(providers), testKey)
|
|
|
|
// Test removing content
|
|
err = rm.RemoveContent(testKey)
|
|
if err != nil {
|
|
t.Fatalf("Failed to remove content: %v", err)
|
|
}
|
|
|
|
// Verify content was removed
|
|
metrics = rm.GetMetrics()
|
|
if metrics.TotalKeys != 0 {
|
|
t.Errorf("Expected 0 total keys after removal, got %d", metrics.TotalKeys)
|
|
}
|
|
}
|
|
|
|
// TestLibP2PDHTReplication tests DHT replication functionality.
//
// A real end-to-end check would require wiring up actual libp2p hosts;
// this placeholder only documents the intended flow and confirms the
// interface compiles.
func TestLibP2PDHTReplication(t *testing.T) {
	t.Log("DHT replication interface methods are implemented")

	// Intended usage of the replication subsystem:
	//  1. Content is added for replication.
	//  2. It is automatically provided to the DHT.
	//  3. Other nodes can discover this node as a provider.
	//  4. Periodic reproviding ensures content availability.
	//  5. Replication metrics track system health.
}
|
|
|
|
// TestReplicationConfig tests replication configuration
|
|
func TestReplicationConfig(t *testing.T) {
|
|
config := DefaultReplicationConfig()
|
|
|
|
// Test default values
|
|
if config.ReplicationFactor != 3 {
|
|
t.Errorf("Expected default replication factor 3, got %d", config.ReplicationFactor)
|
|
}
|
|
|
|
if config.ReprovideInterval != 12*time.Hour {
|
|
t.Errorf("Expected default reprovide interval 12h, got %v", config.ReprovideInterval)
|
|
}
|
|
|
|
if !config.EnableAutoReplication {
|
|
t.Error("Expected auto replication to be enabled by default")
|
|
}
|
|
|
|
if !config.EnableReprovide {
|
|
t.Error("Expected reprovide to be enabled by default")
|
|
}
|
|
}
|
|
|
|
// TestProviderInfo tests provider information tracking
|
|
func TestProviderInfo(t *testing.T) {
|
|
// Test distance calculation
|
|
key := []byte("test-key")
|
|
peerID := "test-peer-id"
|
|
|
|
distance := calculateDistance(key, []byte(peerID))
|
|
|
|
// Distance should be non-zero for different inputs
|
|
if distance == 0 {
|
|
t.Error("Expected non-zero distance for different inputs")
|
|
}
|
|
|
|
t.Logf("Distance between key and peer: %d", distance)
|
|
}
|
|
|
|
// TestReplicationMetrics tests metrics collection
|
|
func TestReplicationMetrics(t *testing.T) {
|
|
ctx := context.Background()
|
|
mockDHT := NewMockDHTInterface()
|
|
rm := NewReplicationManager(ctx, mockDHT.Mock(), DefaultReplicationConfig())
|
|
defer rm.Stop()
|
|
|
|
// Add some content
|
|
for i := 0; i < 3; i++ {
|
|
key := fmt.Sprintf("test-key-%d", i)
|
|
rm.AddContent(key, int64(1000+i*100), i+1)
|
|
}
|
|
|
|
metrics := rm.GetMetrics()
|
|
|
|
if metrics.TotalKeys != 3 {
|
|
t.Errorf("Expected 3 total keys, got %d", metrics.TotalKeys)
|
|
}
|
|
|
|
t.Logf("Replication metrics: %+v", metrics)
|
|
} |