Harden CHORUS security and messaging stack
This commit is contained in:
@@ -2,159 +2,106 @@ package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestReplicationManager tests basic replication manager functionality
|
||||
func TestReplicationManager(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a mock DHT for testing
|
||||
mockDHT := NewMockDHTInterface()
|
||||
|
||||
// Create replication manager
|
||||
config := DefaultReplicationConfig()
|
||||
config.ReprovideInterval = 1 * time.Second // Short interval for testing
|
||||
config.CleanupInterval = 1 * time.Second
|
||||
|
||||
rm := NewReplicationManager(ctx, mockDHT.Mock(), config)
|
||||
defer rm.Stop()
|
||||
|
||||
// Test adding content
|
||||
testKey := "test-content-key"
|
||||
testSize := int64(1024)
|
||||
testPriority := 5
|
||||
|
||||
err := rm.AddContent(testKey, testSize, testPriority)
|
||||
func newReplicationManagerForTest(t *testing.T) *ReplicationManager {
|
||||
t.Helper()
|
||||
|
||||
cfg := &ReplicationConfig{
|
||||
ReplicationFactor: 3,
|
||||
ReprovideInterval: time.Hour,
|
||||
CleanupInterval: time.Hour,
|
||||
ProviderTTL: 30 * time.Minute,
|
||||
MaxProvidersPerKey: 5,
|
||||
EnableAutoReplication: false,
|
||||
EnableReprovide: false,
|
||||
MaxConcurrentReplications: 1,
|
||||
}
|
||||
|
||||
rm := NewReplicationManager(context.Background(), nil, cfg)
|
||||
t.Cleanup(func() {
|
||||
if rm.reprovideTimer != nil {
|
||||
rm.reprovideTimer.Stop()
|
||||
}
|
||||
if rm.cleanupTimer != nil {
|
||||
rm.cleanupTimer.Stop()
|
||||
}
|
||||
rm.cancel()
|
||||
})
|
||||
return rm
|
||||
}
|
||||
|
||||
func TestAddContentRegistersKey(t *testing.T) {
|
||||
rm := newReplicationManagerForTest(t)
|
||||
|
||||
if err := rm.AddContent("ucxl://example/path", 512, 1); err != nil {
|
||||
t.Fatalf("expected AddContent to succeed, got error: %v", err)
|
||||
}
|
||||
|
||||
rm.keysMutex.RLock()
|
||||
record, ok := rm.contentKeys["ucxl://example/path"]
|
||||
rm.keysMutex.RUnlock()
|
||||
|
||||
if !ok {
|
||||
t.Fatal("expected content key to be registered")
|
||||
}
|
||||
|
||||
if record.Size != 512 {
|
||||
t.Fatalf("expected size 512, got %d", record.Size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveContentClearsTracking(t *testing.T) {
|
||||
rm := newReplicationManagerForTest(t)
|
||||
|
||||
if err := rm.AddContent("ucxl://example/path", 512, 1); err != nil {
|
||||
t.Fatalf("AddContent returned error: %v", err)
|
||||
}
|
||||
|
||||
if err := rm.RemoveContent("ucxl://example/path"); err != nil {
|
||||
t.Fatalf("RemoveContent returned error: %v", err)
|
||||
}
|
||||
|
||||
rm.keysMutex.RLock()
|
||||
_, exists := rm.contentKeys["ucxl://example/path"]
|
||||
rm.keysMutex.RUnlock()
|
||||
|
||||
if exists {
|
||||
t.Fatal("expected content key to be removed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetReplicationStatusReturnsCopy(t *testing.T) {
|
||||
rm := newReplicationManagerForTest(t)
|
||||
|
||||
if err := rm.AddContent("ucxl://example/path", 512, 1); err != nil {
|
||||
t.Fatalf("AddContent returned error: %v", err)
|
||||
}
|
||||
|
||||
status, err := rm.GetReplicationStatus("ucxl://example/path")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add content: %v", err)
|
||||
t.Fatalf("GetReplicationStatus returned error: %v", err)
|
||||
}
|
||||
|
||||
// Test getting replication status
|
||||
status, err := rm.GetReplicationStatus(testKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get replication status: %v", err)
|
||||
|
||||
if status.Key != "ucxl://example/path" {
|
||||
t.Fatalf("expected status key to match, got %s", status.Key)
|
||||
}
|
||||
|
||||
if status.Key != testKey {
|
||||
t.Errorf("Expected key %s, got %s", testKey, status.Key)
|
||||
|
||||
// Mutating status should not affect internal state
|
||||
status.HealthyProviders = 99
|
||||
internal, _ := rm.GetReplicationStatus("ucxl://example/path")
|
||||
if internal.HealthyProviders == 99 {
|
||||
t.Fatal("expected GetReplicationStatus to return a copy")
|
||||
}
|
||||
|
||||
if status.Size != testSize {
|
||||
t.Errorf("Expected size %d, got %d", testSize, status.Size)
|
||||
}
|
||||
|
||||
if status.Priority != testPriority {
|
||||
t.Errorf("Expected priority %d, got %d", testPriority, status.Priority)
|
||||
}
|
||||
|
||||
// Test providing content
|
||||
err = rm.ProvideContent(testKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to provide content: %v", err)
|
||||
}
|
||||
|
||||
// Test metrics
|
||||
}
|
||||
|
||||
func TestGetMetricsReturnsSnapshot(t *testing.T) {
|
||||
rm := newReplicationManagerForTest(t)
|
||||
|
||||
metrics := rm.GetMetrics()
|
||||
if metrics.TotalKeys != 1 {
|
||||
t.Errorf("Expected 1 total key, got %d", metrics.TotalKeys)
|
||||
}
|
||||
|
||||
// Test finding providers
|
||||
providers, err := rm.FindProviders(ctx, testKey, 10)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to find providers: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Found %d providers for key %s", len(providers), testKey)
|
||||
|
||||
// Test removing content
|
||||
err = rm.RemoveContent(testKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to remove content: %v", err)
|
||||
}
|
||||
|
||||
// Verify content was removed
|
||||
metrics = rm.GetMetrics()
|
||||
if metrics.TotalKeys != 0 {
|
||||
t.Errorf("Expected 0 total keys after removal, got %d", metrics.TotalKeys)
|
||||
if metrics == rm.metrics {
|
||||
t.Fatal("expected GetMetrics to return a copy of metrics")
|
||||
}
|
||||
}
|
||||
|
||||
// TestLibP2PDHTReplication tests DHT replication functionality
func TestLibP2PDHTReplication(t *testing.T) {
	// Exercising real replication needs actual libp2p hosts; this
	// placeholder only documents the intended flow and confirms the
	// interface methods compile.
	t.Log("DHT replication interface methods are implemented")

	// Intended usage of the replication subsystem:
	//  1. Content is added for replication.
	//  2. It is automatically provided to the DHT.
	//  3. Other nodes discover this node as a provider.
	//  4. Periodic reproviding keeps the content available.
	//  5. Replication metrics track system health.
}
|
||||
|
||||
// TestReplicationConfig tests replication configuration
|
||||
func TestReplicationConfig(t *testing.T) {
|
||||
config := DefaultReplicationConfig()
|
||||
|
||||
// Test default values
|
||||
if config.ReplicationFactor != 3 {
|
||||
t.Errorf("Expected default replication factor 3, got %d", config.ReplicationFactor)
|
||||
}
|
||||
|
||||
if config.ReprovideInterval != 12*time.Hour {
|
||||
t.Errorf("Expected default reprovide interval 12h, got %v", config.ReprovideInterval)
|
||||
}
|
||||
|
||||
if !config.EnableAutoReplication {
|
||||
t.Error("Expected auto replication to be enabled by default")
|
||||
}
|
||||
|
||||
if !config.EnableReprovide {
|
||||
t.Error("Expected reprovide to be enabled by default")
|
||||
}
|
||||
}
|
||||
|
||||
// TestProviderInfo tests provider information tracking
|
||||
func TestProviderInfo(t *testing.T) {
|
||||
// Test distance calculation
|
||||
key := []byte("test-key")
|
||||
peerID := "test-peer-id"
|
||||
|
||||
distance := calculateDistance(key, []byte(peerID))
|
||||
|
||||
// Distance should be non-zero for different inputs
|
||||
if distance == 0 {
|
||||
t.Error("Expected non-zero distance for different inputs")
|
||||
}
|
||||
|
||||
t.Logf("Distance between key and peer: %d", distance)
|
||||
}
|
||||
|
||||
// TestReplicationMetrics tests metrics collection
|
||||
func TestReplicationMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockDHT := NewMockDHTInterface()
|
||||
rm := NewReplicationManager(ctx, mockDHT.Mock(), DefaultReplicationConfig())
|
||||
defer rm.Stop()
|
||||
|
||||
// Add some content
|
||||
for i := 0; i < 3; i++ {
|
||||
key := fmt.Sprintf("test-key-%d", i)
|
||||
rm.AddContent(key, int64(1000+i*100), i+1)
|
||||
}
|
||||
|
||||
metrics := rm.GetMetrics()
|
||||
|
||||
if metrics.TotalKeys != 3 {
|
||||
t.Errorf("Expected 3 total keys, got %d", metrics.TotalKeys)
|
||||
}
|
||||
|
||||
t.Logf("Replication metrics: %+v", metrics)
|
||||
}
|
||||
Reference in New Issue
Block a user