Complete BZZZ functionality port to CHORUS

🎭 CHORUS now contains full BZZZ functionality adapted for containers

Core systems ported:
- P2P networking (libp2p with DHT and PubSub)
- Task coordination (COOEE protocol)
- HMMM collaborative reasoning
- SHHH encryption and security
- SLURP admin election system
- UCXL content addressing
- UCXI server integration
- Hypercore logging system
- Health monitoring and graceful shutdown
- License validation with KACHING

Container adaptations:
- Environment variable configuration (no YAML files)
- Container-optimized logging to stdout/stderr
- Auto-generated agent IDs for container deployments
- Docker-first architecture

All proven BZZZ P2P protocols, AI integration, and collaboration
features are now available in containerized form.

Next: Build and test container deployment.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
anthonyrawlins
2025-09-02 20:02:37 +10:00
parent 7c6cbd562a
commit 543ab216f9
224 changed files with 86331 additions and 186 deletions

View File

@@ -0,0 +1,218 @@
package storage
import (
"bytes"
"context"
"os"
"strings"
"testing"
"time"
)
// TestLocalStorageCompression verifies that a value stored with the
// Compress option round-trips intact and that the storage's compression
// statistics report at least one compressed entry with a non-zero ratio.
func TestLocalStorageCompression(t *testing.T) {
	dir := t.TempDir()

	// Enable compression at the storage level.
	opts := DefaultLocalStorageOptions()
	opts.Compression = true

	store, err := NewLocalStorage(dir, opts)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()

	// Highly repetitive payload so the compressor has something to work with.
	payload := strings.Repeat("This is a test string that should compress well! ", 100)

	ctx := context.Background()
	if err := store.Store(ctx, "test-compress", payload, &StoreOptions{Compress: true}); err != nil {
		t.Fatalf("Failed to store compressed data: %v", err)
	}

	got, err := store.Retrieve(ctx, "test-compress")
	if err != nil {
		t.Fatalf("Failed to retrieve compressed data: %v", err)
	}

	// Round-trip integrity: the retrieved value must be the original string.
	switch v := got.(type) {
	case string:
		if v != payload {
			t.Error("Retrieved data doesn't match original")
		}
	default:
		t.Error("Retrieved data is not a string")
	}

	stats, err := store.GetCompressionStats()
	if err != nil {
		t.Fatalf("Failed to get compression stats: %v", err)
	}
	if stats.CompressedEntries == 0 {
		t.Error("Expected at least one compressed entry")
	}
	if stats.CompressionRatio == 0 {
		t.Error("Expected non-zero compression ratio")
	}
	t.Logf("Compression stats: %d/%d entries compressed, ratio: %.2f",
		stats.CompressedEntries, stats.TotalEntries, stats.CompressionRatio)
}
// TestCompressionMethods exercises the internal compress/decompress
// helpers directly and checks that a round trip preserves the bytes.
func TestCompressionMethods(t *testing.T) {
	store, err := NewLocalStorage(t.TempDir(), nil)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()

	// Repetitive input; expected to shrink under any real compressor.
	input := []byte(strings.Repeat("Hello, World! ", 1000))

	packed, err := store.compress(input)
	if err != nil {
		t.Fatalf("Compression failed: %v", err)
	}
	t.Logf("Original size: %d bytes", len(input))
	t.Logf("Compressed size: %d bytes", len(packed))

	// Informational only: tiny or high-entropy inputs may not shrink.
	if len(packed) >= len(input) {
		t.Log("Compression didn't reduce size (may be expected for small or non-repetitive data)")
	}

	unpacked, err := store.decompress(packed)
	if err != nil {
		t.Fatalf("Decompression failed: %v", err)
	}

	// The round trip must reproduce the original bytes exactly.
	if !bytes.Equal(input, unpacked) {
		t.Error("Decompressed data doesn't match original")
	}
}
// TestStorageOptimization stores a mix of entry sizes uncompressed, runs
// OptimizeStorage with a 100-byte threshold, and verifies every entry is
// still retrievable with its original contents afterwards.
func TestStorageOptimization(t *testing.T) {
	store, err := NewLocalStorage(t.TempDir(), nil)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()

	ctx := context.Background()

	// Entries deliberately span the optimization threshold.
	cases := []struct {
		key  string
		data string
	}{
		{"small", "small data"},
		{"large1", strings.Repeat("Large repetitive data ", 100)},
		{"large2", strings.Repeat("Another large repetitive dataset ", 100)},
		{"medium", strings.Repeat("Medium data ", 50)},
	}

	// Everything goes in uncompressed so optimization has work to do.
	for _, c := range cases {
		if err := store.Store(ctx, c.key, c.data, &StoreOptions{Compress: false}); err != nil {
			t.Fatalf("Failed to store %s: %v", c.key, err)
		}
	}

	before, err := store.GetCompressionStats()
	if err != nil {
		t.Fatalf("Failed to get initial stats: %v", err)
	}
	t.Logf("Initial: %d entries, %d compressed",
		before.TotalEntries, before.CompressedEntries)

	// Only entries larger than 100 bytes should be compressed.
	if err := store.OptimizeStorage(ctx, 100); err != nil {
		t.Fatalf("Storage optimization failed: %v", err)
	}

	after, err := store.GetCompressionStats()
	if err != nil {
		t.Fatalf("Failed to get final stats: %v", err)
	}
	t.Logf("Final: %d entries, %d compressed",
		after.TotalEntries, after.CompressedEntries)

	// Soft expectation: more compressed entries after optimization.
	if after.CompressedEntries <= before.CompressedEntries {
		t.Log("Note: Optimization didn't increase compressed entries (may be expected)")
	}

	// Optimization must never corrupt or lose stored data.
	for _, c := range cases {
		got, err := store.Retrieve(ctx, c.key)
		if err != nil {
			t.Fatalf("Failed to retrieve %s after optimization: %v", c.key, err)
		}
		if s, ok := got.(string); ok && s != c.data {
			t.Errorf("Data mismatch for %s after optimization", c.key)
		}
	}
}
// TestCompressionFallback checks that compress tolerates input that does
// not benefit from compression, and that decompress passes data that was
// never compressed through unchanged.
func TestCompressionFallback(t *testing.T) {
	store, err := NewLocalStorage(t.TempDir(), nil)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()

	// Short, low-repetition input that is unlikely to shrink.
	input := []byte("a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9t0u1v2w3x4y5z6")

	packed, err := store.compress(input)
	if err != nil {
		t.Fatalf("Compression failed: %v", err)
	}
	// Informational: a fallback implementation may hand back the input as-is.
	if len(packed) >= len(input) {
		t.Log("Compression correctly returned original data for incompressible input")
	}

	// Feeding never-compressed bytes to decompress must not fail…
	unpacked, err := store.decompress(input)
	if err != nil {
		t.Fatalf("Decompression fallback failed: %v", err)
	}
	// …and must not alter them.
	if !bytes.Equal(input, unpacked) {
		t.Error("Decompression fallback changed data")
	}
}