Harden CHORUS security and messaging stack

anthonyrawlins
2025-09-20 23:21:35 +10:00
parent 57751f277a
commit 1bb736c09a
25 changed files with 2793 additions and 2474 deletions

View File

@@ -6,33 +6,34 @@ import (
"sync"
"time"
"crypto/sha256"
"github.com/ipfs/go-cid"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/routing"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multihash"
"github.com/ipfs/go-cid"
"crypto/sha256"
)
// LibP2PDHT provides distributed hash table functionality for CHORUS peer discovery
type LibP2PDHT struct {
host host.Host
kdht *dht.IpfsDHT
ctx context.Context
cancel context.CancelFunc
config *Config
startTime time.Time
// Bootstrap state
bootstrapped bool
bootstrapMutex sync.RWMutex
// Peer management
knownPeers map[peer.ID]*PeerInfo
peersMutex sync.RWMutex
// Replication management
replicationManager *ReplicationManager
}
@@ -41,30 +42,32 @@ type LibP2PDHT struct {
type Config struct {
// Bootstrap nodes for initial DHT discovery
BootstrapPeers []multiaddr.Multiaddr
// Protocol prefix for CHORUS DHT
ProtocolPrefix string
// Bootstrap timeout
BootstrapTimeout time.Duration
// Peer discovery interval
DiscoveryInterval time.Duration
// DHT mode (client, server, auto)
Mode dht.ModeOpt
// Enable automatic bootstrap
AutoBootstrap bool
}
const defaultProviderResultLimit = 20
// PeerInfo holds information about discovered peers
type PeerInfo struct {
ID peer.ID
Addresses []multiaddr.Multiaddr
Agent string
Role string
LastSeen time.Time
Capabilities []string
}
@@ -74,23 +77,28 @@ func DefaultConfig() *Config {
ProtocolPrefix: "/CHORUS",
BootstrapTimeout: 30 * time.Second,
DiscoveryInterval: 60 * time.Second,
Mode: dht.ModeAuto,
AutoBootstrap: true,
}
}
// NewDHT is a backward compatible helper that delegates to NewLibP2PDHT.
func NewDHT(ctx context.Context, host host.Host, opts ...Option) (*LibP2PDHT, error) {
return NewLibP2PDHT(ctx, host, opts...)
}
// NewLibP2PDHT creates a new LibP2PDHT instance
func NewLibP2PDHT(ctx context.Context, host host.Host, opts ...Option) (*LibP2PDHT, error) {
config := DefaultConfig()
for _, opt := range opts {
opt(config)
}
// Create context with cancellation
dhtCtx, cancel := context.WithCancel(ctx)
// Create Kademlia DHT
kdht, err := dht.New(dhtCtx, host,
dht.Mode(config.Mode),
dht.ProtocolPrefix(protocol.ID(config.ProtocolPrefix)),
)
@@ -98,22 +106,23 @@ func NewLibP2PDHT(ctx context.Context, host host.Host, opts ...Option) (*LibP2PD
cancel()
return nil, fmt.Errorf("failed to create DHT: %w", err)
}
d := &LibP2PDHT{
host: host,
kdht: kdht,
ctx: dhtCtx,
cancel: cancel,
config: config,
startTime: time.Now(),
knownPeers: make(map[peer.ID]*PeerInfo),
}
// Initialize replication manager
d.replicationManager = NewReplicationManager(dhtCtx, kdht, DefaultReplicationConfig())
// Start background processes
go d.startBackgroundTasks()
return d, nil
}
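Reviewer note: for orientation, here is a minimal construction sketch using the options exercised elsewhere in this change. It assumes the usual imports (context, log, libp2p, and the kad-dht package aliased as dht); the listen and bootstrap addresses are placeholders, not values from this commit.

	host, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0"))
	if err != nil {
		log.Fatal(err)
	}
	d, err := NewLibP2PDHT(context.Background(), host,
		WithProtocolPrefix("/CHORUS"),
		WithMode(dht.ModeAuto),
		WithBootstrapPeersFromStrings([]string{"/ip4/10.0.0.1/tcp/4001/p2p/QmExamplePeer"}),
		WithAutoBootstrap(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer d.Close() // also stops the replication manager created by the constructor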
@@ -178,25 +187,25 @@ func WithAutoBootstrap(auto bool) Option {
func (d *LibP2PDHT) Bootstrap() error {
d.bootstrapMutex.Lock()
defer d.bootstrapMutex.Unlock()
if d.bootstrapped {
return nil
}
// Connect to bootstrap peers
if len(d.config.BootstrapPeers) == 0 {
// Use default IPFS bootstrap peers if none configured
d.config.BootstrapPeers = dht.DefaultBootstrapPeers
}
// Bootstrap the DHT
bootstrapCtx, cancel := context.WithTimeout(d.ctx, d.config.BootstrapTimeout)
defer cancel()
if err := d.kdht.Bootstrap(bootstrapCtx); err != nil {
return fmt.Errorf("DHT bootstrap failed: %w", err)
}
// Connect to bootstrap peers
var connected int
for _, peerAddr := range d.config.BootstrapPeers {
@@ -204,7 +213,7 @@ func (d *LibP2PDHT) Bootstrap() error {
if err != nil {
continue
}
connectCtx, cancel := context.WithTimeout(d.ctx, 10*time.Second)
if err := d.host.Connect(connectCtx, *addrInfo); err != nil {
cancel()
@@ -213,11 +222,11 @@ func (d *LibP2PDHT) Bootstrap() error {
cancel()
connected++
}
if connected == 0 {
return fmt.Errorf("failed to connect to any bootstrap peers")
}
d.bootstrapped = true
return nil
}
@@ -233,13 +242,13 @@ func (d *LibP2PDHT) IsBootstrapped() bool {
func (d *LibP2PDHT) keyToCID(key string) (cid.Cid, error) {
// Hash the key
hash := sha256.Sum256([]byte(key))
// Create multihash
mh, err := multihash.EncodeName(hash[:], "sha2-256")
if err != nil {
return cid.Undef, err
}
// Create CID
return cid.NewCidV1(cid.Raw, mh), nil
}
@@ -249,13 +258,13 @@ func (d *LibP2PDHT) Provide(ctx context.Context, key string) error {
if !d.IsBootstrapped() {
return fmt.Errorf("DHT not bootstrapped")
}
// Convert key to CID
keyCID, err := d.keyToCID(key)
if err != nil {
return fmt.Errorf("failed to create CID from key: %w", err)
}
return d.kdht.Provide(ctx, keyCID, true)
}
@@ -264,31 +273,32 @@ func (d *LibP2PDHT) FindProviders(ctx context.Context, key string, limit int) ([
if !d.IsBootstrapped() {
return nil, fmt.Errorf("DHT not bootstrapped")
}
// Convert key to CID
keyCID, err := d.keyToCID(key)
if err != nil {
return nil, fmt.Errorf("failed to create CID from key: %w", err)
}
maxProviders := limit
if maxProviders <= 0 {
maxProviders = defaultProviderResultLimit
}
providerCtx, cancel := context.WithCancel(ctx)
defer cancel()
// Collect providers from the async channel
providersChan := d.kdht.FindProvidersAsync(providerCtx, keyCID, maxProviders)
providers := make([]peer.AddrInfo, 0, maxProviders)
for providerInfo := range providersChan {
providers = append(providers, providerInfo)
if limit > 0 && len(providers) >= limit {
cancel()
break
}
}
return providers, nil
}
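Reviewer note: a usage sketch of the provide/lookup pair on a bootstrapped *LibP2PDHT. String keys are hashed by keyToCID, so both sides derive the same CID; the "CHORUS:service:indexer" key and the log calls are illustrative only.

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if err := d.Provide(ctx, "CHORUS:service:indexer"); err != nil {
		log.Printf("provide failed: %v", err)
	}

	// Passing limit <= 0 falls back to defaultProviderResultLimit (20).
	providers, err := d.FindProviders(ctx, "CHORUS:service:indexer", 5)
	if err != nil {
		log.Printf("lookup failed: %v", err)
	}
	for _, p := range providers {
		log.Printf("provider %s reachable at %v", p.ID, p.Addrs)
	}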
@@ -297,7 +307,7 @@ func (d *LibP2PDHT) PutValue(ctx context.Context, key string, value []byte) erro
if !d.IsBootstrapped() {
return fmt.Errorf("DHT not bootstrapped")
}
return d.kdht.PutValue(ctx, key, value)
}
@@ -306,7 +316,7 @@ func (d *LibP2PDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
if !d.IsBootstrapped() {
return nil, fmt.Errorf("DHT not bootstrapped")
}
return d.kdht.GetValue(ctx, key)
}
@@ -315,7 +325,7 @@ func (d *LibP2PDHT) FindPeer(ctx context.Context, peerID peer.ID) (peer.AddrInfo
if !d.IsBootstrapped() {
return peer.AddrInfo{}, fmt.Errorf("DHT not bootstrapped")
}
return d.kdht.FindPeer(ctx, peerID)
}
@@ -329,14 +339,30 @@ func (d *LibP2PDHT) GetConnectedPeers() []peer.ID {
return d.kdht.Host().Network().Peers()
}
// GetStats reports basic runtime statistics for the DHT
func (d *LibP2PDHT) GetStats() DHTStats {
stats := DHTStats{
TotalPeers: len(d.GetConnectedPeers()),
Uptime: time.Since(d.startTime),
}
if d.replicationManager != nil {
if metrics := d.replicationManager.GetMetrics(); metrics != nil {
stats.TotalKeys = int(metrics.TotalKeys)
}
}
return stats
}
// RegisterPeer registers a peer with capability information
func (d *LibP2PDHT) RegisterPeer(peerID peer.ID, agent, role string, capabilities []string) {
d.peersMutex.Lock()
defer d.peersMutex.Unlock()
// Get peer addresses from host
peerInfo := d.host.Peerstore().PeerInfo(peerID)
d.knownPeers[peerID] = &PeerInfo{
ID: peerID,
Addresses: peerInfo.Addrs,
@@ -351,12 +377,12 @@ func (d *LibP2PDHT) RegisterPeer(peerID peer.ID, agent, role string, capabilitie
func (d *LibP2PDHT) GetKnownPeers() map[peer.ID]*PeerInfo {
d.peersMutex.RLock()
defer d.peersMutex.RUnlock()
result := make(map[peer.ID]*PeerInfo)
for id, info := range d.knownPeers {
result[id] = info
}
return result
}
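Reviewer note: a small illustration of the local peer registry; the agent, role, and capability strings are placeholders.

	// Record what we know about a peer, then read back a copy of the registry.
	d.RegisterPeer(peerID, "apollo", "backend_developer", []string{"go", "dht"})

	for id, info := range d.GetKnownPeers() {
		log.Printf("peer %s: agent=%s role=%s caps=%v last_seen=%s",
			id, info.Agent, info.Role, info.Capabilities, info.LastSeen.Format(time.RFC3339))
	}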
@@ -371,7 +397,7 @@ func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerIn
}
}
d.peersMutex.RUnlock()
// Also search DHT for role-based keys
roleKey := fmt.Sprintf("CHORUS:role:%s", role)
providers, err := d.FindProviders(ctx, roleKey, 10)
@@ -379,11 +405,11 @@ func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerIn
// Return local peers even if DHT search fails
return localPeers, nil
}
// Convert providers to PeerInfo
var result []*PeerInfo
result = append(result, localPeers...)
for _, provider := range providers {
// Skip if we already have this peer
found := false
@@ -402,7 +428,7 @@ func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerIn
})
}
}
return result, nil
}
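Reviewer note: role lookup merges the local registry with DHT provider records published under "CHORUS:role:<role>" keys. A rough end-to-end sketch, assuming two bootstrapped nodes; AnnounceRole is the announce-side helper exercised by the tests later in this change.

	// Node A advertises its role so other peers can discover it via the DHT.
	if err := nodeA.AnnounceRole(ctx, "frontend"); err != nil {
		log.Printf("announce failed: %v", err)
	}

	// Node B combines locally registered peers with providers of CHORUS:role:frontend.
	peers, err := nodeB.FindPeersByRole(ctx, "frontend")
	if err != nil {
		log.Printf("role lookup failed: %v", err)
	}
	for _, p := range peers {
		log.Printf("frontend peer %s (agent=%s)", p.ID, p.Agent)
	}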
@@ -424,10 +450,10 @@ func (d *LibP2PDHT) startBackgroundTasks() {
if d.config.AutoBootstrap {
go d.autoBootstrap()
}
// Start periodic peer discovery
go d.periodicDiscovery()
// Start peer cleanup
go d.peerCleanup()
}
@@ -436,7 +462,7 @@ func (d *LibP2PDHT) startBackgroundTasks() {
func (d *LibP2PDHT) autoBootstrap() {
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for {
select {
case <-d.ctx.Done():
@@ -456,7 +482,7 @@ func (d *LibP2PDHT) autoBootstrap() {
func (d *LibP2PDHT) periodicDiscovery() {
ticker := time.NewTicker(d.config.DiscoveryInterval)
defer ticker.Stop()
for {
select {
case <-d.ctx.Done():
@@ -473,13 +499,13 @@ func (d *LibP2PDHT) periodicDiscovery() {
func (d *LibP2PDHT) performDiscovery() {
ctx, cancel := context.WithTimeout(d.ctx, 30*time.Second)
defer cancel()
// Look for general CHORUS peers
providers, err := d.FindProviders(ctx, "CHORUS:peer", 10)
if err != nil {
return
}
// Update known peers
d.peersMutex.Lock()
for _, provider := range providers {
@@ -498,7 +524,7 @@ func (d *LibP2PDHT) performDiscovery() {
func (d *LibP2PDHT) peerCleanup() {
ticker := time.NewTicker(5 * time.Minute)
defer ticker.Stop()
for {
select {
case <-d.ctx.Done():
@@ -513,9 +539,9 @@ func (d *LibP2PDHT) peerCleanup() {
func (d *LibP2PDHT) cleanupStalePeers() {
d.peersMutex.Lock()
defer d.peersMutex.Unlock()
staleThreshold := time.Now().Add(-time.Hour) // 1 hour threshold
for peerID, peerInfo := range d.knownPeers {
if peerInfo.LastSeen.Before(staleThreshold) {
// Check if peer is still connected
@@ -526,7 +552,7 @@ func (d *LibP2PDHT) cleanupStalePeers() {
break
}
}
if !connected {
delete(d.knownPeers, peerID)
}
@@ -589,11 +615,11 @@ func (d *LibP2PDHT) EnableReplication(config *ReplicationConfig) error {
if d.replicationManager != nil {
return fmt.Errorf("replication already enabled")
}
if config == nil {
config = DefaultReplicationConfig()
}
d.replicationManager = NewReplicationManager(d.ctx, d.kdht, config)
return nil
}
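Reviewer note: a hedged example of enabling replication with a custom policy, using configuration fields that appear in the replication tests below; the values are illustrative. Because NewLibP2PDHT already wires up a replication manager, EnableReplication returns an error unless replication was disabled first.

	cfg := DefaultReplicationConfig()
	cfg.ReplicationFactor = 5
	cfg.ReprovideInterval = 6 * time.Hour

	if err := d.EnableReplication(cfg); err != nil {
		log.Printf("replication not enabled: %v", err) // e.g. already enabled by the constructor
	}

	// Track a piece of content; the manager reprovides it on the configured interval.
	if rm := d.ReplicationManager(); rm != nil {
		_ = rm.AddContent("ucxl://example/path", 512, 1)
	}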
@@ -603,11 +629,11 @@ func (d *LibP2PDHT) DisableReplication() error {
if d.replicationManager == nil {
return nil
}
if err := d.replicationManager.Stop(); err != nil {
return fmt.Errorf("failed to stop replication manager: %w", err)
}
d.replicationManager = nil
return nil
}
@@ -617,13 +643,18 @@ func (d *LibP2PDHT) IsReplicationEnabled() bool {
return d.replicationManager != nil
}
// ReplicationManager returns the underlying replication manager if enabled.
func (d *LibP2PDHT) ReplicationManager() *ReplicationManager {
return d.replicationManager
}
// Close shuts down the DHT
func (d *LibP2PDHT) Close() error {
// Stop replication manager first
if d.replicationManager != nil {
d.replicationManager.Stop()
}
d.cancel()
return d.kdht.Close()
}
@@ -633,10 +664,10 @@ func (d *LibP2PDHT) RefreshRoutingTable() error {
if !d.IsBootstrapped() {
return fmt.Errorf("DHT not bootstrapped")
}
// RefreshRoutingTable() returns a channel with errors, not a direct error
errChan := d.kdht.RefreshRoutingTable()
// Wait for the first error (if any) from the channel
select {
case err := <-errChan:
@@ -654,4 +685,4 @@ func (d *LibP2PDHT) GetDHTSize() int {
// Host returns the underlying libp2p host
func (d *LibP2PDHT) Host() host.Host {
return d.host
}
}

View File

@@ -2,546 +2,155 @@ package dht
import (
"context"
"strings"
"testing"
"time"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
libp2p "github.com/libp2p/go-libp2p"
dhtmode "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p/core/test"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/multiformats/go-multiaddr"
)
type harness struct {
ctx context.Context
host libp2pHost
dht *LibP2PDHT
}
type libp2pHost interface {
Close() error
}
func newHarness(t *testing.T, opts ...Option) *harness {
t.Helper()
ctx, cancel := context.WithCancel(context.Background())
host, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
if err != nil {
cancel()
t.Fatalf("failed to create libp2p host: %v", err)
}
options := append([]Option{WithAutoBootstrap(false)}, opts...)
d, err := NewLibP2PDHT(ctx, host, options...)
if err != nil {
host.Close()
cancel()
t.Fatalf("failed to create DHT: %v", err)
}
t.Cleanup(func() {
d.Close()
host.Close()
cancel()
})
return &harness{ctx: ctx, host: host, dht: d}
}
func TestDefaultConfig(t *testing.T) {
cfg := DefaultConfig()
if cfg.ProtocolPrefix != "/CHORUS" {
t.Fatalf("expected protocol prefix '/CHORUS', got %s", cfg.ProtocolPrefix)
}
if cfg.BootstrapTimeout != 30*time.Second {
t.Fatalf("expected bootstrap timeout 30s, got %v", cfg.BootstrapTimeout)
}
if cfg.Mode != dhtmode.ModeAuto {
t.Fatalf("expected mode auto, got %v", cfg.Mode)
}
if !cfg.AutoBootstrap {
t.Fatal("expected auto bootstrap to be enabled")
}
}
func TestNewDHT(t *testing.T) {
ctx := context.Background()
// Create a test host
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
// Test with default options
d, err := NewDHT(ctx, host)
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
if d.host != host {
t.Error("host not set correctly")
}
if d.config.ProtocolPrefix != "/CHORUS" {
t.Errorf("expected protocol prefix '/CHORUS', got %s", d.config.ProtocolPrefix)
}
}
func TestWithOptionsOverridesDefaults(t *testing.T) {
h := newHarness(t,
WithProtocolPrefix("/custom"),
WithDiscoveryInterval(2*time.Minute),
WithBootstrapTimeout(45*time.Second),
WithMode(dhtmode.ModeClient),
WithAutoBootstrap(true),
)
cfg := h.dht.config
if cfg.ProtocolPrefix != "/custom" {
t.Fatalf("expected protocol prefix '/custom', got %s", cfg.ProtocolPrefix)
}
if cfg.DiscoveryInterval != 2*time.Minute {
t.Fatalf("expected discovery interval 2m, got %v", cfg.DiscoveryInterval)
}
if cfg.BootstrapTimeout != 45*time.Second {
t.Fatalf("expected bootstrap timeout 45s, got %v", cfg.BootstrapTimeout)
}
if cfg.Mode != dhtmode.ModeClient {
t.Fatalf("expected mode client, got %v", cfg.Mode)
}
if !cfg.AutoBootstrap {
t.Fatal("expected auto bootstrap to remain enabled")
}
}
func TestWithBootstrapPeersFromStrings(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
bootstrapAddrs := []string{
"/ip4/127.0.0.1/tcp/4001/p2p/QmTest1",
"/ip4/127.0.0.1/tcp/4002/p2p/QmTest2",
}
d, err := NewDHT(ctx, host, WithBootstrapPeersFromStrings(bootstrapAddrs))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
if len(d.config.BootstrapPeers) != 2 {
t.Errorf("expected 2 bootstrap peers, got %d", len(d.config.BootstrapPeers))
}
}
func TestProvideRequiresBootstrap(t *testing.T) {
h := newHarness(t)
err := h.dht.Provide(h.ctx, "key")
if err == nil {
t.Fatal("expected Provide to fail when not bootstrapped")
}
if !strings.Contains(err.Error(), "not bootstrapped") {
t.Fatalf("expected error to indicate bootstrap requirement, got %v", err)
}
}
func TestWithBootstrapPeersFromStringsInvalid(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
// Include invalid addresses - they should be filtered out
bootstrapAddrs := []string{
"/ip4/127.0.0.1/tcp/4001/p2p/QmTest1", // valid
"invalid-address", // invalid
"/ip4/127.0.0.1/tcp/4002/p2p/QmTest2", // valid
}
d, err := NewDHT(ctx, host, WithBootstrapPeersFromStrings(bootstrapAddrs))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Should have filtered out the invalid address
if len(d.config.BootstrapPeers) != 2 {
t.Errorf("expected 2 valid bootstrap peers, got %d", len(d.config.BootstrapPeers))
}
}
func TestBootstrapWithoutPeers(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Bootstrap should use default IPFS peers when none configured
err = d.Bootstrap()
// This might fail in test environment without network access, but should not panic
if err != nil {
// Expected in test environment
t.Logf("Bootstrap failed as expected in test environment: %v", err)
}
}
func TestIsBootstrapped(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Should not be bootstrapped initially
if d.IsBootstrapped() {
t.Error("DHT should not be bootstrapped initially")
}
}
func TestRegisterPeer(t *testing.T) {
h := newHarness(t)
peerID := test.RandPeerIDFatal(t)
h.dht.RegisterPeer(peerID, "apollo", "platform", []string{"go"})
peers := h.dht.GetKnownPeers()
info, ok := peers[peerID]
if !ok {
t.Fatalf("expected peer to be tracked")
}
if info.Agent != "apollo" {
t.Fatalf("expected agent apollo, got %s", info.Agent)
}
if info.Role != "platform" {
t.Fatalf("expected role platform, got %s", info.Role)
}
if len(info.Capabilities) != 1 || info.Capabilities[0] != "go" {
t.Fatalf("expected capability go, got %v", info.Capabilities)
}
}
func TestGetStatsProvidesUptime(t *testing.T) {
h := newHarness(t)
stats := h.dht.GetStats()
if stats.TotalPeers != 0 {
t.Fatalf("expected zero peers, got %d", stats.TotalPeers)
}
if stats.Uptime < 0 {
t.Fatalf("expected non-negative uptime, got %v", stats.Uptime)
}
}
func TestPutAndGetValue(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Test without bootstrap (should fail)
key := "test-key"
value := []byte("test-value")
err = d.PutValue(ctx, key, value)
if err == nil {
t.Error("PutValue should fail when DHT not bootstrapped")
}
_, err = d.GetValue(ctx, key)
if err == nil {
t.Error("GetValue should fail when DHT not bootstrapped")
}
}
func TestProvideAndFindProviders(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Test without bootstrap (should fail)
key := "test-service"
err = d.Provide(ctx, key)
if err == nil {
t.Error("Provide should fail when DHT not bootstrapped")
}
_, err = d.FindProviders(ctx, key, 10)
if err == nil {
t.Error("FindProviders should fail when DHT not bootstrapped")
}
}
func TestFindPeer(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Test without bootstrap (should fail)
peerID := test.RandPeerIDFatal(t)
_, err = d.FindPeer(ctx, peerID)
if err == nil {
t.Error("FindPeer should fail when DHT not bootstrapped")
}
}
func TestFindPeersByRole(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Register some local peers
peerID1 := test.RandPeerIDFatal(t)
peerID2 := test.RandPeerIDFatal(t)
d.RegisterPeer(peerID1, "claude", "frontend", []string{"react"})
d.RegisterPeer(peerID2, "claude", "backend", []string{"go"})
// Find frontend peers
frontendPeers, err := d.FindPeersByRole(ctx, "frontend")
if err != nil {
t.Fatalf("failed to find peers by role: %v", err)
}
if len(frontendPeers) != 1 {
t.Errorf("expected 1 frontend peer, got %d", len(frontendPeers))
}
if frontendPeers[0].ID != peerID1 {
t.Error("wrong peer returned for frontend role")
}
// Find all peers with wildcard
allPeers, err := d.FindPeersByRole(ctx, "*")
if err != nil {
t.Fatalf("failed to find all peers: %v", err)
}
if len(allPeers) != 2 {
t.Errorf("expected 2 peers with wildcard, got %d", len(allPeers))
}
}
func TestAnnounceRole(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Should fail when not bootstrapped
err = d.AnnounceRole(ctx, "frontend")
if err == nil {
t.Error("AnnounceRole should fail when DHT not bootstrapped")
}
}
func TestAnnounceCapability(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Should fail when not bootstrapped
err = d.AnnounceCapability(ctx, "react")
if err == nil {
t.Error("AnnounceCapability should fail when DHT not bootstrapped")
}
}
func TestGetRoutingTable(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host)
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
rt := d.GetRoutingTable()
if rt == nil {
t.Error("routing table should not be nil")
}
}
func TestGetDHTSize(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host)
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
size := d.GetDHTSize()
// Should be 0 or small initially
if size < 0 {
t.Errorf("DHT size should be non-negative, got %d", size)
}
}
func TestRefreshRoutingTable(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host, WithAutoBootstrap(false))
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
// Should fail when not bootstrapped
err = d.RefreshRoutingTable()
if err == nil {
t.Error("RefreshRoutingTable should fail when DHT not bootstrapped")
}
}
func TestHost(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host)
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
defer d.Close()
if d.Host() != host {
t.Error("Host() should return the same host instance")
}
}
func TestClose(t *testing.T) {
ctx := context.Background()
host, err := libp2p.New()
if err != nil {
t.Fatalf("failed to create test host: %v", err)
}
defer host.Close()
d, err := NewDHT(ctx, host)
if err != nil {
t.Fatalf("failed to create DHT: %v", err)
}
// Should close without error
err = d.Close()
if err != nil {
t.Errorf("Close() failed: %v", err)
}
}

View File

@@ -2,559 +2,155 @@ package dht
import (
"context"
"strings"
"testing"
"time"
"chorus/pkg/config"
)
type securityTestCase struct {
name string
role string
address string
contentType string
expectSuccess bool
expectErrHint string
}
func newTestEncryptedStorage(cfg *config.Config) *EncryptedDHTStorage {
return &EncryptedDHTStorage{
ctx: context.Background(),
config: cfg,
nodeID: "test-node",
cache: make(map[string]*CachedEntry),
metrics: &StorageMetrics{LastUpdate: time.Now()},
}
}
func TestCheckStoreAccessPolicy(t *testing.T) {
cases := []securityTestCase{
{
name: "backend developer can store",
role: "backend_developer",
address: "agent1:backend_developer:api:endpoint",
contentType: "decision",
expectSuccess: true,
},
{
name: "project manager can store",
role: "project_manager",
address: "agent1:project_manager:plan:milestone",
contentType: "decision",
expectSuccess: true,
},
{
name: "read only user cannot store",
role: "readonly_user",
address: "agent1:readonly_user:note:observation",
contentType: "note",
expectSuccess: false,
expectErrHint: "read-only authority",
},
{
name: "unknown role rejected",
role: "ghost_role",
address: "agent1:ghost_role:context",
contentType: "decision",
expectSuccess: false,
expectErrHint: "unknown creator role",
},
}
cfg := &config.Config{Agent: config.AgentConfig{}}
eds := newTestEncryptedStorage(cfg)
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
err := eds.checkStoreAccessPolicy(tc.role, tc.address, tc.contentType)
verifySecurityExpectation(t, tc.expectSuccess, tc.expectErrHint, err)
})
}
}
func TestCheckRetrieveAccessPolicy(t *testing.T) {
cases := []securityTestCase{
{
name: "qa engineer allowed",
role: "qa_engineer",
address: "agent1:backend_developer:api:tests",
expectSuccess: true,
},
{
name: "unknown role rejected",
role: "unknown",
address: "agent1:backend_developer:api:tests",
expectSuccess: false,
expectErrHint: "unknown current role",
},
}
cfg := &config.Config{Agent: config.AgentConfig{}}
eds := newTestEncryptedStorage(cfg)
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
err := eds.checkRetrieveAccessPolicy(tc.role, tc.address)
verifySecurityExpectation(t, tc.expectSuccess, tc.expectErrHint, err)
})
}
}
func TestCheckAnnounceAccessPolicy(t *testing.T) {
cases := []securityTestCase{
{
name: "architect can announce",
role: "senior_software_architect",
address: "agent1:senior_software_architect:architecture:proposal",
expectSuccess: true,
},
{
name: "suggestion role cannot announce",
role: "suggestion_only_role",
address: "agent1:suggestion_only_role:idea",
expectSuccess: false,
expectErrHint: "lacks authority",
},
{
name: "unknown role rejected",
role: "mystery",
address: "agent1:mystery:topic",
expectSuccess: false,
expectErrHint: "unknown current role",
},
}
cfg := &config.Config{Agent: config.AgentConfig{}}
eds := newTestEncryptedStorage(cfg)
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
err := eds.checkAnnounceAccessPolicy(tc.role, tc.address)
verifySecurityExpectation(t, tc.expectSuccess, tc.expectErrHint, err)
})
}
}
func verifySecurityExpectation(t *testing.T, expectSuccess bool, hint string, err error) {
t.Helper()
if expectSuccess {
if err != nil {
t.Fatalf("expected success, got error: %v", err)
}
return
}
if err == nil {
t.Fatal("expected error but got success")
}
if hint != "" && !strings.Contains(err.Error(), hint) {
t.Fatalf("expected error to contain %q, got %q", hint, err.Error())
}
}
// TestSecurityMetrics tests security-related metrics
func TestSecurityMetrics(t *testing.T) {
ctx := context.Background()
cfg := &config.Config{
Agent: config.AgentConfig{
ID: "test-agent",
Role: "backend_developer",
},
Security: config.SecurityConfig{
KeyRotationDays: 90,
AuditLogging: true,
AuditPath: "/tmp/test-metrics.log",
},
}
eds := createMockEncryptedStorage(ctx, cfg)
// Simulate some operations to generate metrics
for i := 0; i < 5; i++ {
eds.metrics.StoredItems++
eds.metrics.RetrievedItems++
eds.metrics.EncryptionOps++
eds.metrics.DecryptionOps++
}
metrics := eds.GetMetrics()
expectedMetrics := map[string]int64{
"stored_items": 5,
"retrieved_items": 5,
"encryption_ops": 5,
"decryption_ops": 5,
}
for metricName, expectedValue := range expectedMetrics {
if actualValue, ok := metrics[metricName]; !ok {
t.Errorf("Expected metric %s to be present in metrics", metricName)
} else if actualValue != expectedValue {
t.Errorf("Expected %s to be %d, got %v", metricName, expectedValue, actualValue)
}
}
}
// Helper functions
func createMockEncryptedStorage(ctx context.Context, cfg *config.Config) *EncryptedDHTStorage {
return &EncryptedDHTStorage{
ctx: ctx,
config: cfg,
nodeID: "test-node-id",
cache: make(map[string]*CachedEntry),
metrics: &StorageMetrics{
LastUpdate: time.Now(),
},
}
}
func containsSubstring(str, substr string) bool {
if len(substr) == 0 {
return true
}
if len(str) < len(substr) {
return false
}
for i := 0; i <= len(str)-len(substr); i++ {
if str[i:i+len(substr)] == substr {
return true
}
}
return false
}
// Benchmarks for security performance
func BenchmarkSecurityPolicyChecks(b *testing.B) {
ctx := context.Background()
cfg := &config.Config{
Agent: config.AgentConfig{
ID: "bench-agent",
Role: "backend_developer",
},
Security: config.SecurityConfig{
KeyRotationDays: 90,
AuditLogging: true,
AuditPath: "/tmp/bench-security.log",
},
}
eds := createMockEncryptedStorage(ctx, cfg)
b.ResetTimer()
b.Run("store_policy_check", func(b *testing.B) {
for i := 0; i < b.N; i++ {
eds.checkStoreAccessPolicy("backend_developer", "test:address", "content")
}
})
b.Run("retrieve_policy_check", func(b *testing.B) {
for i := 0; i < b.N; i++ {
eds.checkRetrieveAccessPolicy("backend_developer", "test:address")
}
})
b.Run("announce_policy_check", func(b *testing.B) {
for i := 0; i < b.N; i++ {
eds.checkAnnounceAccessPolicy("senior_software_architect", "test:address")
}
})
}
func BenchmarkAuditOperations(b *testing.B) {
ctx := context.Background()
cfg := &config.Config{
Agent: config.AgentConfig{
ID: "bench-agent",
Role: "backend_developer",
},
Security: config.SecurityConfig{
KeyRotationDays: 90,
AuditLogging: true,
AuditPath: "/tmp/bench-audit.log",
},
}
eds := createMockEncryptedStorage(ctx, cfg)
b.ResetTimer()
b.Run("store_audit", func(b *testing.B) {
for i := 0; i < b.N; i++ {
eds.auditStoreOperation("test:address", "backend_developer", "content", 1024, true, "")
}
})
b.Run("retrieve_audit", func(b *testing.B) {
for i := 0; i < b.N; i++ {
eds.auditRetrieveOperation("test:address", "backend_developer", true, "")
}
})
b.Run("announce_audit", func(b *testing.B) {
for i := 0; i < b.N; i++ {
eds.auditAnnounceOperation("test:address", "backend_developer", true, "")
}
})
}

View File

@@ -1,14 +1,117 @@
package dht
import (
"context"
"errors"
"fmt"
"chorus/pkg/config"
libp2p "github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
"github.com/multiformats/go-multiaddr"
)
// RealDHT wraps a libp2p-based DHT to satisfy the generic DHT interface.
type RealDHT struct {
cancel context.CancelFunc
host host.Host
dht *LibP2PDHT
}
// NewRealDHT creates a new real DHT implementation backed by libp2p.
func NewRealDHT(cfg *config.HybridConfig) (DHT, error) {
if cfg == nil {
cfg = &config.HybridConfig{}
}
ctx, cancel := context.WithCancel(context.Background())
listenAddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
if err != nil {
cancel()
return nil, fmt.Errorf("failed to create listen address: %w", err)
}
host, err := libp2p.New(
libp2p.ListenAddrs(listenAddr),
libp2p.Security(noise.ID, noise.New),
libp2p.Transport(tcp.NewTCPTransport),
libp2p.DefaultMuxers,
libp2p.EnableRelay(),
)
if err != nil {
cancel()
return nil, fmt.Errorf("failed to create libp2p host: %w", err)
}
opts := []Option{
WithProtocolPrefix("/CHORUS"),
}
if nodes := cfg.GetDHTBootstrapNodes(); len(nodes) > 0 {
opts = append(opts, WithBootstrapPeersFromStrings(nodes))
}
libp2pDHT, err := NewLibP2PDHT(ctx, host, opts...)
if err != nil {
host.Close()
cancel()
return nil, fmt.Errorf("failed to initialize libp2p DHT: %w", err)
}
if err := libp2pDHT.Bootstrap(); err != nil {
libp2pDHT.Close()
host.Close()
cancel()
return nil, fmt.Errorf("failed to bootstrap DHT: %w", err)
}
return &RealDHT{
cancel: cancel,
host: host,
dht: libp2pDHT,
}, nil
}
// PutValue stores a value in the DHT.
func (r *RealDHT) PutValue(ctx context.Context, key string, value []byte) error {
return r.dht.PutValue(ctx, key, value)
}
// GetValue retrieves a value from the DHT.
func (r *RealDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
return r.dht.GetValue(ctx, key)
}
// Provide announces that this node can provide the given key.
func (r *RealDHT) Provide(ctx context.Context, key string) error {
return r.dht.Provide(ctx, key)
}
// FindProviders locates peers that can provide the specified key.
func (r *RealDHT) FindProviders(ctx context.Context, key string, limit int) ([]peer.AddrInfo, error) {
return r.dht.FindProviders(ctx, key, limit)
}
// GetStats exposes runtime metrics for the real DHT.
func (r *RealDHT) GetStats() DHTStats {
return r.dht.GetStats()
}
// Close releases resources associated with the DHT.
func (r *RealDHT) Close() error {
r.cancel()
var errs []error
if err := r.dht.Close(); err != nil {
errs = append(errs, err)
}
if err := r.host.Close(); err != nil {
errs = append(errs, err)
}
return errors.Join(errs...)
}
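Reviewer note: a brief usage sketch of the new wrapper. It assumes a HybridConfig whose bootstrap nodes are reachable (only GetDHTBootstrapNodes is visible in this diff) and uses illustrative keys and log calls; construction fails unless Bootstrap succeeds.

	realDHT, err := NewRealDHT(&config.HybridConfig{})
	if err != nil {
		log.Fatalf("real DHT unavailable: %v", err)
	}
	defer realDHT.Close()

	ctx := context.Background()
	if err := realDHT.PutValue(ctx, "example-key", []byte("value")); err != nil {
		log.Printf("put failed: %v", err)
	}
	if v, err := realDHT.GetValue(ctx, "example-key"); err == nil {
		log.Printf("got %q", v)
	}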

View File

@@ -2,159 +2,106 @@ package dht
import (
"context"
"fmt"
"testing"
"time"
)
func newReplicationManagerForTest(t *testing.T) *ReplicationManager {
t.Helper()
cfg := &ReplicationConfig{
ReplicationFactor: 3,
ReprovideInterval: time.Hour,
CleanupInterval: time.Hour,
ProviderTTL: 30 * time.Minute,
MaxProvidersPerKey: 5,
EnableAutoReplication: false,
EnableReprovide: false,
MaxConcurrentReplications: 1,
}
rm := NewReplicationManager(context.Background(), nil, cfg)
t.Cleanup(func() {
if rm.reprovideTimer != nil {
rm.reprovideTimer.Stop()
}
if rm.cleanupTimer != nil {
rm.cleanupTimer.Stop()
}
rm.cancel()
})
return rm
}
func TestAddContentRegistersKey(t *testing.T) {
rm := newReplicationManagerForTest(t)
if err := rm.AddContent("ucxl://example/path", 512, 1); err != nil {
t.Fatalf("expected AddContent to succeed, got error: %v", err)
}
rm.keysMutex.RLock()
record, ok := rm.contentKeys["ucxl://example/path"]
rm.keysMutex.RUnlock()
if !ok {
t.Fatal("expected content key to be registered")
}
if record.Size != 512 {
t.Fatalf("expected size 512, got %d", record.Size)
}
}
func TestRemoveContentClearsTracking(t *testing.T) {
rm := newReplicationManagerForTest(t)
if err := rm.AddContent("ucxl://example/path", 512, 1); err != nil {
t.Fatalf("AddContent returned error: %v", err)
}
if err := rm.RemoveContent("ucxl://example/path"); err != nil {
t.Fatalf("RemoveContent returned error: %v", err)
}
rm.keysMutex.RLock()
_, exists := rm.contentKeys["ucxl://example/path"]
rm.keysMutex.RUnlock()
if exists {
t.Fatal("expected content key to be removed")
}
}
func TestGetReplicationStatusReturnsCopy(t *testing.T) {
rm := newReplicationManagerForTest(t)
if err := rm.AddContent("ucxl://example/path", 512, 1); err != nil {
t.Fatalf("AddContent returned error: %v", err)
}
status, err := rm.GetReplicationStatus("ucxl://example/path")
if err != nil {
t.Fatalf("GetReplicationStatus returned error: %v", err)
}
if status.Key != "ucxl://example/path" {
t.Fatalf("expected status key to match, got %s", status.Key)
}
// Mutating status should not affect internal state
status.HealthyProviders = 99
internal, _ := rm.GetReplicationStatus("ucxl://example/path")
if internal.HealthyProviders == 99 {
t.Fatal("expected GetReplicationStatus to return a copy")
}
}
func TestGetMetricsReturnsSnapshot(t *testing.T) {
rm := newReplicationManagerForTest(t)
metrics := rm.GetMetrics()
if metrics == rm.metrics {
t.Fatal("expected GetMetrics to return a copy of metrics")
}
}
// TestLibP2PDHTReplication tests DHT replication functionality
func TestLibP2PDHTReplication(t *testing.T) {
// This would normally require a real libp2p setup
// For now, just test the interface methods exist
// Mock test - in a real implementation, you'd set up actual libp2p hosts
t.Log("DHT replication interface methods are implemented")
// Example of how the replication would be used:
// 1. Add content for replication
// 2. Content gets automatically provided to the DHT
// 3. Other nodes can discover this node as a provider
// 4. Periodic reproviding ensures content availability
// 5. Replication metrics track system health
}
// TestReplicationConfig tests replication configuration
func TestReplicationConfig(t *testing.T) {
config := DefaultReplicationConfig()
// Test default values
if config.ReplicationFactor != 3 {
t.Errorf("Expected default replication factor 3, got %d", config.ReplicationFactor)
}
if config.ReprovideInterval != 12*time.Hour {
t.Errorf("Expected default reprovide interval 12h, got %v", config.ReprovideInterval)
}
if !config.EnableAutoReplication {
t.Error("Expected auto replication to be enabled by default")
}
if !config.EnableReprovide {
t.Error("Expected reprovide to be enabled by default")
}
}
// TestProviderInfo tests provider information tracking
func TestProviderInfo(t *testing.T) {
// Test distance calculation
key := []byte("test-key")
peerID := "test-peer-id"
distance := calculateDistance(key, []byte(peerID))
// Distance should be non-zero for different inputs
if distance == 0 {
t.Error("Expected non-zero distance for different inputs")
}
t.Logf("Distance between key and peer: %d", distance)
}
// TestReplicationMetrics tests metrics collection
func TestReplicationMetrics(t *testing.T) {
ctx := context.Background()
mockDHT := NewMockDHTInterface()
rm := NewReplicationManager(ctx, mockDHT.Mock(), DefaultReplicationConfig())
defer rm.Stop()
// Add some content
for i := 0; i < 3; i++ {
key := fmt.Sprintf("test-key-%d", i)
rm.AddContent(key, int64(1000+i*100), i+1)
}
metrics := rm.GetMetrics()
if metrics.TotalKeys != 3 {
t.Errorf("Expected 3 total keys, got %d", metrics.TotalKeys)
}
t.Logf("Replication metrics: %+v", metrics)
}