Harden CHORUS security and messaging stack
pkg/dht/dht.go
@@ -6,33 +6,34 @@ import (
    "sync"
    "time"

+   "crypto/sha256"
+   "github.com/ipfs/go-cid"
+   dht "github.com/libp2p/go-libp2p-kad-dht"
    "github.com/libp2p/go-libp2p/core/host"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/libp2p/go-libp2p/core/protocol"
    "github.com/libp2p/go-libp2p/core/routing"
-   dht "github.com/libp2p/go-libp2p-kad-dht"
    "github.com/multiformats/go-multiaddr"
    "github.com/multiformats/go-multihash"
-   "github.com/ipfs/go-cid"
-   "crypto/sha256"
)

// LibP2PDHT provides distributed hash table functionality for CHORUS peer discovery
type LibP2PDHT struct {
    host      host.Host
    kdht      *dht.IpfsDHT
    ctx       context.Context
    cancel    context.CancelFunc
    config    *Config
+   startTime time.Time

    // Bootstrap state
    bootstrapped   bool
    bootstrapMutex sync.RWMutex

    // Peer management
    knownPeers map[peer.ID]*PeerInfo
    peersMutex sync.RWMutex

    // Replication management
    replicationManager *ReplicationManager
}
@@ -41,30 +42,32 @@ type LibP2PDHT struct {
type Config struct {
    // Bootstrap nodes for initial DHT discovery
    BootstrapPeers []multiaddr.Multiaddr

    // Protocol prefix for CHORUS DHT
    ProtocolPrefix string

    // Bootstrap timeout
    BootstrapTimeout time.Duration

    // Peer discovery interval
    DiscoveryInterval time.Duration

    // DHT mode (client, server, auto)
    Mode dht.ModeOpt

    // Enable automatic bootstrap
    AutoBootstrap bool
}

// PeerInfo holds information about discovered peers
+const defaultProviderResultLimit = 20
+
type PeerInfo struct {
    ID           peer.ID
    Addresses    []multiaddr.Multiaddr
    Agent        string
    Role         string
    LastSeen     time.Time
+   Capabilities []string
}
@@ -74,23 +77,28 @@ func DefaultConfig() *Config {
        ProtocolPrefix:    "/CHORUS",
        BootstrapTimeout:  30 * time.Second,
        DiscoveryInterval: 60 * time.Second,
        Mode:              dht.ModeAuto,
        AutoBootstrap:     true,
    }
}

+// NewDHT is a backward compatible helper that delegates to NewLibP2PDHT.
+func NewDHT(ctx context.Context, host host.Host, opts ...Option) (*LibP2PDHT, error) {
+   return NewLibP2PDHT(ctx, host, opts...)
+}
+
// NewLibP2PDHT creates a new LibP2PDHT instance
func NewLibP2PDHT(ctx context.Context, host host.Host, opts ...Option) (*LibP2PDHT, error) {
    config := DefaultConfig()
    for _, opt := range opts {
        opt(config)
    }

    // Create context with cancellation
    dhtCtx, cancel := context.WithCancel(ctx)

    // Create Kademlia DHT
    kdht, err := dht.New(dhtCtx, host,
        dht.Mode(config.Mode),
        dht.ProtocolPrefix(protocol.ID(config.ProtocolPrefix)),
    )
@@ -98,22 +106,23 @@ func NewLibP2PDHT(ctx context.Context, host host.Host, opts ...Option) (*LibP2PD
        cancel()
        return nil, fmt.Errorf("failed to create DHT: %w", err)
    }

    d := &LibP2PDHT{
        host:       host,
        kdht:       kdht,
        ctx:        dhtCtx,
        cancel:     cancel,
        config:     config,
+       startTime:  time.Now(),
        knownPeers: make(map[peer.ID]*PeerInfo),
    }

    // Initialize replication manager
    d.replicationManager = NewReplicationManager(dhtCtx, kdht, DefaultReplicationConfig())

    // Start background processes
    go d.startBackgroundTasks()

    return d, nil
}
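For orientation, here is a minimal caller-side sketch of how the constructor above and the Bootstrap flow in the following hunks are expected to be used. The libp2p.New call, the import path, and the package alias are illustrative assumptions; only NewLibP2PDHT, NewDHT, Bootstrap, and Close appear in this diff.

    package main

    import (
        "context"
        "log"

        "github.com/libp2p/go-libp2p"

        chorusdht "chorus/pkg/dht" // assumed import path for the package in this diff
    )

    func main() {
        ctx := context.Background()

        // Plain libp2p host; options omitted for brevity.
        h, err := libp2p.New()
        if err != nil {
            log.Fatal(err)
        }

        // NewDHT remains available for older callers; it delegates to NewLibP2PDHT.
        d, err := chorusdht.NewLibP2PDHT(ctx, h)
        if err != nil {
            log.Fatal(err)
        }
        defer d.Close()

        // Bootstrap falls back to the default IPFS bootstrap peers when none are configured.
        if err := d.Bootstrap(); err != nil {
            log.Printf("bootstrap failed: %v", err)
        }
    }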
@@ -178,25 +187,25 @@ func WithAutoBootstrap(auto bool) Option {
func (d *LibP2PDHT) Bootstrap() error {
    d.bootstrapMutex.Lock()
    defer d.bootstrapMutex.Unlock()

    if d.bootstrapped {
        return nil
    }

    // Connect to bootstrap peers
    if len(d.config.BootstrapPeers) == 0 {
        // Use default IPFS bootstrap peers if none configured
        d.config.BootstrapPeers = dht.DefaultBootstrapPeers
    }

    // Bootstrap the DHT
    bootstrapCtx, cancel := context.WithTimeout(d.ctx, d.config.BootstrapTimeout)
    defer cancel()

    if err := d.kdht.Bootstrap(bootstrapCtx); err != nil {
        return fmt.Errorf("DHT bootstrap failed: %w", err)
    }

    // Connect to bootstrap peers
    var connected int
    for _, peerAddr := range d.config.BootstrapPeers {
@@ -204,7 +213,7 @@ func (d *LibP2PDHT) Bootstrap() error {
        if err != nil {
            continue
        }

        connectCtx, cancel := context.WithTimeout(d.ctx, 10*time.Second)
        if err := d.host.Connect(connectCtx, *addrInfo); err != nil {
            cancel()
@@ -213,11 +222,11 @@ func (d *LibP2PDHT) Bootstrap() error {
        cancel()
        connected++
    }

    if connected == 0 {
        return fmt.Errorf("failed to connect to any bootstrap peers")
    }

    d.bootstrapped = true
    return nil
}
@@ -233,13 +242,13 @@ func (d *LibP2PDHT) IsBootstrapped() bool {
func (d *LibP2PDHT) keyToCID(key string) (cid.Cid, error) {
    // Hash the key
    hash := sha256.Sum256([]byte(key))

    // Create multihash
    mh, err := multihash.EncodeName(hash[:], "sha2-256")
    if err != nil {
        return cid.Undef, err
    }

    // Create CID
    return cid.NewCidV1(cid.Raw, mh), nil
}
@@ -249,13 +258,13 @@ func (d *LibP2PDHT) Provide(ctx context.Context, key string) error {
    if !d.IsBootstrapped() {
        return fmt.Errorf("DHT not bootstrapped")
    }

    // Convert key to CID
    keyCID, err := d.keyToCID(key)
    if err != nil {
        return fmt.Errorf("failed to create CID from key: %w", err)
    }

    return d.kdht.Provide(ctx, keyCID, true)
}
@@ -264,31 +273,32 @@ func (d *LibP2PDHT) FindProviders(ctx context.Context, key string, limit int) ([
    if !d.IsBootstrapped() {
        return nil, fmt.Errorf("DHT not bootstrapped")
    }

    // Convert key to CID
    keyCID, err := d.keyToCID(key)
    if err != nil {
        return nil, fmt.Errorf("failed to create CID from key: %w", err)
    }

-   // Find providers (FindProviders returns a channel and an error)
-   providersChan, err := d.kdht.FindProviders(ctx, keyCID)
-   if err != nil {
-       return nil, fmt.Errorf("failed to find providers: %w", err)
-   }
+   maxProviders := limit
+   if maxProviders <= 0 {
+       maxProviders = defaultProviderResultLimit
+   }

-   // Collect providers from channel
-   providers := make([]peer.AddrInfo, 0, limit)
-   // TODO: Fix libp2p FindProviders channel type mismatch
-   // The channel appears to return int instead of peer.AddrInfo in this version
-   _ = providersChan // Avoid unused variable error
-   // for providerInfo := range providersChan {
-   //     providers = append(providers, providerInfo)
-   //     if len(providers) >= limit {
-   //         break
-   //     }
-   // }
+   providerCtx, cancel := context.WithCancel(ctx)
+   defer cancel()
+
+   providersChan := d.kdht.FindProvidersAsync(providerCtx, keyCID, maxProviders)
+   providers := make([]peer.AddrInfo, 0, maxProviders)
+
+   for providerInfo := range providersChan {
+       providers = append(providers, providerInfo)
+       if limit > 0 && len(providers) >= limit {
+           cancel()
+           break
+       }
+   }

    return providers, nil
}
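The reworked lookup path above is easiest to read from the caller's side. A minimal sketch, assuming an already-bootstrapped instance d; the key string and timeout are arbitrary examples:

    // Announce that this node can serve a key, then look up other providers.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    if err := d.Provide(ctx, "CHORUS:peer"); err != nil {
        log.Printf("provide failed: %v", err)
    }

    // A limit <= 0 falls back to defaultProviderResultLimit (20).
    providers, err := d.FindProviders(ctx, "CHORUS:peer", 10)
    if err != nil {
        log.Printf("lookup failed: %v", err)
    }
    for _, p := range providers {
        log.Printf("provider %s with %d addresses", p.ID, len(p.Addrs))
    }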
@@ -297,7 +307,7 @@ func (d *LibP2PDHT) PutValue(ctx context.Context, key string, value []byte) erro
    if !d.IsBootstrapped() {
        return fmt.Errorf("DHT not bootstrapped")
    }

    return d.kdht.PutValue(ctx, key, value)
}
@@ -306,7 +316,7 @@ func (d *LibP2PDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
    if !d.IsBootstrapped() {
        return nil, fmt.Errorf("DHT not bootstrapped")
    }

    return d.kdht.GetValue(ctx, key)
}
@@ -315,7 +325,7 @@ func (d *LibP2PDHT) FindPeer(ctx context.Context, peerID peer.ID) (peer.AddrInfo
    if !d.IsBootstrapped() {
        return peer.AddrInfo{}, fmt.Errorf("DHT not bootstrapped")
    }

    return d.kdht.FindPeer(ctx, peerID)
}
@@ -329,14 +339,30 @@ func (d *LibP2PDHT) GetConnectedPeers() []peer.ID {
    return d.kdht.Host().Network().Peers()
}

+// GetStats reports basic runtime statistics for the DHT
+func (d *LibP2PDHT) GetStats() DHTStats {
+   stats := DHTStats{
+       TotalPeers: len(d.GetConnectedPeers()),
+       Uptime:     time.Since(d.startTime),
+   }
+
+   if d.replicationManager != nil {
+       if metrics := d.replicationManager.GetMetrics(); metrics != nil {
+           stats.TotalKeys = int(metrics.TotalKeys)
+       }
+   }
+
+   return stats
+}
+
// RegisterPeer registers a peer with capability information
func (d *LibP2PDHT) RegisterPeer(peerID peer.ID, agent, role string, capabilities []string) {
    d.peersMutex.Lock()
    defer d.peersMutex.Unlock()

    // Get peer addresses from host
    peerInfo := d.host.Peerstore().PeerInfo(peerID)

    d.knownPeers[peerID] = &PeerInfo{
        ID:        peerID,
        Addresses: peerInfo.Addrs,
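A small monitoring sketch for the new GetStats accessor, assuming a DHTStats type with the TotalPeers, TotalKeys, and Uptime fields used above (its full definition lives elsewhere in the package):

    // Periodically log DHT health; field names follow the GetStats body above.
    ticker := time.NewTicker(time.Minute)
    defer ticker.Stop()
    for range ticker.C {
        stats := d.GetStats()
        log.Printf("dht: peers=%d keys=%d uptime=%s",
            stats.TotalPeers, stats.TotalKeys, stats.Uptime)
    }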
@@ -351,12 +377,12 @@ func (d *LibP2PDHT) RegisterPeer(peerID peer.ID, agent, role string, capabilitie
func (d *LibP2PDHT) GetKnownPeers() map[peer.ID]*PeerInfo {
    d.peersMutex.RLock()
    defer d.peersMutex.RUnlock()

    result := make(map[peer.ID]*PeerInfo)
    for id, info := range d.knownPeers {
        result[id] = info
    }

    return result
}
@@ -371,7 +397,7 @@ func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerIn
        }
    }
    d.peersMutex.RUnlock()

    // Also search DHT for role-based keys
    roleKey := fmt.Sprintf("CHORUS:role:%s", role)
    providers, err := d.FindProviders(ctx, roleKey, 10)
@@ -379,11 +405,11 @@ func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerIn
        // Return local peers even if DHT search fails
        return localPeers, nil
    }

    // Convert providers to PeerInfo
    var result []*PeerInfo
    result = append(result, localPeers...)

    for _, provider := range providers {
        // Skip if we already have this peer
        found := false
@@ -402,7 +428,7 @@ func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerIn
            })
        }
    }

    return result, nil
}
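For context, a caller-side sketch of the role-based discovery flow; the peer ID is assumed to come from the host's connection events, and the agent, role, and capability strings are arbitrary examples:

    // Record what we know about a peer, then query by role.
    d.RegisterPeer(peerID, "chorus-agent", "worker", []string{"task-execution"})

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // FindPeersByRole merges the local peer table with DHT provider records.
    workers, err := d.FindPeersByRole(ctx, "worker")
    if err != nil {
        log.Printf("role lookup failed: %v", err)
    }
    for _, w := range workers {
        log.Printf("worker %s last seen %s", w.ID, w.LastSeen)
    }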
@@ -424,10 +450,10 @@ func (d *LibP2PDHT) startBackgroundTasks() {
    if d.config.AutoBootstrap {
        go d.autoBootstrap()
    }

    // Start periodic peer discovery
    go d.periodicDiscovery()

    // Start peer cleanup
    go d.peerCleanup()
}
@@ -436,7 +462,7 @@ func (d *LibP2PDHT) startBackgroundTasks() {
func (d *LibP2PDHT) autoBootstrap() {
    ticker := time.NewTicker(30 * time.Second)
    defer ticker.Stop()

    for {
        select {
        case <-d.ctx.Done():
@@ -456,7 +482,7 @@ func (d *LibP2PDHT) autoBootstrap() {
func (d *LibP2PDHT) periodicDiscovery() {
    ticker := time.NewTicker(d.config.DiscoveryInterval)
    defer ticker.Stop()

    for {
        select {
        case <-d.ctx.Done():
@@ -473,13 +499,13 @@ func (d *LibP2PDHT) periodicDiscovery() {
func (d *LibP2PDHT) performDiscovery() {
    ctx, cancel := context.WithTimeout(d.ctx, 30*time.Second)
    defer cancel()

    // Look for general CHORUS peers
    providers, err := d.FindProviders(ctx, "CHORUS:peer", 10)
    if err != nil {
        return
    }

    // Update known peers
    d.peersMutex.Lock()
    for _, provider := range providers {
@@ -498,7 +524,7 @@ func (d *LibP2PDHT) performDiscovery() {
func (d *LibP2PDHT) peerCleanup() {
    ticker := time.NewTicker(5 * time.Minute)
    defer ticker.Stop()

    for {
        select {
        case <-d.ctx.Done():
@@ -513,9 +539,9 @@ func (d *LibP2PDHT) peerCleanup() {
func (d *LibP2PDHT) cleanupStalePeers() {
    d.peersMutex.Lock()
    defer d.peersMutex.Unlock()

    staleThreshold := time.Now().Add(-time.Hour) // 1 hour threshold

    for peerID, peerInfo := range d.knownPeers {
        if peerInfo.LastSeen.Before(staleThreshold) {
            // Check if peer is still connected
@@ -526,7 +552,7 @@ func (d *LibP2PDHT) cleanupStalePeers() {
                    break
                }
            }

            if !connected {
                delete(d.knownPeers, peerID)
            }
@@ -589,11 +615,11 @@ func (d *LibP2PDHT) EnableReplication(config *ReplicationConfig) error {
    if d.replicationManager != nil {
        return fmt.Errorf("replication already enabled")
    }

    if config == nil {
        config = DefaultReplicationConfig()
    }

    d.replicationManager = NewReplicationManager(d.ctx, d.kdht, config)
    return nil
}
@@ -603,11 +629,11 @@ func (d *LibP2PDHT) DisableReplication() error {
    if d.replicationManager == nil {
        return nil
    }

    if err := d.replicationManager.Stop(); err != nil {
        return fmt.Errorf("failed to stop replication manager: %w", err)
    }

    d.replicationManager = nil
    return nil
}
@@ -617,13 +643,18 @@ func (d *LibP2PDHT) IsReplicationEnabled() bool {
    return d.replicationManager != nil
}

+// ReplicationManager returns the underlying replication manager if enabled.
+func (d *LibP2PDHT) ReplicationManager() *ReplicationManager {
+   return d.replicationManager
+}
+
// Close shuts down the DHT
func (d *LibP2PDHT) Close() error {
    // Stop replication manager first
    if d.replicationManager != nil {
        d.replicationManager.Stop()
    }

    d.cancel()
    return d.kdht.Close()
}
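A lifecycle sketch for the replication hooks; note that NewLibP2PDHT already wires up a replication manager, so EnableReplication is only needed after an explicit disable. The GetMetrics call is the same accessor used by GetStats above; everything else is an illustrative assumption:

    // Replication is enabled by default; the new accessor exposes it to callers.
    if d.IsReplicationEnabled() {
        rm := d.ReplicationManager()
        _ = rm // e.g. inspect rm.GetMetrics() for replication health
    }

    // Turning replication off and back on with the default configuration.
    if err := d.DisableReplication(); err != nil {
        log.Printf("disable replication: %v", err)
    }
    if err := d.EnableReplication(nil); err != nil { // nil selects DefaultReplicationConfig()
        log.Printf("enable replication: %v", err)
    }

    // Close stops the replication manager before shutting the DHT down.
    defer d.Close()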
@@ -633,10 +664,10 @@ func (d *LibP2PDHT) RefreshRoutingTable() error {
    if !d.IsBootstrapped() {
        return fmt.Errorf("DHT not bootstrapped")
    }

    // RefreshRoutingTable() returns a channel with errors, not a direct error
    errChan := d.kdht.RefreshRoutingTable()

    // Wait for the first error (if any) from the channel
    select {
    case err := <-errChan:
@@ -654,4 +685,4 @@ func (d *LibP2PDHT) GetDHTSize() int {
// Host returns the underlying libp2p host
func (d *LibP2PDHT) Host() host.Host {
    return d.host
}