package dht

import (
	"context"
	"crypto/sha256"
	"fmt"
	"sync"
	"time"

	"github.com/ipfs/go-cid"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/libp2p/go-libp2p/core/routing"
	"github.com/multiformats/go-multiaddr"
	"github.com/multiformats/go-multihash"
)

// LibP2PDHT provides distributed hash table functionality for CHORUS peer discovery
type LibP2PDHT struct {
	host      host.Host
	kdht      *dht.IpfsDHT
	ctx       context.Context
	cancel    context.CancelFunc
	config    *Config
	startTime time.Time

	// Bootstrap state
	bootstrapped   bool
	bootstrapMutex sync.RWMutex

	// Peer management
	knownPeers map[peer.ID]*PeerInfo
	peersMutex sync.RWMutex

	// Replication management
	replicationManager *ReplicationManager
}

// Config holds DHT configuration
type Config struct {
	// Bootstrap nodes for initial DHT discovery
	BootstrapPeers []multiaddr.Multiaddr

	// Protocol prefix for the CHORUS DHT
	ProtocolPrefix string

	// Bootstrap timeout
	BootstrapTimeout time.Duration

	// Peer discovery interval
	DiscoveryInterval time.Duration

	// DHT mode (client, server, or auto)
	Mode dht.ModeOpt

	// Enable automatic bootstrap retries
	AutoBootstrap bool
}

// defaultProviderResultLimit caps provider lookups when the caller does not
// specify a positive limit.
const defaultProviderResultLimit = 20

// PeerInfo holds information about discovered peers
type PeerInfo struct {
	ID           peer.ID
	Addresses    []multiaddr.Multiaddr
	Agent        string
	Role         string
	LastSeen     time.Time
	Capabilities []string
}

// DefaultConfig returns a default DHT configuration
func DefaultConfig() *Config {
	return &Config{
		ProtocolPrefix:    "/CHORUS",
		BootstrapTimeout:  30 * time.Second,
		DiscoveryInterval: 60 * time.Second,
		Mode:              dht.ModeAuto,
		AutoBootstrap:     true,
	}
}

// NewDHT is a backward-compatible helper that delegates to NewLibP2PDHT.
func NewDHT(ctx context.Context, host host.Host, opts ...Option) (*LibP2PDHT, error) {
	return NewLibP2PDHT(ctx, host, opts...)
}

// NewLibP2PDHT creates a new LibP2PDHT instance
func NewLibP2PDHT(ctx context.Context, host host.Host, opts ...Option) (*LibP2PDHT, error) {
	config := DefaultConfig()
	for _, opt := range opts {
		opt(config)
	}

	// Create context with cancellation
	dhtCtx, cancel := context.WithCancel(ctx)

	// Create Kademlia DHT
	kdht, err := dht.New(dhtCtx, host,
		dht.Mode(config.Mode),
		dht.ProtocolPrefix(protocol.ID(config.ProtocolPrefix)),
	)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("failed to create DHT: %w", err)
	}

	d := &LibP2PDHT{
		host:       host,
		kdht:       kdht,
		ctx:        dhtCtx,
		cancel:     cancel,
		config:     config,
		startTime:  time.Now(),
		knownPeers: make(map[peer.ID]*PeerInfo),
	}

	// Initialize replication manager
	d.replicationManager = NewReplicationManager(dhtCtx, kdht, DefaultReplicationConfig())

	// Start background maintenance; it spawns its own goroutines and returns
	// immediately, so no extra goroutine is needed here
	d.startBackgroundTasks()

	return d, nil
}
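
// Illustrative wiring from a consuming package (a sketch, not part of this
// package's API; the bootstrap multiaddr is a placeholder, and libp2p.New
// defaults are assumed):
//
//	h, err := libp2p.New()
//	if err != nil {
//		log.Fatal(err)
//	}
//	d, err := dht.NewLibP2PDHT(ctx, h,
//		dht.WithBootstrapPeersFromStrings([]string{
//			"/ip4/203.0.113.7/tcp/4001/p2p/12D3KooWExamplePeerID",
//		}),
//		dht.WithAutoBootstrap(false),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer d.Close()
//	if err := d.Bootstrap(); err != nil {
//		log.Printf("bootstrap: %v", err)
//	}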

// Option configures the DHT
type Option func(*Config)

// WithBootstrapPeers sets the bootstrap peers
func WithBootstrapPeers(peers []multiaddr.Multiaddr) Option {
	return func(c *Config) {
		c.BootstrapPeers = peers
	}
}

// WithBootstrapPeersFromStrings sets bootstrap peers from string addresses,
// silently skipping any address that fails to parse
func WithBootstrapPeersFromStrings(addresses []string) Option {
	return func(c *Config) {
		c.BootstrapPeers = make([]multiaddr.Multiaddr, 0, len(addresses))
		for _, addr := range addresses {
			if ma, err := multiaddr.NewMultiaddr(addr); err == nil {
				c.BootstrapPeers = append(c.BootstrapPeers, ma)
			}
		}
	}
}

// WithProtocolPrefix sets the DHT protocol prefix
func WithProtocolPrefix(prefix string) Option {
	return func(c *Config) {
		c.ProtocolPrefix = prefix
	}
}

// WithMode sets the DHT mode
func WithMode(mode dht.ModeOpt) Option {
	return func(c *Config) {
		c.Mode = mode
	}
}

// WithBootstrapTimeout sets the bootstrap timeout
func WithBootstrapTimeout(timeout time.Duration) Option {
	return func(c *Config) {
		c.BootstrapTimeout = timeout
	}
}

// WithDiscoveryInterval sets the peer discovery interval
func WithDiscoveryInterval(interval time.Duration) Option {
	return func(c *Config) {
		c.DiscoveryInterval = interval
	}
}

// WithAutoBootstrap enables or disables automatic bootstrap
func WithAutoBootstrap(auto bool) Option {
	return func(c *Config) {
		c.AutoBootstrap = auto
	}
}

// Bootstrap connects to the DHT network using bootstrap peers
func (d *LibP2PDHT) Bootstrap() error {
	d.bootstrapMutex.Lock()
	defer d.bootstrapMutex.Unlock()

	if d.bootstrapped {
		return nil
	}

	// Fall back to the default IPFS bootstrap peers if none are configured
	if len(d.config.BootstrapPeers) == 0 {
		d.config.BootstrapPeers = dht.DefaultBootstrapPeers
	}

	// Bootstrap the DHT
	bootstrapCtx, cancel := context.WithTimeout(d.ctx, d.config.BootstrapTimeout)
	defer cancel()

	if err := d.kdht.Bootstrap(bootstrapCtx); err != nil {
		return fmt.Errorf("DHT bootstrap failed: %w", err)
	}

	// Connect to bootstrap peers
	var connected int
	for _, peerAddr := range d.config.BootstrapPeers {
		addrInfo, err := peer.AddrInfoFromP2pAddr(peerAddr)
		if err != nil {
			continue
		}

		connectCtx, cancel := context.WithTimeout(d.ctx, 10*time.Second)
		if err := d.host.Connect(connectCtx, *addrInfo); err != nil {
			cancel()
			continue
		}
		cancel()
		connected++
	}

	if connected == 0 {
		return fmt.Errorf("failed to connect to any bootstrap peers")
	}

	d.bootstrapped = true
	return nil
}

// IsBootstrapped returns whether the DHT has been bootstrapped
func (d *LibP2PDHT) IsBootstrapped() bool {
	d.bootstrapMutex.RLock()
	defer d.bootstrapMutex.RUnlock()
	return d.bootstrapped
}

// keyToCID converts a string key to a CID for DHT operations
func (d *LibP2PDHT) keyToCID(key string) (cid.Cid, error) {
	// Hash the key
	hash := sha256.Sum256([]byte(key))

	// Wrap the digest in a multihash
	mh, err := multihash.EncodeName(hash[:], "sha2-256")
	if err != nil {
		return cid.Undef, err
	}

	// Create a CIDv1 with the raw codec
	return cid.NewCidV1(cid.Raw, mh), nil
}
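
// Because keyToCID is deterministic, every node that hashes the same string
// key derives the same CID, which is what lets Provide and FindProviders act
// as a rendezvous on arbitrary keys (e.g. "CHORUS:role:admin" maps to one
// fixed CID network-wide).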

// Provide announces that this peer provides a given key
func (d *LibP2PDHT) Provide(ctx context.Context, key string) error {
	if !d.IsBootstrapped() {
		return fmt.Errorf("DHT not bootstrapped")
	}

	// Convert key to CID
	keyCID, err := d.keyToCID(key)
	if err != nil {
		return fmt.Errorf("failed to create CID from key: %w", err)
	}

	return d.kdht.Provide(ctx, keyCID, true)
}

// FindProviders finds peers that provide a given key
func (d *LibP2PDHT) FindProviders(ctx context.Context, key string, limit int) ([]peer.AddrInfo, error) {
	if !d.IsBootstrapped() {
		return nil, fmt.Errorf("DHT not bootstrapped")
	}

	// Convert key to CID
	keyCID, err := d.keyToCID(key)
	if err != nil {
		return nil, fmt.Errorf("failed to create CID from key: %w", err)
	}

	maxProviders := limit
	if maxProviders <= 0 {
		maxProviders = defaultProviderResultLimit
	}

	providerCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	providersChan := d.kdht.FindProvidersAsync(providerCtx, keyCID, maxProviders)
	providers := make([]peer.AddrInfo, 0, maxProviders)

	for providerInfo := range providersChan {
		providers = append(providers, providerInfo)
		if limit > 0 && len(providers) >= limit {
			cancel()
			break
		}
	}

	return providers, nil
}
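
// Rendezvous round trip between two nodes (sketch; assumes both sides are
// bootstrapped and agree on the key string):
//
//	// node A announces
//	if err := dhtA.Provide(ctx, "CHORUS:service:indexer"); err != nil { ... }
//
//	// node B discovers up to 5 providers
//	providers, err := dhtB.FindProviders(ctx, "CHORUS:service:indexer", 5)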

// PutValue puts a key-value pair into the DHT
func (d *LibP2PDHT) PutValue(ctx context.Context, key string, value []byte) error {
	if !d.IsBootstrapped() {
		return fmt.Errorf("DHT not bootstrapped")
	}

	return d.kdht.PutValue(ctx, key, value)
}

// GetValue retrieves a value from the DHT
func (d *LibP2PDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
	if !d.IsBootstrapped() {
		return nil, fmt.Errorf("DHT not bootstrapped")
	}

	return d.kdht.GetValue(ctx, key)
}
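
// PutValue/GetValue sketch. Note that go-libp2p-kad-dht validates record keys
// against registered namespace validators (e.g. "/pk/"), so arbitrary
// namespaces are rejected unless a matching Validator was configured on the
// DHT; the namespace below is an assumption for illustration:
//
//	err := d.PutValue(ctx, "/chorus/config/leader", []byte("peer-id"))
//	val, err := d.GetValue(ctx, "/chorus/config/leader")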

// FindPeer finds a specific peer in the DHT
func (d *LibP2PDHT) FindPeer(ctx context.Context, peerID peer.ID) (peer.AddrInfo, error) {
	if !d.IsBootstrapped() {
		return peer.AddrInfo{}, fmt.Errorf("DHT not bootstrapped")
	}

	return d.kdht.FindPeer(ctx, peerID)
}

// GetRoutingTable returns the DHT's content-routing interface.
// Despite the name, this exposes content routing rather than the raw
// Kademlia routing table.
func (d *LibP2PDHT) GetRoutingTable() routing.ContentRouting {
	return d.kdht
}

// GetConnectedPeers returns the peers currently connected to the underlying
// host (a superset of active DHT peers)
func (d *LibP2PDHT) GetConnectedPeers() []peer.ID {
	return d.kdht.Host().Network().Peers()
}

// GetStats reports basic runtime statistics for the DHT
func (d *LibP2PDHT) GetStats() DHTStats {
	stats := DHTStats{
		TotalPeers: len(d.GetConnectedPeers()),
		Uptime:     time.Since(d.startTime),
	}

	if d.replicationManager != nil {
		if metrics := d.replicationManager.GetMetrics(); metrics != nil {
			stats.TotalKeys = int(metrics.TotalKeys)
		}
	}

	return stats
}

// RegisterPeer registers a peer with capability information
func (d *LibP2PDHT) RegisterPeer(peerID peer.ID, agent, role string, capabilities []string) {
	d.peersMutex.Lock()
	defer d.peersMutex.Unlock()

	// Get peer addresses from the host's peerstore
	peerInfo := d.host.Peerstore().PeerInfo(peerID)

	d.knownPeers[peerID] = &PeerInfo{
		ID:           peerID,
		Addresses:    peerInfo.Addrs,
		Agent:        agent,
		Role:         role,
		LastSeen:     time.Now(),
		Capabilities: capabilities,
	}
}

// GetKnownPeers returns all known peers with their information.
// The returned map is a copy, but the *PeerInfo values are shared.
func (d *LibP2PDHT) GetKnownPeers() map[peer.ID]*PeerInfo {
	d.peersMutex.RLock()
	defer d.peersMutex.RUnlock()

	result := make(map[peer.ID]*PeerInfo, len(d.knownPeers))
	for id, info := range d.knownPeers {
		result[id] = info
	}

	return result
}

// FindPeersByRole finds peers with a specific role ("*" matches any role)
func (d *LibP2PDHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerInfo, error) {
	// First check locally known peers
	d.peersMutex.RLock()
	var localPeers []*PeerInfo
	for _, info := range d.knownPeers {
		if info.Role == role || role == "*" {
			localPeers = append(localPeers, info)
		}
	}
	d.peersMutex.RUnlock()

	// Also search the DHT for role-based provider keys
	roleKey := fmt.Sprintf("CHORUS:role:%s", role)
	providers, err := d.FindProviders(ctx, roleKey, 10)
	if err != nil {
		// Return local peers even if the DHT search fails
		return localPeers, nil
	}

	// Merge providers into the result, skipping peers we already have
	var result []*PeerInfo
	result = append(result, localPeers...)

	for _, provider := range providers {
		found := false
		for _, existing := range result {
			if existing.ID == provider.ID {
				found = true
				break
			}
		}
		if !found {
			result = append(result, &PeerInfo{
				ID:        provider.ID,
				Addresses: provider.Addrs,
				Role:      role, // Inferred from the role key searched
				LastSeen:  time.Now(),
			})
		}
	}

	return result, nil
}

// AnnounceRole announces this peer's role to the DHT
func (d *LibP2PDHT) AnnounceRole(ctx context.Context, role string) error {
	roleKey := fmt.Sprintf("CHORUS:role:%s", role)
	return d.Provide(ctx, roleKey)
}

// AnnounceCapability announces a capability to the DHT
func (d *LibP2PDHT) AnnounceCapability(ctx context.Context, capability string) error {
	capKey := fmt.Sprintf("CHORUS:capability:%s", capability)
	return d.Provide(ctx, capKey)
}
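
// Role-based discovery flow (sketch; the role name is illustrative):
//
//	// worker node announces itself
//	if err := d.AnnounceRole(ctx, "worker"); err != nil { ... }
//
//	// coordinator looks up workers (local cache plus DHT providers)
//	workers, err := d.FindPeersByRole(ctx, "worker")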

// startBackgroundTasks starts background maintenance tasks
func (d *LibP2PDHT) startBackgroundTasks() {
	// Auto-bootstrap if enabled
	if d.config.AutoBootstrap {
		go d.autoBootstrap()
	}

	// Start periodic peer discovery
	go d.periodicDiscovery()

	// Start peer cleanup
	go d.peerCleanup()
}

// autoBootstrap retries bootstrap every 30 seconds until it succeeds
func (d *LibP2PDHT) autoBootstrap() {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-d.ctx.Done():
			return
		case <-ticker.C:
			if !d.IsBootstrapped() {
				if err := d.Bootstrap(); err != nil {
					// Bootstrap failed; retry on the next tick
					continue
				}
			}
		}
	}
}

// periodicDiscovery performs periodic peer discovery
func (d *LibP2PDHT) periodicDiscovery() {
	ticker := time.NewTicker(d.config.DiscoveryInterval)
	defer ticker.Stop()

	for {
		select {
		case <-d.ctx.Done():
			return
		case <-ticker.C:
			if d.IsBootstrapped() {
				d.performDiscovery()
			}
		}
	}
}

// performDiscovery discovers new peers
func (d *LibP2PDHT) performDiscovery() {
	ctx, cancel := context.WithTimeout(d.ctx, 30*time.Second)
	defer cancel()

	// Look for general CHORUS peers
	providers, err := d.FindProviders(ctx, "CHORUS:peer", 10)
	if err != nil {
		return
	}

	// Record any peers we have not seen before
	d.peersMutex.Lock()
	for _, provider := range providers {
		if _, exists := d.knownPeers[provider.ID]; !exists {
			d.knownPeers[provider.ID] = &PeerInfo{
				ID:        provider.ID,
				Addresses: provider.Addrs,
				LastSeen:  time.Now(),
			}
		}
	}
	d.peersMutex.Unlock()
}

// peerCleanup periodically removes stale peer information
func (d *LibP2PDHT) peerCleanup() {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()

	for {
		select {
		case <-d.ctx.Done():
			return
		case <-ticker.C:
			d.cleanupStalePeers()
		}
	}
}

// cleanupStalePeers removes peers that haven't been seen for over an hour
// and are no longer connected
func (d *LibP2PDHT) cleanupStalePeers() {
	d.peersMutex.Lock()
	defer d.peersMutex.Unlock()

	staleThreshold := time.Now().Add(-time.Hour)

	// Build the connected-peer set once rather than scanning it per stale peer
	connected := make(map[peer.ID]struct{})
	for _, p := range d.GetConnectedPeers() {
		connected[p] = struct{}{}
	}

	for peerID, peerInfo := range d.knownPeers {
		if peerInfo.LastSeen.Before(staleThreshold) {
			if _, ok := connected[peerID]; !ok {
				delete(d.knownPeers, peerID)
			}
		}
	}
}

// Replication interface methods

// AddContentForReplication adds content to the replication manager
func (d *LibP2PDHT) AddContentForReplication(key string, size int64, priority int) error {
	if d.replicationManager == nil {
		return fmt.Errorf("replication manager not initialized")
	}
	return d.replicationManager.AddContent(key, size, priority)
}

// RemoveContentFromReplication removes content from the replication manager
func (d *LibP2PDHT) RemoveContentFromReplication(key string) error {
	if d.replicationManager == nil {
		return fmt.Errorf("replication manager not initialized")
	}
	return d.replicationManager.RemoveContent(key)
}

// GetReplicationStatus returns replication status for a specific key
func (d *LibP2PDHT) GetReplicationStatus(key string) (*ReplicationStatus, error) {
	if d.replicationManager == nil {
		return nil, fmt.Errorf("replication manager not initialized")
	}
	return d.replicationManager.GetReplicationStatus(key)
}

// GetReplicationMetrics returns replication metrics
func (d *LibP2PDHT) GetReplicationMetrics() *ReplicationMetrics {
	if d.replicationManager == nil {
		return &ReplicationMetrics{}
	}
	return d.replicationManager.GetMetrics()
}

// FindContentProviders finds providers for content using the replication manager
func (d *LibP2PDHT) FindContentProviders(ctx context.Context, key string, limit int) ([]ProviderInfo, error) {
	if d.replicationManager == nil {
		return nil, fmt.Errorf("replication manager not initialized")
	}
	return d.replicationManager.FindProviders(ctx, key, limit)
}

// ProvideContent announces this node as a provider for the given content
func (d *LibP2PDHT) ProvideContent(key string) error {
	if d.replicationManager == nil {
		return fmt.Errorf("replication manager not initialized")
	}
	return d.replicationManager.ProvideContent(key)
}

// EnableReplication starts a replication manager; it fails if one is
// already running
func (d *LibP2PDHT) EnableReplication(config *ReplicationConfig) error {
	if d.replicationManager != nil {
		return fmt.Errorf("replication already enabled")
	}

	if config == nil {
		config = DefaultReplicationConfig()
	}

	d.replicationManager = NewReplicationManager(d.ctx, d.kdht, config)
	return nil
}

// DisableReplication stops and removes the replication manager
func (d *LibP2PDHT) DisableReplication() error {
	if d.replicationManager == nil {
		return nil
	}

	if err := d.replicationManager.Stop(); err != nil {
		return fmt.Errorf("failed to stop replication manager: %w", err)
	}

	d.replicationManager = nil
	return nil
}

// IsReplicationEnabled returns whether replication is currently enabled
func (d *LibP2PDHT) IsReplicationEnabled() bool {
	return d.replicationManager != nil
}

// ReplicationManager returns the underlying replication manager if enabled.
func (d *LibP2PDHT) ReplicationManager() *ReplicationManager {
	return d.replicationManager
}
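
// Replication lifecycle sketch (the key, size, and priority values are
// illustrative; a manager is already created with defaults in NewLibP2PDHT):
//
//	if err := d.AddContentForReplication("CHORUS:blob:abc123", 4096, 1); err != nil { ... }
//	status, err := d.GetReplicationStatus("CHORUS:blob:abc123")
//	metrics := d.GetReplicationMetrics()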

// Close shuts down the DHT
func (d *LibP2PDHT) Close() error {
	// Stop the replication manager first (best effort)
	if d.replicationManager != nil {
		_ = d.replicationManager.Stop()
	}

	d.cancel()
	return d.kdht.Close()
}

// RefreshRoutingTable refreshes the DHT routing table
func (d *LibP2PDHT) RefreshRoutingTable() error {
	if !d.IsBootstrapped() {
		return fmt.Errorf("DHT not bootstrapped")
	}

	// RefreshRoutingTable returns a channel that reports errors, not a
	// direct error value
	errChan := d.kdht.RefreshRoutingTable()

	// Wait for the first result (if any) from the channel
	select {
	case err := <-errChan:
		return err
	case <-time.After(30 * time.Second):
		return fmt.Errorf("refresh routing table timed out")
	}
}

// GetDHTSize returns the size of the local routing table, a rough proxy for
// how many DHT peers this node knows about
func (d *LibP2PDHT) GetDHTSize() int {
	return d.kdht.RoutingTable().Size()
}

// Host returns the underlying libp2p host
func (d *LibP2PDHT) Host() host.Host {
	return d.host
}