Implement UCXL Protocol Foundation (Phase 1)
- Add complete UCXL address parser with BNF grammar validation
- Implement temporal navigation system with bounds checking
- Create UCXI HTTP server with REST-like operations
- Add comprehensive test suite with 87 passing tests
- Integrate with existing BZZZ architecture (opt-in via config)
- Support semantic addressing with wildcards and version control

Core Features:
- UCXL address format: ucxl://agent:role@project:task/temporal/path
- Temporal segments: *^, ~~N, ^^N, *~, *~N with navigation logic
- UCXI endpoints: GET/PUT/POST/DELETE/ANNOUNCE operations
- Production-ready with error handling and graceful shutdown

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
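To make the documented address shape concrete, here is a minimal, hypothetical parsing sketch in Go. The UCXLAddress type, its field names, and parseUCXL are illustrative assumptions, not the parser this commit adds:

package main

import (
	"fmt"
	"strings"
)

// UCXLAddress mirrors the documented shape
// ucxl://agent:role@project:task/temporal/path (field names assumed).
type UCXLAddress struct {
	Agent, Role, Project, Task, Temporal, Path string
}

func parseUCXL(addr string) (*UCXLAddress, error) {
	rest, ok := strings.CutPrefix(addr, "ucxl://")
	if !ok {
		return nil, fmt.Errorf("missing ucxl:// scheme")
	}
	authority, tail, ok := strings.Cut(rest, "@")
	if !ok {
		return nil, fmt.Errorf("missing agent:role@ authority")
	}
	agent, role, _ := strings.Cut(authority, ":")
	projTask, pathPart, _ := strings.Cut(tail, "/")
	project, task, _ := strings.Cut(projTask, ":")
	// The first path segment is the temporal selector (e.g. *^ or ~~3).
	temporal, path, _ := strings.Cut(pathPart, "/")
	return &UCXLAddress{agent, role, project, task, temporal, path}, nil
}

func main() {
	a, err := parseUCXL("ucxl://builder:dev@bzzz:phase1/~~3/docs/spec.md")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", a)
	// {Agent:builder Role:dev Project:bzzz Task:phase1 Temporal:~~3 Path:docs/spec.md}
}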
pkg/dht/dht.go (new file, 521 lines)
@@ -0,0 +1,521 @@
package dht

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/ipfs/go-cid"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/libp2p/go-libp2p/core/routing"
	"github.com/multiformats/go-multiaddr"
	"github.com/multiformats/go-multihash"
)

// DHT provides distributed hash table functionality for BZZZ peer discovery
type DHT struct {
	host   host.Host
	kdht   *dht.IpfsDHT
	ctx    context.Context
	cancel context.CancelFunc
	config *Config

	// Bootstrap state
	bootstrapped   bool
	bootstrapMutex sync.RWMutex

	// Peer management
	knownPeers map[peer.ID]*PeerInfo
	peersMutex sync.RWMutex
}

// Config holds DHT configuration
type Config struct {
	// Bootstrap nodes for initial DHT discovery
	BootstrapPeers []multiaddr.Multiaddr

	// Protocol prefix for BZZZ DHT
	ProtocolPrefix string

	// Bootstrap timeout
	BootstrapTimeout time.Duration

	// Peer discovery interval
	DiscoveryInterval time.Duration

	// DHT mode (client, server, auto)
	Mode dht.ModeOpt

	// Enable automatic bootstrap
	AutoBootstrap bool
}

// PeerInfo holds information about discovered peers
type PeerInfo struct {
	ID           peer.ID
	Addresses    []multiaddr.Multiaddr
	Agent        string
	Role         string
	LastSeen     time.Time
	Capabilities []string
}

// DefaultConfig returns a default DHT configuration
func DefaultConfig() *Config {
	return &Config{
		ProtocolPrefix:    "/bzzz",
		BootstrapTimeout:  30 * time.Second,
		DiscoveryInterval: 60 * time.Second,
		Mode:              dht.ModeAuto,
		AutoBootstrap:     true,
	}
}

// NewDHT creates a new DHT instance
func NewDHT(ctx context.Context, host host.Host, opts ...Option) (*DHT, error) {
	config := DefaultConfig()
	for _, opt := range opts {
		opt(config)
	}

	// Create context with cancellation
	dhtCtx, cancel := context.WithCancel(ctx)

	// Create Kademlia DHT; ProtocolPrefix expects a protocol.ID, not a string
	kdht, err := dht.New(dhtCtx, host,
		dht.Mode(config.Mode),
		dht.ProtocolPrefix(protocol.ID(config.ProtocolPrefix)),
	)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("failed to create DHT: %w", err)
	}

	d := &DHT{
		host:       host,
		kdht:       kdht,
		ctx:        dhtCtx,
		cancel:     cancel,
		config:     config,
		knownPeers: make(map[peer.ID]*PeerInfo),
	}

	// Start background processes
	go d.startBackgroundTasks()

	return d, nil
}
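
// Construction sketch (illustrative only; assumes a libp2p host from
// libp2p.New() and a placeholder bootstrap multiaddr):
//
//	h, _ := libp2p.New()
//	d, err := NewDHT(context.Background(), h,
//		WithProtocolPrefix("/bzzz"),
//		WithMode(dht.ModeServer),
//		WithBootstrapPeersFromStrings([]string{
//			"/ip4/192.0.2.10/tcp/4001/p2p/<peer-id>",
//		}),
//	)
//	if err != nil {
//		// handle error
//	}
//	defer d.Close()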

// Option configures the DHT
type Option func(*Config)

// WithBootstrapPeers sets the bootstrap peers
func WithBootstrapPeers(peers []multiaddr.Multiaddr) Option {
	return func(c *Config) {
		c.BootstrapPeers = peers
	}
}

// WithBootstrapPeersFromStrings sets bootstrap peers from string addresses;
// addresses that fail to parse are skipped
func WithBootstrapPeersFromStrings(addresses []string) Option {
	return func(c *Config) {
		c.BootstrapPeers = make([]multiaddr.Multiaddr, 0, len(addresses))
		for _, addr := range addresses {
			if ma, err := multiaddr.NewMultiaddr(addr); err == nil {
				c.BootstrapPeers = append(c.BootstrapPeers, ma)
			}
		}
	}
}

// WithProtocolPrefix sets the DHT protocol prefix
func WithProtocolPrefix(prefix string) Option {
	return func(c *Config) {
		c.ProtocolPrefix = prefix
	}
}

// WithMode sets the DHT mode
func WithMode(mode dht.ModeOpt) Option {
	return func(c *Config) {
		c.Mode = mode
	}
}

// WithBootstrapTimeout sets the bootstrap timeout
func WithBootstrapTimeout(timeout time.Duration) Option {
	return func(c *Config) {
		c.BootstrapTimeout = timeout
	}
}

// WithDiscoveryInterval sets the peer discovery interval
func WithDiscoveryInterval(interval time.Duration) Option {
	return func(c *Config) {
		c.DiscoveryInterval = interval
	}
}

// WithAutoBootstrap enables/disables automatic bootstrap
func WithAutoBootstrap(auto bool) Option {
	return func(c *Config) {
		c.AutoBootstrap = auto
	}
}

// Bootstrap connects to the DHT network using bootstrap peers
func (d *DHT) Bootstrap() error {
	d.bootstrapMutex.Lock()
	defer d.bootstrapMutex.Unlock()

	if d.bootstrapped {
		return nil
	}

	// Use default IPFS bootstrap peers if none configured
	if len(d.config.BootstrapPeers) == 0 {
		d.config.BootstrapPeers = dht.DefaultBootstrapPeers
	}

	// Bootstrap the DHT
	bootstrapCtx, cancel := context.WithTimeout(d.ctx, d.config.BootstrapTimeout)
	defer cancel()

	if err := d.kdht.Bootstrap(bootstrapCtx); err != nil {
		return fmt.Errorf("DHT bootstrap failed: %w", err)
	}

	// Connect to bootstrap peers
	var connected int
	for _, peerAddr := range d.config.BootstrapPeers {
		addrInfo, err := peer.AddrInfoFromP2pAddr(peerAddr)
		if err != nil {
			continue
		}

		connectCtx, cancel := context.WithTimeout(d.ctx, 10*time.Second)
		err = d.host.Connect(connectCtx, *addrInfo)
		cancel()
		if err != nil {
			continue
		}
		connected++
	}

	if connected == 0 {
		return fmt.Errorf("failed to connect to any bootstrap peers")
	}

	d.bootstrapped = true
	return nil
}

// IsBootstrapped returns whether the DHT has been bootstrapped
func (d *DHT) IsBootstrapped() bool {
	d.bootstrapMutex.RLock()
	defer d.bootstrapMutex.RUnlock()
	return d.bootstrapped
}

// keyToCID deterministically maps a string key to a CID so that
// Provide and FindProviders agree on the same content identifier
func keyToCID(key string) (cid.Cid, error) {
	mh, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
	if err != nil {
		return cid.Undef, err
	}
	return cid.NewCidV1(cid.Raw, mh), nil
}

// Provide announces that this peer provides a given key
func (d *DHT) Provide(ctx context.Context, key string) error {
	if !d.IsBootstrapped() {
		return fmt.Errorf("DHT not bootstrapped")
	}

	// Convert the key to a CID; kad-dht's Provide operates on CIDs
	c, err := keyToCID(key)
	if err != nil {
		return fmt.Errorf("invalid key: %w", err)
	}
	return d.kdht.Provide(ctx, c, true)
}

// FindProviders finds peers that provide a given key
func (d *DHT) FindProviders(ctx context.Context, key string, limit int) ([]peer.AddrInfo, error) {
	if !d.IsBootstrapped() {
		return nil, fmt.Errorf("DHT not bootstrapped")
	}

	c, err := keyToCID(key)
	if err != nil {
		return nil, fmt.Errorf("invalid key: %w", err)
	}

	// FindProvidersAsync streams up to limit providers over a channel
	providers := make([]peer.AddrInfo, 0, limit)
	for provider := range d.kdht.FindProvidersAsync(ctx, c, limit) {
		providers = append(providers, provider)
		if len(providers) >= limit {
			break
		}
	}

	return providers, nil
}
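
// A provide/lookup round trip then looks like this (illustrative sketch,
// assuming a bootstrapped DHT d and a context ctx):
//
//	_ = d.Provide(ctx, "bzzz:role:reviewer")                   // announcing node
//	peers, _ := d.FindProviders(ctx, "bzzz:role:reviewer", 10) // querying node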

// PutValue puts a key-value pair into the DHT. Note that kad-dht
// validates record keys by namespace, so keys must be accepted by a
// record validator configured on the DHT.
func (d *DHT) PutValue(ctx context.Context, key string, value []byte) error {
	if !d.IsBootstrapped() {
		return fmt.Errorf("DHT not bootstrapped")
	}

	return d.kdht.PutValue(ctx, key, value)
}

// GetValue retrieves a value from the DHT
func (d *DHT) GetValue(ctx context.Context, key string) ([]byte, error) {
	if !d.IsBootstrapped() {
		return nil, fmt.Errorf("DHT not bootstrapped")
	}

	return d.kdht.GetValue(ctx, key)
}
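
// Example put/get (sketch; the key namespace is an assumption and must
// match a record validator, which this package does not configure):
//
//	_ = d.PutValue(ctx, "/bzzz/announce/agent-1", []byte("payload"))
//	val, err := d.GetValue(ctx, "/bzzz/announce/agent-1")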

// FindPeer finds a specific peer in the DHT
func (d *DHT) FindPeer(ctx context.Context, peerID peer.ID) (peer.AddrInfo, error) {
	if !d.IsBootstrapped() {
		return peer.AddrInfo{}, fmt.Errorf("DHT not bootstrapped")
	}

	return d.kdht.FindPeer(ctx, peerID)
}

// GetRoutingTable returns the DHT as a content-routing interface
func (d *DHT) GetRoutingTable() routing.ContentRouting {
	return d.kdht
}

// GetConnectedPeers returns the host's currently connected peers
func (d *DHT) GetConnectedPeers() []peer.ID {
	return d.host.Network().Peers()
}

// RegisterPeer registers a peer with capability information
func (d *DHT) RegisterPeer(peerID peer.ID, agent, role string, capabilities []string) {
	d.peersMutex.Lock()
	defer d.peersMutex.Unlock()

	// Get peer addresses from the host's peerstore
	peerInfo := d.host.Peerstore().PeerInfo(peerID)

	d.knownPeers[peerID] = &PeerInfo{
		ID:           peerID,
		Addresses:    peerInfo.Addrs,
		Agent:        agent,
		Role:         role,
		LastSeen:     time.Now(),
		Capabilities: capabilities,
	}
}

// GetKnownPeers returns a copy of the known-peers map
func (d *DHT) GetKnownPeers() map[peer.ID]*PeerInfo {
	d.peersMutex.RLock()
	defer d.peersMutex.RUnlock()

	result := make(map[peer.ID]*PeerInfo, len(d.knownPeers))
	for id, info := range d.knownPeers {
		result[id] = info
	}

	return result
}

// FindPeersByRole finds peers with a specific role ("*" matches any role)
func (d *DHT) FindPeersByRole(ctx context.Context, role string) ([]*PeerInfo, error) {
	// First check locally known peers
	d.peersMutex.RLock()
	var localPeers []*PeerInfo
	for _, p := range d.knownPeers {
		if p.Role == role || role == "*" {
			localPeers = append(localPeers, p)
		}
	}
	d.peersMutex.RUnlock()

	// Also search the DHT for role-based provider keys
	roleKey := fmt.Sprintf("bzzz:role:%s", role)
	providers, err := d.FindProviders(ctx, roleKey, 10)
	if err != nil {
		// Return local peers even if the DHT search fails
		return localPeers, nil
	}

	// Merge DHT providers, skipping peers we already have
	result := append([]*PeerInfo{}, localPeers...)
	for _, provider := range providers {
		found := false
		for _, existing := range result {
			if existing.ID == provider.ID {
				found = true
				break
			}
		}
		if !found {
			result = append(result, &PeerInfo{
				ID:        provider.ID,
				Addresses: provider.Addrs,
				Role:      role, // inferred from the search key
				LastSeen:  time.Now(),
			})
		}
	}

	return result, nil
}

// AnnounceRole announces this peer's role to the DHT
func (d *DHT) AnnounceRole(ctx context.Context, role string) error {
	roleKey := fmt.Sprintf("bzzz:role:%s", role)
	return d.Provide(ctx, roleKey)
}

// AnnounceCapability announces a capability to the DHT
func (d *DHT) AnnounceCapability(ctx context.Context, capability string) error {
	capKey := fmt.Sprintf("bzzz:capability:%s", capability)
	return d.Provide(ctx, capKey)
}
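
// A typical role workflow pairs these announcements with FindPeersByRole
// (illustrative sketch; the role name is a placeholder):
//
//	if err := d.AnnounceRole(ctx, "architect"); err != nil {
//		// not bootstrapped yet, or the provide failed
//	}
//	architects, _ := d.FindPeersByRole(ctx, "architect")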

// startBackgroundTasks starts background maintenance tasks
func (d *DHT) startBackgroundTasks() {
	// Auto-bootstrap if enabled
	if d.config.AutoBootstrap {
		go d.autoBootstrap()
	}

	// Start periodic peer discovery
	go d.periodicDiscovery()

	// Start peer cleanup
	go d.peerCleanup()
}

// autoBootstrap attempts to bootstrap if not already bootstrapped
func (d *DHT) autoBootstrap() {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-d.ctx.Done():
			return
		case <-ticker.C:
			if !d.IsBootstrapped() {
				if err := d.Bootstrap(); err != nil {
					// Keep retrying on the next tick
					continue
				}
			}
		}
	}
}

// periodicDiscovery performs periodic peer discovery
func (d *DHT) periodicDiscovery() {
	ticker := time.NewTicker(d.config.DiscoveryInterval)
	defer ticker.Stop()

	for {
		select {
		case <-d.ctx.Done():
			return
		case <-ticker.C:
			if d.IsBootstrapped() {
				d.performDiscovery()
			}
		}
	}
}

// performDiscovery discovers new peers
func (d *DHT) performDiscovery() {
	ctx, cancel := context.WithTimeout(d.ctx, 30*time.Second)
	defer cancel()

	// Look for general BZZZ peers
	providers, err := d.FindProviders(ctx, "bzzz:peer", 10)
	if err != nil {
		return
	}

	// Update known peers
	d.peersMutex.Lock()
	for _, provider := range providers {
		if _, exists := d.knownPeers[provider.ID]; !exists {
			d.knownPeers[provider.ID] = &PeerInfo{
				ID:        provider.ID,
				Addresses: provider.Addrs,
				LastSeen:  time.Now(),
			}
		}
	}
	d.peersMutex.Unlock()
}

// peerCleanup removes stale peer information
func (d *DHT) peerCleanup() {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()

	for {
		select {
		case <-d.ctx.Done():
			return
		case <-ticker.C:
			d.cleanupStalePeers()
		}
	}
}

// cleanupStalePeers removes peers that haven't been seen recently
func (d *DHT) cleanupStalePeers() {
	d.peersMutex.Lock()
	defer d.peersMutex.Unlock()

	staleThreshold := time.Now().Add(-time.Hour) // 1 hour threshold

	for peerID, peerInfo := range d.knownPeers {
		if peerInfo.LastSeen.Before(staleThreshold) {
			// Check if peer is still connected
			connected := false
			for _, connectedPeer := range d.GetConnectedPeers() {
				if connectedPeer == peerID {
					connected = true
					break
				}
			}

			if !connected {
				delete(d.knownPeers, peerID)
			}
		}
	}
}

// Close shuts down the DHT
func (d *DHT) Close() error {
	d.cancel()
	return d.kdht.Close()
}

// RefreshRoutingTable triggers a DHT routing table refresh and waits
// for it to complete or time out
func (d *DHT) RefreshRoutingTable() error {
	if !d.IsBootstrapped() {
		return fmt.Errorf("DHT not bootstrapped")
	}

	ctx, cancel := context.WithTimeout(d.ctx, 30*time.Second)
	defer cancel()

	// kad-dht's RefreshRoutingTable takes no arguments and signals
	// completion on an error channel
	select {
	case err := <-d.kdht.RefreshRoutingTable():
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

// GetDHTSize returns the size of the local routing table, a rough
// proxy for how many DHT peers this node knows about
func (d *DHT) GetDHTSize() int {
	return d.kdht.RoutingTable().Size()
}

// Host returns the underlying libp2p host
func (d *DHT) Host() host.Host {
	return d.host
}