Integrate BACKBEAT SDK and resolve KACHING license validation

Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
anthonyrawlins
2025-09-06 07:56:26 +10:00
parent 543ab216f9
commit 9bdcbe0447
4730 changed files with 1480093 additions and 1916 deletions

21
vendor/github.com/libp2p/go-libp2p-kbucket/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Protocol Labs
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

30
vendor/github.com/libp2p/go-libp2p-kbucket/README.md generated vendored Normal file
View File

@@ -0,0 +1,30 @@
# go-libp2p-kbucket
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/)
[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23libp2p)
[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
> A kbucket implementation for use as a routing table in go-libp2p-kad-dht
## Documentation
See https://godoc.org/github.com/libp2p/go-libp2p-kbucket.
## Contribute
Feel free to join in. All welcome. Open an [issue](https://github.com/libp2p/go-libp2p-kbucket/issues)!
This repository falls under the libp2p [Code of Conduct](https://github.com/libp2p/community/blob/master/code-of-conduct.md).
### Want to hack on libp2p?
[![](https://cdn.rawgit.com/libp2p/community/master/img/contribute.gif)](https://github.com/libp2p/community/blob/master/CONTRIBUTE.md)
## License
MIT
---
The last gx published version of this module was: 2.2.23: QmSNE1XryoCMnZCbRaj1D23k6YKCaTQ386eJciu1pAfu8M

165
vendor/github.com/libp2p/go-libp2p-kbucket/bucket.go generated vendored Normal file
View File

@@ -0,0 +1,165 @@
//go:generate go run ./generate
package kbucket
import (
"container/list"
"time"
"github.com/libp2p/go-libp2p/core/peer"
)
// PeerInfo holds all related information for a peer in the K-Bucket.
type PeerInfo struct {
	// Id is the libp2p identifier of the peer.
	Id peer.ID

	// LastUsefulAt is the time instant at which the peer was last "useful" to us.
	// Please see the DHT docs for the definition of usefulness.
	LastUsefulAt time.Time

	// LastSuccessfulOutboundQueryAt is the time instant at which we last got a
	// successful query response from the peer.
	LastSuccessfulOutboundQueryAt time.Time

	// AddedAt is the time this peer was added to the routing table.
	AddedAt time.Time

	// dhtId is the Id of the peer in the DHT XOR keyspace.
	dhtId ID

	// replaceable: if a bucket is full, this peer can be replaced to make space for a new peer.
	replaceable bool
}
// bucket holds a list of peers.
// We synchronize on the Routing Table lock for all access to the bucket
// and so do not need any locks in the bucket itself.
// If we want/need to avoid locking the table for accessing a bucket in the future,
// it WILL be the caller's responsibility to synchronize all access to a bucket.
type bucket struct {
	// list stores *PeerInfo values, front = most recently pushed.
	list *list.List
}
// newBucket allocates an empty bucket backed by a fresh linked list.
func newBucket() *bucket {
	return &bucket{list: list.New()}
}
// peers returns a defensive copy of every PeerInfo in the bucket, so it is
// safe for the caller to modify the returned values.
func (b *bucket) peers() []PeerInfo {
	out := make([]PeerInfo, 0, b.len())
	for el := b.list.Front(); el != nil; el = el.Next() {
		out = append(out, *el.Value.(*PeerInfo))
	}
	return out
}
// returns the "minimum" peer in the bucket based on the `lessThan` comparator passed to it.
// It is NOT safe for the comparator to mutate the given `PeerInfo`
// as we pass in a pointer to it.
// It is NOT safe to modify the returned value.
func (b *bucket) min(lessThan func(p1 *PeerInfo, p2 *PeerInfo) bool) *PeerInfo {
if b.list.Len() == 0 {
return nil
}
minVal := b.list.Front().Value.(*PeerInfo)
for e := b.list.Front().Next(); e != nil; e = e.Next() {
val := e.Value.(*PeerInfo)
if lessThan(val, minVal) {
minVal = val
}
}
return minVal
}
// updateAllWith applies updateFnc to every peer in the bucket, in list order.
func (b *bucket) updateAllWith(updateFnc func(p *PeerInfo)) {
	for el := b.list.Front(); el != nil; el = el.Next() {
		updateFnc(el.Value.(*PeerInfo))
	}
}
// peerIds collects the Ids of every peer currently in the bucket.
func (b *bucket) peerIds() []peer.ID {
	ids := make([]peer.ID, 0, b.list.Len())
	for el := b.list.Front(); el != nil; el = el.Next() {
		ids = append(ids, el.Value.(*PeerInfo).Id)
	}
	return ids
}
// getPeer returns the bucket's live entry for the given peer Id,
// or nil when the peer is not in the bucket.
func (b *bucket) getPeer(p peer.ID) *PeerInfo {
	for el := b.list.Front(); el != nil; el = el.Next() {
		if pi := el.Value.(*PeerInfo); pi.Id == p {
			return pi
		}
	}
	return nil
}
// remove deletes the peer with the given Id from the bucket.
// It reports whether an entry was actually removed.
func (b *bucket) remove(id peer.ID) bool {
	for el := b.list.Front(); el != nil; el = el.Next() {
		if el.Value.(*PeerInfo).Id == id {
			b.list.Remove(el)
			return true
		}
	}
	return false
}
// pushFront inserts the peer at the front of the bucket's list.
func (b *bucket) pushFront(p *PeerInfo) {
	b.list.PushFront(p)
}

// len reports the number of peers currently stored in the bucket.
func (b *bucket) len() int {
	return b.list.Len()
}
// split partitions the bucket's peers in two: peers whose CPL with target is
// strictly greater than cpl are moved into the returned bucket (the closer
// peers), while the receiver keeps the rest (peers with CPL <= cpl; in
// practice the routing table only splits the last bucket, where the remainder
// all share CPL == cpl).
func (b *bucket) split(cpl int, target ID) *bucket {
	out := list.New()
	newbuck := newBucket()
	newbuck.list = out
	e := b.list.Front()
	for e != nil {
		pDhtId := e.Value.(*PeerInfo).dhtId
		peerCPL := CommonPrefixLen(pDhtId, target)
		if peerCPL > cpl {
			// Move this element to the new bucket: grab a handle, advance
			// the cursor first, then detach — Remove invalidates the element.
			cur := e
			out.PushBack(e.Value)
			e = e.Next()
			b.list.Remove(cur)
			continue
		}
		e = e.Next()
	}
	return newbuck
}
// maxCommonPrefix returns the largest common prefix length between the target
// ID and any peer currently in the bucket (0 when the bucket is empty).
func (b *bucket) maxCommonPrefix(target ID) uint {
	var best uint
	for el := b.list.Front(); el != nil; el = el.Next() {
		if cpl := uint(CommonPrefixLen(el.Value.(*PeerInfo).dhtId, target)); cpl > best {
			best = cpl
		}
	}
	return best
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,3 @@
coverage:
range: "50...100"
comment: off

View File

@@ -0,0 +1,97 @@
package keyspace
import (
"sort"
"math/big"
)
// Key represents an identifier in a KeySpace. It holds a reference to the
// associated KeySpace, as well as references to both the Original identifier
// and the new, KeySpace-normalized Bytes one.
type Key struct {
	// Space is the KeySpace this Key is related to.
	Space KeySpace

	// Original is the original value of the identifier.
	Original []byte

	// Bytes is the new value of the identifier, in the KeySpace.
	Bytes []byte
}
// Equal returns whether this key is equal to another.
// Both keys must belong to the same KeySpace; mixing spaces panics.
func (k1 Key) Equal(k2 Key) bool {
	if k1.Space != k2.Space {
		panic("k1 and k2 not in same key space.")
	}
	return k1.Space.Equal(k1, k2)
}
// Less returns whether this key comes before another.
// Both keys must belong to the same KeySpace; mixing spaces panics.
func (k1 Key) Less(k2 Key) bool {
	if k1.Space != k2.Space {
		panic("k1 and k2 not in same key space.")
	}
	return k1.Space.Less(k1, k2)
}
// Distance returns this key's distance to another, as defined by the KeySpace.
// Both keys must belong to the same KeySpace; mixing spaces panics.
func (k1 Key) Distance(k2 Key) *big.Int {
	if k1.Space != k2.Space {
		panic("k1 and k2 not in same key space.")
	}
	return k1.Space.Distance(k1, k2)
}
// KeySpace is an object used to do math on identifiers. Each keyspace has its
// own properties and rules. See XorKeySpace.
type KeySpace interface {
	// Key converts an identifier into a Key in this space.
	Key([]byte) Key

	// Equal returns whether keys are equal in this key space.
	Equal(Key, Key) bool

	// Distance returns the distance metric in this key space.
	Distance(Key, Key) *big.Int

	// Less returns whether the first key is smaller than the second.
	Less(Key, Key) bool
}
// byDistanceToCenter is a type used to sort Keys by proximity to a center.
// It implements sort.Interface (see Len/Swap/Less below).
type byDistanceToCenter struct {
	Center Key
	Keys   []Key
}
// Len implements sort.Interface.
func (s byDistanceToCenter) Len() int {
	return len(s.Keys)
}

// Swap implements sort.Interface.
func (s byDistanceToCenter) Swap(i, j int) {
	s.Keys[i], s.Keys[j] = s.Keys[j], s.Keys[i]
}

// Less implements sort.Interface: a key is "smaller" when its distance to
// Center is smaller, so sorting yields closest-first order.
func (s byDistanceToCenter) Less(i, j int) bool {
	a := s.Center.Distance(s.Keys[i])
	b := s.Center.Distance(s.Keys[j])
	return a.Cmp(b) == -1
}
// SortByDistance takes a KeySpace, a center Key, and a list of Keys toSort.
// It returns a new slice where the Keys have been sorted by ascending
// distance to the center Key. The input slice is left untouched.
func SortByDistance(sp KeySpace, center Key, toSort []Key) []Key {
	// Work on a copy so the caller's slice is never reordered.
	sorted := append([]Key(nil), toSort...)
	sorter := &byDistanceToCenter{Center: center, Keys: sorted}
	sort.Sort(sorter)
	return sorter.Keys
}

View File

@@ -0,0 +1,59 @@
package keyspace
import (
"bytes"
"math/big"
"math/bits"
u "github.com/ipfs/boxo/util"
sha256 "github.com/minio/sha256-simd"
)
// XORKeySpace is a KeySpace which:
//   - normalizes identifiers using a cryptographic hash (sha256)
//   - measures distance by XORing keys together
var XORKeySpace = &xorKeySpace{}

var _ KeySpace = XORKeySpace // ensure it conforms

// xorKeySpace is the stateless implementation behind XORKeySpace.
type xorKeySpace struct{}
// Key converts an identifier into a Key in this space by hashing it with
// sha256; the digest becomes the key's Bytes.
func (s *xorKeySpace) Key(id []byte) Key {
	digest := sha256.Sum256(id)
	return Key{
		Space:    s,
		Original: id,
		Bytes:    digest[:],
	}
}
// Equal returns whether keys are equal in this key space (identical Bytes).
func (s *xorKeySpace) Equal(k1, k2 Key) bool {
	return bytes.Equal(k1.Bytes, k2.Bytes)
}
// Distance returns the XOR distance between the two keys, interpreted as a
// big-endian unsigned integer.
func (s *xorKeySpace) Distance(k1, k2 Key) *big.Int {
	xored := u.XOR(k1.Bytes, k2.Bytes)
	return new(big.Int).SetBytes(xored)
}
// Less returns whether the first key is smaller than the second,
// comparing the normalized Bytes lexicographically.
func (s *xorKeySpace) Less(k1, k2 Key) bool {
	return bytes.Compare(k1.Bytes, k2.Bytes) < 0
}
// ZeroPrefixLen counts the number of consecutive zero bits at the start of
// the byte slice id. An all-zero (or empty) slice yields len(id)*8.
func ZeroPrefixLen(id []byte) int {
	total := 0
	for _, b := range id {
		if b == 0 {
			total += 8
			continue
		}
		return total + bits.LeadingZeros8(b)
	}
	return total
}

View File

@@ -0,0 +1,275 @@
package peerdiversity
import (
"errors"
"fmt"
"net"
"sort"
"sync"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-cidranger"
asnutil "github.com/libp2p/go-libp2p-asn-util"
logging "github.com/ipfs/go-log"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
)
// dfLog is the package logger for diversity-filter events.
var dfLog = logging.Logger("diversityFilter")

// asnStore abstracts the ASN lookup used to group IPv6 addresses
// (satisfied by asnutil.Store — see NewFilter).
type asnStore interface {
	AsnForIPv6(ip net.IP) (string, error)
}
// PeerIPGroupKey is a unique key that represents ONE of the IP Groups the peer belongs to.
// A peer has one PeerIPGroupKey per address. Thus, a peer can belong to MULTIPLE Groups if it has
// multiple addresses.
// For now, given a peer address, our grouping mechanism is as follows:
//  1. For IPv6 addresses, we group by the ASN of the IP address.
//  2. For IPv4 addresses, all addresses that belong to the same legacy (Class A)/8 allocation
//     OR share the same /16 prefix are in the same group.
type PeerIPGroupKey string

// legacyClassA lists the legacy /8 IPv4 allocations that are grouped by their /8 prefix.
// https://en.wikipedia.org/wiki/List_of_assigned_/8_IPv4_address_blocks
var legacyClassA = []string{"12.0.0.0/8", "17.0.0.0/8", "19.0.0.0/8", "38.0.0.0/8", "48.0.0.0/8", "56.0.0.0/8", "73.0.0.0/8", "53.0.0.0/8"}
// PeerGroupInfo represents the grouping info for a Peer.
type PeerGroupInfo struct {
	// Id is the peer this grouping entry belongs to.
	Id peer.ID
	// Cpl is the peer's common prefix length with the local peer.
	Cpl int
	// IPGroupKey is the IP group derived from one of the peer's addresses.
	IPGroupKey PeerIPGroupKey
}
// PeerIPGroupFilter is the interface that must be implemented by callers who want to
// instantiate a `peerdiversity.Filter`. This interface provides the function hooks
// that are used/called by the `peerdiversity.Filter`.
type PeerIPGroupFilter interface {
	// Allow is called by the Filter to test if a peer with the given
	// grouping info should be allowed/rejected by the Filter. This will be called ONLY
	// AFTER the peer has successfully passed all of the Filter's internal checks.
	// Note: If the peer is whitelisted on the Filter, the peer will be allowed by the Filter without calling this function.
	Allow(PeerGroupInfo) (allow bool)

	// Increment is called by the Filter when a peer with the given Grouping Info
	// is added to the Filter state. This will happen after the peer has passed
	// all of the Filter's internal checks and the Allow function defined above for all of its Groups.
	Increment(PeerGroupInfo)

	// Decrement is called by the Filter when a peer with the given
	// Grouping Info is removed from the Filter. This will happen when the caller/user of the Filter
	// no longer wants the peer and the IP groups it belongs to counted in the Filter state.
	Decrement(PeerGroupInfo)

	// PeerAddresses is called by the Filter to determine the addresses of the given peer
	// it should use to determine the IP groups the peer belongs to.
	PeerAddresses(peer.ID) []ma.Multiaddr
}
// Filter is a peer diversity filter that accepts or rejects peers based on the whitelisting rules configured
// AND the diversity policies defined by the implementation of the PeerIPGroupFilter interface
// passed to it.
type Filter struct {
	// mu guards all state below.
	mu sync.Mutex
	// An implementation of the `PeerIPGroupFilter` interface defined above.
	pgm PeerIPGroupFilter

	// peerGroups maps each accepted peer to the group info recorded for each of its addresses.
	peerGroups map[peer.ID][]PeerGroupInfo

	// whitelisted peers
	wlpeers map[peer.ID]struct{}

	// legacy IPv4 Class A networks.
	legacyCidrs cidranger.Ranger

	// logKey tags this Filter's log output.
	logKey string

	// cplFnc returns the common prefix length of a peer with the local peer.
	cplFnc func(peer.ID) int

	// cplPeerGroups indexes group keys first by CPL, then by peer.
	cplPeerGroups map[int]map[peer.ID][]PeerIPGroupKey

	// asnStore resolves IPv6 addresses to ASNs (see ipGroupKey).
	asnStore asnStore
}
// NewFilter creates a Filter for Peer Diversity backed by the given
// PeerIPGroupFilter implementation. logKey tags log output and cplFnc maps a
// peer to its common prefix length with the local peer.
func NewFilter(pgm PeerIPGroupFilter, logKey string, cplFnc func(peer.ID) int) (*Filter, error) {
	if pgm == nil {
		return nil, errors.New("peergroup implementation can not be nil")
	}

	// Build a trie of the legacy Class A networks so ipGroupKey can test
	// membership quickly.
	ranger := cidranger.NewPCTrieRanger()
	for _, cidr := range legacyClassA {
		_, network, err := net.ParseCIDR(cidr)
		if err != nil {
			return nil, err
		}
		if err := ranger.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
			return nil, err
		}
	}

	return &Filter{
		pgm:           pgm,
		peerGroups:    make(map[peer.ID][]PeerGroupInfo),
		wlpeers:       make(map[peer.ID]struct{}),
		legacyCidrs:   ranger,
		logKey:        logKey,
		cplFnc:        cplFnc,
		cplPeerGroups: make(map[int]map[peer.ID][]PeerIPGroupKey),
		asnStore:      asnutil.Store,
	}, nil
}
// Remove drops the peer and all of its recorded IP group memberships from the
// Filter state, notifying the PeerIPGroupFilter via Decrement for each group.
func (f *Filter) Remove(p peer.ID) {
	f.mu.Lock()
	defer f.mu.Unlock()

	cpl := f.cplFnc(p)

	for _, info := range f.peerGroups[p] {
		f.pgm.Decrement(info)
	}
	delete(f.peerGroups, p)

	delete(f.cplPeerGroups[cpl], p)
	if len(f.cplPeerGroups[cpl]) == 0 {
		delete(f.cplPeerGroups, cpl)
	}
}
// TryAdd attempts to add the peer to the Filter state and returns true if it's successful, false otherwise.
// A rejection for ANY address rejects the whole peer, and no state is recorded in that case.
func (f *Filter) TryAdd(p peer.ID) bool {
	f.mu.Lock()
	defer f.mu.Unlock()

	// whitelisted peers are always allowed, bypassing all group checks below.
	if _, ok := f.wlpeers[p]; ok {
		return true
	}

	cpl := f.cplFnc(p)

	// don't allow peers for which we can't determine addresses.
	addrs := f.pgm.PeerAddresses(p)
	if len(addrs) == 0 {
		dfLog.Debugw("no addresses found for peer", "appKey", f.logKey, "peer", p)
		return false
	}

	peerGroups := make([]PeerGroupInfo, 0, len(addrs))
	for _, a := range addrs {
		ip, err := manet.ToIP(a)
		if err != nil {
			dfLog.Errorw("failed to parse IP from multiaddr", "appKey", f.logKey,
				"multiaddr", a.String(), "err", err)
			return false
		}

		// reject the peer if we can't determine a grouping for one of its addresses.
		key, err := f.ipGroupKey(ip)
		if err != nil {
			dfLog.Errorw("failed to find Group Key", "appKey", f.logKey, "ip", ip.String(), "peer", p,
				"err", err)
			return false
		}
		if len(key) == 0 {
			dfLog.Errorw("group key is empty", "appKey", f.logKey, "ip", ip.String(), "peer", p)
			return false
		}
		group := PeerGroupInfo{Id: p, Cpl: cpl, IPGroupKey: key}

		// give the user-supplied policy a veto over every group the peer would join.
		if !f.pgm.Allow(group) {
			return false
		}

		peerGroups = append(peerGroups, group)
	}

	// every address was accepted: record the memberships and notify the policy.
	if _, ok := f.cplPeerGroups[cpl]; !ok {
		f.cplPeerGroups[cpl] = make(map[peer.ID][]PeerIPGroupKey)
	}

	for _, g := range peerGroups {
		f.pgm.Increment(g)

		f.peerGroups[p] = append(f.peerGroups[p], g)
		f.cplPeerGroups[cpl][p] = append(f.cplPeerGroups[cpl][p], g.IPGroupKey)
	}

	return true
}
// WhitelistPeers will always allow the given peers: TryAdd accepts them
// without consulting the diversity checks or the PeerIPGroupFilter.
func (f *Filter) WhitelistPeers(peers ...peer.ID) {
	f.mu.Lock()
	defer f.mu.Unlock()

	for _, p := range peers {
		f.wlpeers[p] = struct{}{}
	}
}
// ipGroupKey maps an IP address to the PeerIPGroupKey it belongs to:
// IPv6 addresses group by the announcing ASN (with a fixed fallback key when
// the ASN is unknown); IPv4 addresses group by /8 for legacy Class A
// allocations and by /16 otherwise.
func (f *Filter) ipGroupKey(ip net.IP) (PeerIPGroupKey, error) {
	if ip.To4() == nil {
		// IPv6 address -> get ASN.
		// TODO Clean up the ASN codebase
		asn, err := f.asnStore.AsnForIPv6(ip)
		if err != nil {
			return "", fmt.Errorf("failed to fetch ASN for IPv6 addr %s: %w", ip.String(), err)
		}

		// if no ASN found then fall back on using the /32 prefix
		if len(asn) == 0 {
			dfLog.Debugw("ASN not known", "appKey", f.logKey, "ip", ip)
			asn = fmt.Sprintf("unknown ASN: %s", net.CIDRMask(32, 128).String())
		}
		return PeerIPGroupKey(asn), nil
	}

	// IPv4 in a legacy Class A block: group by the /8 prefix.
	if rs, _ := f.legacyCidrs.ContainingNetworks(ip); len(rs) != 0 {
		return PeerIPGroupKey(ip.Mask(net.IPv4Mask(255, 0, 0, 0)).String()), nil
	}

	// All other IPv4: group by the /16 prefix.
	return PeerIPGroupKey(ip.Mask(net.IPv4Mask(255, 255, 0, 0)).String()), nil
}
// CplDiversityStats contains the peer diversity stats for a Cpl.
type CplDiversityStats struct {
	// Cpl is the common prefix length these stats describe.
	Cpl int
	// Peers maps each peer with this Cpl to its IP group keys.
	Peers map[peer.ID][]PeerIPGroupKey
}
// GetDiversityStats returns one CplDiversityStats entry per CPL currently
// tracked by the Filter, sorted by ascending CPL. The returned maps are
// copies, safe to use after the Filter lock is released.
func (f *Filter) GetDiversityStats() []CplDiversityStats {
	f.mu.Lock()
	defer f.mu.Unlock()

	cpls := make([]int, 0, len(f.cplPeerGroups))
	for cpl := range f.cplPeerGroups {
		cpls = append(cpls, cpl)
	}
	sort.Ints(cpls)

	stats := make([]CplDiversityStats, 0, len(cpls))
	for _, cpl := range cpls {
		peers := make(map[peer.ID][]PeerIPGroupKey, len(f.cplPeerGroups[cpl]))
		for p, groups := range f.cplPeerGroups[cpl] {
			peers[p] = groups
		}
		stats = append(stats, CplDiversityStats{Cpl: cpl, Peers: peers})
	}
	return stats
}

64
vendor/github.com/libp2p/go-libp2p-kbucket/sorting.go generated vendored Normal file
View File

@@ -0,0 +1,64 @@
package kbucket
import (
"container/list"
"sort"
"github.com/libp2p/go-libp2p/core/peer"
)
// peerDistance pairs a peer with its precomputed XOR distance to the local node.
type peerDistance struct {
	p        peer.ID
	distance ID
}

// peerDistanceSorter implements sort.Interface to sort peers by xor distance
// to a fixed target ID.
type peerDistanceSorter struct {
	peers  []peerDistance
	target ID
}
// Len implements sort.Interface.
func (pds *peerDistanceSorter) Len() int { return len(pds.peers) }

// Swap implements sort.Interface.
func (pds *peerDistanceSorter) Swap(a, b int) {
	pds.peers[a], pds.peers[b] = pds.peers[b], pds.peers[a]
}

// Less implements sort.Interface, ordering by ascending XOR distance to the target.
func (pds *peerDistanceSorter) Less(a, b int) bool {
	return pds.peers[a].distance.less(pds.peers[b].distance)
}
// appendPeer adds p (with DHT id pDhtId) to the sorter's slice, precomputing
// its XOR distance to the target. The slice may no longer be sorted afterwards.
func (pds *peerDistanceSorter) appendPeer(p peer.ID, pDhtId ID) {
	entry := peerDistance{p: p, distance: xor(pds.target, pDhtId)}
	pds.peers = append(pds.peers, entry)
}
// appendPeersFromList adds every PeerInfo stored in l to the sorter's slice.
// The slice may no longer be sorted afterwards.
func (pds *peerDistanceSorter) appendPeersFromList(l *list.List) {
	for el := l.Front(); el != nil; el = el.Next() {
		info := el.Value.(*PeerInfo)
		pds.appendPeer(info.Id, info.dhtId)
	}
}
// sort orders the collected peers by ascending distance to the target.
func (pds *peerDistanceSorter) sort() {
	sort.Sort(pds)
}
// SortClosestPeers sorts the given peers by ascending XOR distance from the
// target and returns the result as a new slice; the input is not modified.
func SortClosestPeers(peers []peer.ID, target ID) []peer.ID {
	sorter := peerDistanceSorter{
		peers:  make([]peerDistance, 0, len(peers)),
		target: target,
	}
	for _, p := range peers {
		sorter.appendPeer(p, ConvertPeerID(p))
	}
	sorter.sort()

	out := make([]peer.ID, 0, sorter.Len())
	for _, pd := range sorter.peers {
		out = append(out, pd.p)
	}
	return out
}

570
vendor/github.com/libp2p/go-libp2p-kbucket/table.go generated vendored Normal file
View File

@@ -0,0 +1,570 @@
// Package kbucket implements a kademlia 'k-bucket' routing table.
package kbucket
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/peerstore"
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
logging "github.com/ipfs/go-log"
)
// log is the package logger for routing-table events.
var log = logging.Logger("table")

// ErrPeerRejectedHighLatency is returned when a peer is rejected because its
// measured latency exceeds the table's maximum acceptable latency.
var ErrPeerRejectedHighLatency = errors.New("peer rejected; latency too high")

// ErrPeerRejectedNoCapacity is returned when the target bucket is full of
// non-replaceable peers and cannot accept a new entry.
var ErrPeerRejectedNoCapacity = errors.New("peer rejected; insufficient capacity")
// RoutingTable defines the routing table.
type RoutingTable struct {
	// the routing table context
	ctx context.Context
	// function to cancel the RT context
	ctxCancel context.CancelFunc

	// ID of the local peer
	local ID

	// Blanket lock, refine later for better performance
	tabLock sync.RWMutex

	// latency metrics
	metrics peerstore.Metrics

	// Maximum acceptable latency for peers in this cluster
	maxLatency time.Duration

	// kBuckets define all the fingers to other nodes.
	buckets    []*bucket
	bucketsize int

	// cplRefreshedAt tracks when each CPL was last refreshed; guarded by cplRefreshLk.
	cplRefreshLk   sync.RWMutex
	cplRefreshedAt map[uint]time.Time

	// notification functions, invoked whenever a peer is added/removed.
	PeerRemoved func(peer.ID)
	PeerAdded   func(peer.ID)

	// usefulnessGracePeriod is the maximum grace period we will give to a
	// peer in the bucket to be useful to us, failing which, we will evict
	// it to make place for a new peer if the bucket is full
	usefulnessGracePeriod time.Duration

	// df is the optional peer diversity filter (may be nil).
	df *peerdiversity.Filter
}
// NewRoutingTable creates a new routing table with a given bucketsize, local
// ID, latency tolerance, latency metrics source, usefulness grace period and
// an optional peer diversity filter (df may be nil).
func NewRoutingTable(bucketsize int, localID ID, latency time.Duration, m peerstore.Metrics, usefulnessGracePeriod time.Duration,
	df *peerdiversity.Filter) (*RoutingTable, error) {
	ctx, cancel := context.WithCancel(context.Background())
	return &RoutingTable{
		ctx:                   ctx,
		ctxCancel:             cancel,
		buckets:               []*bucket{newBucket()},
		bucketsize:            bucketsize,
		local:                 localID,
		maxLatency:            latency,
		metrics:               m,
		cplRefreshedAt:        make(map[uint]time.Time),
		PeerRemoved:           func(peer.ID) {},
		PeerAdded:             func(peer.ID) {},
		usefulnessGracePeriod: usefulnessGracePeriod,
		df:                    df,
	}, nil
}
// Close shuts down the Routing Table & all associated processes by
// cancelling the table's context.
// It is safe to call this multiple times; it always returns nil.
func (rt *RoutingTable) Close() error {
	rt.ctxCancel()
	return nil
}
// NPeersForCpl returns the number of peers in the table whose common prefix
// length with the local peer equals cpl.
//
// Buckets below the last hold exactly one CPL each, so their length is the
// answer directly; the last bucket is a mixed bag and must be scanned,
// counting only peers whose CPL matches exactly.
func (rt *RoutingTable) NPeersForCpl(cpl uint) int {
	rt.tabLock.RLock()
	defer rt.tabLock.RUnlock()

	// CPLs at or beyond the last bucket index all fold into the last bucket.
	if int(cpl) >= len(rt.buckets)-1 {
		count := 0
		b := rt.buckets[len(rt.buckets)-1]
		for _, p := range b.peers() {
			if CommonPrefixLen(rt.local, p.dhtId) == int(cpl) {
				count++
			}
		}
		return count
	}
	return rt.buckets[cpl].len()
}
// UsefulNewPeer reports whether the given peer.ID would be a good fit for the
// routing table. It returns true if the peer isn't in the routing table yet,
// if the bucket corresponding to the peer isn't full, if that bucket contains
// replaceable peers, or if it is the last bucket and adding the peer would
// unfold (split) it.
func (rt *RoutingTable) UsefulNewPeer(p peer.ID) bool {
	rt.tabLock.RLock()
	defer rt.tabLock.RUnlock()

	// bucket corresponding to p
	bucketID := rt.bucketIdForPeer(p)
	bucket := rt.buckets[bucketID]

	if bucket.getPeer(p) != nil {
		// peer already exists in the routing table, so it isn't useful
		return false
	}

	// bucket isn't full
	if bucket.len() < rt.bucketsize {
		return true
	}

	// bucket is full, check if it contains replaceable peers.
	// NOTE: the loop variable is named pi (not "peer") so it doesn't shadow
	// the imported peer package.
	for e := bucket.list.Front(); e != nil; e = e.Next() {
		pi := e.Value.(*PeerInfo)
		if pi.replaceable {
			// at least 1 peer is replaceable
			return true
		}
	}

	// the last bucket potentially contains peer ids with different CPL,
	// and can be split in 2 buckets if needed
	if bucketID == len(rt.buckets)-1 {
		cpl := CommonPrefixLen(rt.local, ConvertPeerID(p))
		for _, pi := range bucket.peers() {
			// if an existing peer has a different CPL than the candidate,
			// the new peer is useful and will trigger a bucket split
			if CommonPrefixLen(rt.local, pi.dhtId) != cpl {
				return true
			}
		}
	}

	// the appropriate bucket is full of non replaceable peers
	return false
}
// TryAddPeer tries to add a peer to the Routing table.
// If the peer ALREADY exists in the Routing Table and has been queried before, this call is a no-op.
// If the peer ALREADY exists in the Routing Table but hasn't been queried before, we set its LastUsefulAt value to
// the current time. This needs to be done because we don't mark peers as "Useful" (by setting the LastUsefulAt value)
// when we first connect to them.
//
// If the peer is a queryPeer i.e. we queried it or it queried us, we set the LastSuccessfulOutboundQuery to the current time.
// If the peer is just a peer that we connect to/it connected to us without any DHT query, we consider it as having
// no LastSuccessfulOutboundQuery.
//
// If the logical bucket to which the peer belongs is full and it's not the last bucket, we try to replace an existing peer
// whose LastSuccessfulOutboundQuery is above the maximum allowed threshold in that bucket with the new peer.
// If no such peer exists in that bucket, we do NOT add the peer to the Routing Table and return error "ErrPeerRejectedNoCapacity".
// TryAddPeer returns a boolean value set to true if the peer was newly added to the Routing Table, false otherwise.
// It also returns any error that occurred while adding the peer to the Routing Table. If the error is not nil,
// the boolean value will ALWAYS be false i.e. the peer won't be added to the Routing Table if it's not already there.
//
// A return value of false with error=nil indicates that the peer ALREADY exists in the Routing Table.
func (rt *RoutingTable) TryAddPeer(p peer.ID, queryPeer bool, isReplaceable bool) (bool, error) {
	rt.tabLock.Lock()
	defer rt.tabLock.Unlock()

	return rt.addPeer(p, queryPeer, isReplaceable)
}
// addPeer implements TryAddPeer (see its contract); locking is the
// responsibility of the caller. It returns true only when p was newly
// inserted into a bucket.
func (rt *RoutingTable) addPeer(p peer.ID, queryPeer bool, isReplaceable bool) (bool, error) {
	bucketID := rt.bucketIdForPeer(p)
	bucket := rt.buckets[bucketID]

	now := time.Now()
	var lastUsefulAt time.Time
	if queryPeer {
		lastUsefulAt = now
	}

	// peer already exists in the Routing Table.
	if peerInfo := bucket.getPeer(p); peerInfo != nil {
		// if we're querying the peer the first time after adding it, let's give it a
		// usefulness bump. This will ONLY happen once.
		if peerInfo.LastUsefulAt.IsZero() && queryPeer {
			peerInfo.LastUsefulAt = lastUsefulAt
		}
		return false, nil
	}

	// peer's latency threshold is NOT acceptable
	if rt.metrics.LatencyEWMA(p) > rt.maxLatency {
		// Connection doesn't meet requirements, skip!
		return false, ErrPeerRejectedHighLatency
	}

	// add it to the diversity filter for now.
	// if we aren't able to find a place for the peer in the table,
	// we will simply remove it from the Filter later.
	if rt.df != nil {
		if !rt.df.TryAdd(p) {
			return false, errors.New("peer rejected by the diversity filter")
		}
	}

	// We have enough space in the bucket (whether spawned or grouped).
	if bucket.len() < rt.bucketsize {
		bucket.pushFront(&PeerInfo{
			Id:                            p,
			LastUsefulAt:                  lastUsefulAt,
			LastSuccessfulOutboundQueryAt: now,
			AddedAt:                       now,
			dhtId:                         ConvertPeerID(p),
			replaceable:                   isReplaceable,
		})
		rt.PeerAdded(p)
		return true, nil
	}

	if bucketID == len(rt.buckets)-1 {
		// if the bucket is too large and this is the last bucket (i.e. wildcard), unfold it.
		rt.nextBucket()
		// the structure of the table has changed, so let's recheck if the peer now has a dedicated bucket.
		bucketID = rt.bucketIdForPeer(p)
		bucket = rt.buckets[bucketID]

		// push the peer only if the bucket isn't overflowing after splitting
		if bucket.len() < rt.bucketsize {
			bucket.pushFront(&PeerInfo{
				Id:                            p,
				LastUsefulAt:                  lastUsefulAt,
				LastSuccessfulOutboundQueryAt: now,
				AddedAt:                       now,
				dhtId:                         ConvertPeerID(p),
				replaceable:                   isReplaceable,
			})
			rt.PeerAdded(p)
			return true, nil
		}
	}

	// the bucket to which the peer belongs is full. Let's try to find a peer
	// in that bucket which is replaceable.
	// we don't really need a stable sort here as it doesn't matter which peer we evict
	// as long as it's a replaceable peer.
	replaceablePeer := bucket.min(func(p1 *PeerInfo, p2 *PeerInfo) bool {
		return p1.replaceable
	})

	if replaceablePeer != nil && replaceablePeer.replaceable {
		// we found a replaceable peer, let's replace it with the new peer.

		// add the new peer to the bucket. This needs to happen before we remove
		// the replaceable peer: if the bucket size is 1, removing first would
		// delete the only peer and, with it, the bucket.
		bucket.pushFront(&PeerInfo{
			Id:                            p,
			LastUsefulAt:                  lastUsefulAt,
			LastSuccessfulOutboundQueryAt: now,
			AddedAt:                       now,
			dhtId:                         ConvertPeerID(p),
			replaceable:                   isReplaceable,
		})
		rt.PeerAdded(p)

		// remove the replaceable peer
		rt.removePeer(replaceablePeer.Id)
		return true, nil
	}

	// we weren't able to find place for the peer, remove it from the filter state.
	if rt.df != nil {
		rt.df.Remove(p)
	}
	return false, ErrPeerRejectedNoCapacity
}
// MarkAllPeersIrreplaceable clears the replaceable flag on every peer in the
// routing table, so no existing entry will be evicted to make space for a new
// peer. Peers can still be removed explicitly via the RemovePeer API.
func (rt *RoutingTable) MarkAllPeersIrreplaceable() {
	rt.tabLock.Lock()
	defer rt.tabLock.Unlock()

	for _, b := range rt.buckets {
		b.updateAllWith(func(p *PeerInfo) {
			p.replaceable = false
		})
	}
}
// GetPeerInfos returns a snapshot of the PeerInfo records stored across all buckets.
func (rt *RoutingTable) GetPeerInfos() []PeerInfo {
	rt.tabLock.RLock()
	defer rt.tabLock.RUnlock()

	var infos []PeerInfo
	for _, b := range rt.buckets {
		infos = append(infos, b.peers()...)
	}
	return infos
}
// UpdateLastSuccessfulOutboundQueryAt sets the peer's
// LastSuccessfulOutboundQueryAt timestamp to t.
// It reports whether the peer was found and updated.
func (rt *RoutingTable) UpdateLastSuccessfulOutboundQueryAt(p peer.ID, t time.Time) bool {
	rt.tabLock.Lock()
	defer rt.tabLock.Unlock()

	bucket := rt.buckets[rt.bucketIdForPeer(p)]
	pc := bucket.getPeer(p)
	if pc == nil {
		return false
	}
	pc.LastSuccessfulOutboundQueryAt = t
	return true
}
// UpdateLastUsefulAt sets the peer's LastUsefulAt timestamp to t.
// It reports whether the peer was found and updated.
func (rt *RoutingTable) UpdateLastUsefulAt(p peer.ID, t time.Time) bool {
	rt.tabLock.Lock()
	defer rt.tabLock.Unlock()

	bucket := rt.buckets[rt.bucketIdForPeer(p)]
	pc := bucket.getPeer(p)
	if pc == nil {
		return false
	}
	pc.LastUsefulAt = t
	return true
}
// RemovePeer evicts the peer from the Routing Table. Call it when the caller
// is sure the peer is not useful for queries — for example, when it has
// stopped supporting the DHT protocol.
func (rt *RoutingTable) RemovePeer(p peer.ID) {
	rt.tabLock.Lock()
	defer rt.tabLock.Unlock()
	rt.removePeer(p)
}
// removePeer evicts p from its bucket, keeps the diversity filter in sync,
// compacts trailing empty buckets, and fires the PeerRemoved callback.
// It returns true if the peer was present and removed.
// locking is the responsibility of the caller
func (rt *RoutingTable) removePeer(p peer.ID) bool {
	bucketID := rt.bucketIdForPeer(p)
	bucket := rt.buckets[bucketID]
	if bucket.remove(p) {
		// Mirror the removal in the peer-diversity filter, if one is configured.
		if rt.df != nil {
			rt.df.Remove(p)
		}
		// Fold empty buckets back from the tail so the table never ends in
		// empty buckets (except when only one bucket remains).
		for {
			lastBucketIndex := len(rt.buckets) - 1

			// remove the last bucket if it's empty and it isn't the only bucket we have
			if len(rt.buckets) > 1 && rt.buckets[lastBucketIndex].len() == 0 {
				rt.buckets[lastBucketIndex] = nil // drop the reference so the bucket can be GC'd
				rt.buckets = rt.buckets[:lastBucketIndex]
			} else if len(rt.buckets) >= 2 && rt.buckets[lastBucketIndex-1].len() == 0 {
				// if the second last bucket just became empty, remove and replace it with the last bucket.
				rt.buckets[lastBucketIndex-1] = rt.buckets[lastBucketIndex]
				rt.buckets[lastBucketIndex] = nil
				rt.buckets = rt.buckets[:lastBucketIndex]
			} else {
				break
			}
		}

		// peer removed callback
		rt.PeerRemoved(p)
		return true
	}
	return false
}
// nextBucket unfolds the routing table by splitting the last bucket.
// The last bucket is a mixed bag that may contain peers belonging in
// dedicated (unfolded) buckets — e.g. with 4 unfolded buckets, every peer in
// folded bucket 5 might really belong in bucket 8. Splitting therefore
// cascades: we keep unfolding until the newly created last bucket no longer
// overflows (which can happen when an unfolded bucket turns out empty).
func (rt *RoutingTable) nextBucket() {
	for {
		last := rt.buckets[len(rt.buckets)-1]
		created := last.split(len(rt.buckets)-1, rt.local)
		rt.buckets = append(rt.buckets, created)
		if created.len() < rt.bucketsize {
			return
		}
	}
}
// Find returns the given peer's ID if it is present in the routing table,
// or the empty peer ID ("") otherwise.
func (rt *RoutingTable) Find(id peer.ID) peer.ID {
	closest := rt.NearestPeers(ConvertPeerID(id), 1)
	if len(closest) > 0 && closest[0] == id {
		return closest[0]
	}
	return ""
}
// NearestPeer returns a single peer that is nearest to the given ID,
// or the empty peer ID ("") when the table is empty.
func (rt *RoutingTable) NearestPeer(id ID) peer.ID {
	if peers := rt.NearestPeers(id, 1); len(peers) > 0 {
		return peers[0]
	}
	log.Debugf("NearestPeer: Returning nil, table size = %d", rt.Size())
	return ""
}
// NearestPeers returns a list of the 'count' closest peers to the given ID,
// sorted nearest-first by XOR distance to the target. Fewer than 'count'
// peers are returned when the table does not hold that many.
func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID {
	// This is the number of bits _we_ share with the key. All peers in this
	// bucket share cpl bits with us and will therefore share at least cpl+1
	// bits with the given key. +1 because both the target and all peers in
	// this bucket differ from us in the cpl bit.
	cpl := CommonPrefixLen(id, rt.local)

	// It's assumed that this also protects the buckets.
	rt.tabLock.RLock()

	// Get bucket index or last bucket
	if cpl >= len(rt.buckets) {
		cpl = len(rt.buckets) - 1
	}

	pds := peerDistanceSorter{
		peers:  make([]peerDistance, 0, count+rt.bucketsize),
		target: id,
	}

	// Add peers from the target bucket (cpl+1 shared bits).
	pds.appendPeersFromList(rt.buckets[cpl].list)

	// If we're short, add peers from all buckets to the right. All buckets
	// to the right share exactly cpl bits (as opposed to the cpl+1 bits
	// shared by the peers in the cpl bucket).
	//
	// This is, unfortunately, less efficient than we'd like. We will switch
	// to a trie implementation eventually which will allow us to find the
	// closest N peers to any target key.
	if pds.Len() < count {
		for i := cpl + 1; i < len(rt.buckets); i++ {
			pds.appendPeersFromList(rt.buckets[i].list)
		}
	}

	// If we're still short, add in buckets that share _fewer_ bits. We can
	// do this bucket by bucket because each bucket will share 1 fewer bit
	// than the last.
	//
	// * bucket cpl-1: cpl-1 shared bits.
	// * bucket cpl-2: cpl-2 shared bits.
	// ...
	for i := cpl - 1; i >= 0 && pds.Len() < count; i-- {
		pds.appendPeersFromList(rt.buckets[i].list)
	}
	rt.tabLock.RUnlock()

	// Sort by XOR distance to the target key (pds.target), nearest first.
	pds.sort()

	// Trim to the requested count only after sorting, so we keep the nearest ones.
	if count < pds.Len() {
		pds.peers = pds.peers[:count]
	}

	out := make([]peer.ID, 0, pds.Len())
	for _, p := range pds.peers {
		out = append(out, p.p)
	}

	return out
}
// Size returns the total number of peers across all buckets in the routing table.
func (rt *RoutingTable) Size() int {
	rt.tabLock.RLock()
	defer rt.tabLock.RUnlock()

	total := 0
	for _, bkt := range rt.buckets {
		total += bkt.len()
	}
	return total
}
// ListPeers takes a RoutingTable and returns a list of all peers from all buckets in the table.
func (rt *RoutingTable) ListPeers() []peer.ID {
	rt.tabLock.RLock()
	defer rt.tabLock.RUnlock()

	var out []peer.ID
	for _, bkt := range rt.buckets {
		out = append(out, bkt.peerIds()...)
	}
	return out
}
// Print writes a human-readable dump of the routing table to stdout:
// the table parameters, then each bucket's peers with their latency EWMA.
func (rt *RoutingTable) Print() {
	fmt.Printf("Routing Table, bs = %d, Max latency = %d\n", rt.bucketsize, rt.maxLatency)

	rt.tabLock.RLock()
	defer rt.tabLock.RUnlock()

	for idx, bkt := range rt.buckets {
		fmt.Printf("\tbucket: %d\n", idx)
		for el := bkt.list.Front(); el != nil; el = el.Next() {
			pid := el.Value.(*PeerInfo).Id
			fmt.Printf("\t\t- %s %s\n", pid.String(), rt.metrics.LatencyEWMA(pid).String())
		}
	}
}
// GetDiversityStats returns the diversity stats for the Routing Table if a
// diversity Filter is configured, and nil otherwise.
func (rt *RoutingTable) GetDiversityStats() []peerdiversity.CplDiversityStats {
	if rt.df == nil {
		return nil
	}
	return rt.df.GetDiversityStats()
}
// bucketIdForPeer maps a peer to the index of the bucket it belongs in:
// the common prefix length with the local key, clamped to the last
// (still-folded) bucket.
// the caller is responsible for the locking
func (rt *RoutingTable) bucketIdForPeer(p peer.ID) int {
	cpl := CommonPrefixLen(ConvertPeerID(p), rt.local)
	if cpl >= len(rt.buckets) {
		// Deeper prefixes all live in the last bucket until it is unfolded.
		return len(rt.buckets) - 1
	}
	return cpl
}
// maxCommonPrefix returns the maximum common prefix length between any peer in
// the table and the current peer.
func (rt *RoutingTable) maxCommonPrefix() uint {
	rt.tabLock.RLock()
	defer rt.tabLock.RUnlock()

	// Scan from the deepest bucket backwards; the first non-empty bucket
	// holds the peers sharing the longest prefix with us.
	for i := len(rt.buckets) - 1; i >= 0; i-- {
		if b := rt.buckets[i]; b.len() > 0 {
			return b.maxCommonPrefix(rt.local)
		}
	}
	return 0
}

View File

@@ -0,0 +1,118 @@
package kbucket
import (
"crypto/rand"
"encoding/binary"
"fmt"
"time"
"github.com/libp2p/go-libp2p/core/peer"
mh "github.com/multiformats/go-multihash"
)
// maxCplForRefresh is the maximum cpl we support for refresh.
// This limit exists because we can only generate 'maxCplForRefresh' bit prefixes for now
// (GenRandPeerID works on a 16-bit prefix map).
const maxCplForRefresh uint = 15
// GetTrackedCplsForRefresh returns the Cpl's we are tracking for refresh.
// Caller is free to modify the returned slice as it is a defensive copy.
// Index i holds the last refresh time for cpl i; it is the zero time.Time
// when that cpl has never been refreshed.
func (rt *RoutingTable) GetTrackedCplsForRefresh() []time.Time {
	maxTracked := rt.maxCommonPrefix()
	if maxTracked > maxCplForRefresh {
		maxTracked = maxCplForRefresh
	}

	rt.cplRefreshLk.RLock()
	defer rt.cplRefreshLk.RUnlock()

	refreshTimes := make([]time.Time, maxTracked+1)
	for cpl := uint(0); cpl <= maxTracked; cpl++ {
		refreshTimes[cpl] = rt.cplRefreshedAt[cpl]
	}
	return refreshTimes
}
// randUint16 returns a cryptographically random 16-bit value.
func randUint16() (uint16, error) {
	var b [2]byte
	_, err := rand.Read(b[:])
	return binary.BigEndian.Uint16(b[:]), err
}
// GenRandPeerID generates a random peerID for a given Cpl
// (common prefix length relative to the local routing table key).
// targetCpl must not exceed maxCplForRefresh.
func (rt *RoutingTable) GenRandPeerID(targetCpl uint) (peer.ID, error) {
	if targetCpl > maxCplForRefresh {
		return "", fmt.Errorf("cannot generate peer ID for Cpl greater than %d", maxCplForRefresh)
	}

	// Work on the first 16 bits of the local key; maxCplForRefresh (15)
	// guarantees the target prefix fits within them.
	localPrefix := binary.BigEndian.Uint16(rt.local)

	// For host with ID `L`, an ID `K` belongs to a bucket with ID `B` ONLY IF CommonPrefixLen(L,K) is EXACTLY B.
	// Hence, to achieve a targetPrefix `T`, we must toggle the (T+1)th bit in L & then copy (T+1) bits from L
	// to our randomly generated prefix.
	toggledLocalPrefix := localPrefix ^ (uint16(0x8000) >> targetCpl)
	randPrefix, err := randUint16()
	if err != nil {
		return "", err
	}

	// Combine the toggled local prefix and the random bits at the correct offset
	// such that ONLY the first `targetCpl` bits match the local ID.
	mask := (^uint16(0)) << (16 - (targetCpl + 1))
	targetPrefix := (toggledLocalPrefix & mask) | (randPrefix & ^mask)

	// Convert to a known peer ID.
	// keyPrefixMap is defined elsewhere in this package — presumably a
	// precomputed table mapping each 16-bit prefix to a 32-bit preimage
	// whose hash starts with that prefix; verify against its generator.
	key := keyPrefixMap[targetPrefix]
	// Multihash layout: <code=SHA2_256, length=32, 32-byte digest>; only the
	// first 4 digest bytes carry the key, the rest stay zero.
	id := [32 + 2]byte{mh.SHA2_256, 32}
	binary.BigEndian.PutUint32(id[2:], key)
	return peer.ID(id[:]), nil
}
// GenRandomKey generates a random key matching a provided Common Prefix Length (Cpl)
// wrt. the local identity. The returned key matches the targetCpl first bits of the
// local key, the following bit is the inverse of the local key's bit at position
// targetCpl+1 and the remaining bits are randomly generated.
func (rt *RoutingTable) GenRandomKey(targetCpl uint) (ID, error) {
	if int(targetCpl+1) >= len(rt.local)*8 {
		return nil, fmt.Errorf("cannot generate peer ID for Cpl greater than key length")
	}
	// Number of whole bytes fully copied from the local key.
	partialOffset := targetCpl / 8

	// output contains the first partialOffset bytes of the local key
	// and the remaining bytes are random
	output := make([]byte, len(rt.local))
	copy(output, rt.local[:partialOffset])
	_, err := rand.Read(output[partialOffset:])
	if err != nil {
		return nil, err
	}

	// Patch the boundary byte at partialOffset: its top targetCpl%8 bits come
	// from the local key, the next bit is flipped, and the low bits stay random.
	remainingBits := 8 - targetCpl%8 // bits of this byte NOT copied from the local key
	orig := rt.local[partialOffset]

	origMask := ^uint8(0) << remainingBits // selects the copied (top targetCpl%8) bits
	randMask := ^origMask >> 1             // selects the low bits that remain random
	flippedBitOffset := remainingBits - 1
	flippedBitMask := uint8(1) << flippedBitOffset // selects the single bit to invert

	// restore the remainingBits Most Significant Bits of orig
	// and flip the flippedBitOffset-th bit of orig
	// (| and ^ have equal precedence and associate left-to-right here).
	output[partialOffset] = orig&origMask | (orig & flippedBitMask) ^ flippedBitMask | output[partialOffset]&randMask

	return ID(output), nil
}
// ResetCplRefreshedAtForID resets the refresh time for the Cpl of the given ID.
// Cpls above maxCplForRefresh are not tracked and are silently ignored.
func (rt *RoutingTable) ResetCplRefreshedAtForID(id ID, newTime time.Time) {
	cpl := uint(CommonPrefixLen(id, rt.local))
	if cpl > maxCplForRefresh {
		return
	}

	rt.cplRefreshLk.Lock()
	defer rt.cplRefreshLk.Unlock()
	rt.cplRefreshedAt[cpl] = newTime
}

59
vendor/github.com/libp2p/go-libp2p-kbucket/util.go generated vendored Normal file
View File

@@ -0,0 +1,59 @@
package kbucket
import (
"errors"
"github.com/minio/sha256-simd"
ks "github.com/libp2p/go-libp2p-kbucket/keyspace"
"github.com/libp2p/go-libp2p/core/peer"
u "github.com/ipfs/boxo/util"
)
// ErrLookupFailure is returned if a routing table query returns no results. This is NOT expected
// behaviour; compare with errors.Is.
var ErrLookupFailure = errors.New("failed to find any peer in table")
// ID for IpfsDHT is in the XORKeySpace
//
// The type dht.ID signifies that its contents have been hashed from either a
// peer.ID or a util.Key. This unifies the keyspace
// (all IDs are comparable under the XOR distance metric).
type ID []byte
// less reports whether id sorts before other in the XOR keyspace ordering.
func (id ID) less(other ID) bool {
	self := ks.Key{Space: ks.XORKeySpace, Bytes: id}
	return self.Less(ks.Key{Space: ks.XORKeySpace, Bytes: other})
}
// xor returns the bitwise XOR of a and b, i.e. their distance in the XOR metric.
func xor(a, b ID) ID {
	return ID(u.XOR(a, b))
}
// CommonPrefixLen returns the number of leading bits a and b share,
// computed as the count of leading zero bits in their XOR.
func CommonPrefixLen(a, b ID) int {
	return ks.ZeroPrefixLen(u.XOR(a, b))
}
// ConvertPeerID creates a DHT ID by hashing a Peer ID (Multihash)
// with SHA-256, placing it in the uniform 256-bit XOR keyspace.
func ConvertPeerID(id peer.ID) ID {
	hash := sha256.Sum256([]byte(id))
	return hash[:]
}
// ConvertKey creates a DHT ID by hashing a local key (String)
// with SHA-256, placing it in the same keyspace as ConvertPeerID.
func ConvertKey(id string) ID {
	hash := sha256.Sum256([]byte(id))
	return hash[:]
}
// Closer reports whether peer a is closer to key than peer b is,
// under the XOR distance metric in the hashed keyspace.
func Closer(a, b peer.ID, key string) bool {
	target := ConvertKey(key)
	distA := xor(ConvertPeerID(a), target)
	distB := xor(ConvertPeerID(b), target)
	return distA.less(distB)
}

View File

@@ -0,0 +1,3 @@
{
"version": "v0.6.3"
}