Integrate BACKBEAT SDK and resolve KACHING license validation
Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
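The "file-based secrets" mentioned above follow the standard Docker convention of mounting each secret under /run/secrets. The sketch below illustrates that loading pattern only; the secret name "chorus_license_id" and the CHORUS_LICENSE_ID fallback variable are assumptions for illustration, not configuration keys confirmed by this commit.

package main

import (
	"fmt"
	"os"
	"strings"
)

// loadSecret prefers a Docker secret mounted under /run/secrets and falls
// back to an environment variable for local development.
func loadSecret(name, envVar string) (string, error) {
	if data, err := os.ReadFile("/run/secrets/" + name); err == nil {
		return strings.TrimSpace(string(data)), nil
	}
	if v := os.Getenv(envVar); v != "" {
		return v, nil
	}
	return "", fmt.Errorf("secret %q not found", name)
}

func main() {
	licenseID, err := loadSecret("chorus_license_id", "CHORUS_LICENSE_ID")
	if err != nil {
		panic(err)
	}
	fmt.Println("license id loaded, length:", len(licenseID))
}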
58 vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/metrics.go (generated, vendored, Normal file)
@@ -0,0 +1,58 @@
package peerstore

import (
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

// LatencyEWMASmoothing governs the decay of the EWMA (the speed
// at which it changes). This must be a normalized (0-1) value.
// 1 is 100% change, 0 is no change.
var LatencyEWMASmoothing = 0.1

type metrics struct {
	mutex  sync.RWMutex
	latmap map[peer.ID]time.Duration
}

func NewMetrics() *metrics {
	return &metrics{
		latmap: make(map[peer.ID]time.Duration),
	}
}

// RecordLatency records a new latency measurement
func (m *metrics) RecordLatency(p peer.ID, next time.Duration) {
	nextf := float64(next)
	s := LatencyEWMASmoothing
	if s > 1 || s < 0 {
		s = 0.1 // ignore the knob. it's broken. look, it jiggles.
	}

	m.mutex.Lock()
	ewma, found := m.latmap[p]
	ewmaf := float64(ewma)
	if !found {
		m.latmap[p] = next // when no data, just take it as the mean.
	} else {
		nextf = ((1.0 - s) * ewmaf) + (s * nextf)
		m.latmap[p] = time.Duration(nextf)
	}
	m.mutex.Unlock()
}

// LatencyEWMA returns an exponentially-weighted moving avg.
// of all measurements of a peer's latency.
func (m *metrics) LatencyEWMA(p peer.ID) time.Duration {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	return m.latmap[p]
}

func (m *metrics) RemovePeer(p peer.ID) {
	m.mutex.Lock()
	delete(m.latmap, p)
	m.mutex.Unlock()
}
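For reference, RecordLatency above applies the update new = (1 - s)*old + s*sample with s = LatencyEWMASmoothing. A tiny standalone sketch of that rule, illustrative only and not part of the vendored code:

package main

import (
	"fmt"
	"time"
)

// ewma applies the same update rule as RecordLatency: new = (1-s)*old + s*sample.
func ewma(old, sample time.Duration, s float64) time.Duration {
	return time.Duration((1.0-s)*float64(old) + s*float64(sample))
}

func main() {
	cur := 100 * time.Millisecond
	cur = ewma(cur, 200*time.Millisecond, 0.1)
	fmt.Println(cur) // 110ms: a single slow sample moves the average only slightly
}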
22 vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/peerstore.go (generated, vendored, Normal file)
@@ -0,0 +1,22 @@
package peerstore

import (
	"github.com/libp2p/go-libp2p/core/peer"
	pstore "github.com/libp2p/go-libp2p/core/peerstore"
)

func PeerInfos(ps pstore.Peerstore, peers peer.IDSlice) []peer.AddrInfo {
	pi := make([]peer.AddrInfo, len(peers))
	for i, p := range peers {
		pi[i] = ps.PeerInfo(p)
	}
	return pi
}

func PeerInfoIDs(pis []peer.AddrInfo) peer.IDSlice {
	ps := make(peer.IDSlice, len(pis))
	for i, pi := range pis {
		ps[i] = pi.ID
	}
	return ps
}
530 vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go (generated, vendored, Normal file)
@@ -0,0 +1,530 @@
package pstoremem

import (
	"context"
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	pstore "github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/record"

	logging "github.com/ipfs/go-log/v2"
	ma "github.com/multiformats/go-multiaddr"
)

var log = logging.Logger("peerstore")

type expiringAddr struct {
	Addr    ma.Multiaddr
	TTL     time.Duration
	Expires time.Time
}

func (e *expiringAddr) ExpiredBy(t time.Time) bool {
	return !t.Before(e.Expires)
}

type peerRecordState struct {
	Envelope *record.Envelope
	Seq      uint64
}

type addrSegments [256]*addrSegment

type addrSegment struct {
	sync.RWMutex

	// Use pointers to save memory. Maps always leave some fraction of their
	// space unused. storing the *values* directly in the map will
	// drastically increase the space waste. In our case, by 6x.
	addrs map[peer.ID]map[string]*expiringAddr

	signedPeerRecords map[peer.ID]*peerRecordState
}

func (segments *addrSegments) get(p peer.ID) *addrSegment {
	if len(p) == 0 { // it's not terribly useful to use an empty peer ID, but at least we should not panic
		return segments[0]
	}
	return segments[uint8(p[len(p)-1])]
}

type clock interface {
	Now() time.Time
}

type realclock struct{}

func (rc realclock) Now() time.Time {
	return time.Now()
}

// memoryAddrBook manages addresses.
type memoryAddrBook struct {
	segments addrSegments

	refCount sync.WaitGroup
	cancel   func()

	subManager *AddrSubManager
	clock      clock
}

var _ pstore.AddrBook = (*memoryAddrBook)(nil)
var _ pstore.CertifiedAddrBook = (*memoryAddrBook)(nil)

func NewAddrBook() *memoryAddrBook {
	ctx, cancel := context.WithCancel(context.Background())

	ab := &memoryAddrBook{
		segments: func() (ret addrSegments) {
			for i := range ret {
				ret[i] = &addrSegment{
					addrs:             make(map[peer.ID]map[string]*expiringAddr),
					signedPeerRecords: make(map[peer.ID]*peerRecordState)}
			}
			return ret
		}(),
		subManager: NewAddrSubManager(),
		cancel:     cancel,
		clock:      realclock{},
	}
	ab.refCount.Add(1)
	go ab.background(ctx)
	return ab
}

type AddrBookOption func(book *memoryAddrBook) error

func WithClock(clock clock) AddrBookOption {
	return func(book *memoryAddrBook) error {
		book.clock = clock
		return nil
	}
}

// background periodically schedules a gc
func (mab *memoryAddrBook) background(ctx context.Context) {
	defer mab.refCount.Done()
	ticker := time.NewTicker(1 * time.Hour)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			mab.gc()
		case <-ctx.Done():
			return
		}
	}
}

func (mab *memoryAddrBook) Close() error {
	mab.cancel()
	mab.refCount.Wait()
	return nil
}

// gc garbage collects the in-memory address book.
func (mab *memoryAddrBook) gc() {
	now := mab.clock.Now()
	for _, s := range mab.segments {
		s.Lock()
		for p, amap := range s.addrs {
			for k, addr := range amap {
				if addr.ExpiredBy(now) {
					delete(amap, k)
				}
			}
			if len(amap) == 0 {
				delete(s.addrs, p)
				delete(s.signedPeerRecords, p)
			}
		}
		s.Unlock()
	}
}

func (mab *memoryAddrBook) PeersWithAddrs() peer.IDSlice {
	// deduplicate, since the same peer could have both signed & unsigned addrs
	set := make(map[peer.ID]struct{})
	for _, s := range mab.segments {
		s.RLock()
		for pid, amap := range s.addrs {
			if len(amap) > 0 {
				set[pid] = struct{}{}
			}
		}
		s.RUnlock()
	}
	peers := make(peer.IDSlice, 0, len(set))
	for pid := range set {
		peers = append(peers, pid)
	}
	return peers
}

// AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
func (mab *memoryAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	mab.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// AddAddrs gives memoryAddrBook addresses to use, with a given ttl
// (time-to-live), after which the address is no longer valid.
// This function never reduces the TTL or expiration of an address.
func (mab *memoryAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	// if we have a valid peer record, ignore unsigned addrs
	// peerRec := mab.GetPeerRecord(p)
	// if peerRec != nil {
	// 	return
	// }
	mab.addAddrs(p, addrs, ttl)
}

// ConsumePeerRecord adds addresses from a signed peer.PeerRecord (contained in
// a record.Envelope), which will expire after the given TTL.
// See https://godoc.org/github.com/libp2p/go-libp2p/core/peerstore#CertifiedAddrBook for more details.
func (mab *memoryAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, ttl time.Duration) (bool, error) {
	r, err := recordEnvelope.Record()
	if err != nil {
		return false, err
	}
	rec, ok := r.(*peer.PeerRecord)
	if !ok {
		return false, fmt.Errorf("unable to process envelope: not a PeerRecord")
	}
	if !rec.PeerID.MatchesPublicKey(recordEnvelope.PublicKey) {
		return false, fmt.Errorf("signing key does not match PeerID in PeerRecord")
	}

	// ensure seq is greater than, or equal to, the last received
	s := mab.segments.get(rec.PeerID)
	s.Lock()
	defer s.Unlock()
	lastState, found := s.signedPeerRecords[rec.PeerID]
	if found && lastState.Seq > rec.Seq {
		return false, nil
	}
	s.signedPeerRecords[rec.PeerID] = &peerRecordState{
		Envelope: recordEnvelope,
		Seq:      rec.Seq,
	}
	mab.addAddrsUnlocked(s, rec.PeerID, rec.Addrs, ttl, true)
	return true, nil
}

func (mab *memoryAddrBook) addAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	s := mab.segments.get(p)
	s.Lock()
	defer s.Unlock()

	mab.addAddrsUnlocked(s, p, addrs, ttl, false)
}

func (mab *memoryAddrBook) addAddrsUnlocked(s *addrSegment, p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, signed bool) {
	// if ttl is zero, exit. nothing to do.
	if ttl <= 0 {
		return
	}

	amap, ok := s.addrs[p]
	if !ok {
		amap = make(map[string]*expiringAddr)
		s.addrs[p] = amap
	}

	exp := mab.clock.Now().Add(ttl)
	for _, addr := range addrs {
		// Remove suffix of /p2p/peer-id from address
		addr, addrPid := peer.SplitAddr(addr)
		if addr == nil {
			log.Warnw("Was passed nil multiaddr", "peer", p)
			continue
		}
		if addrPid != "" && addrPid != p {
			log.Warnf("Was passed p2p address with a different peerId. found: %s, expected: %s", addrPid, p)
			continue
		}
		// find the highest TTL and Expiry time between
		// existing records and function args
		a, found := amap[string(addr.Bytes())] // won't allocate.
		if !found {
			// not found, announce it.
			entry := &expiringAddr{Addr: addr, Expires: exp, TTL: ttl}
			amap[string(addr.Bytes())] = entry
			mab.subManager.BroadcastAddr(p, addr)
		} else {
			// update ttl & exp to whichever is greater between new and existing entry
			if ttl > a.TTL {
				a.TTL = ttl
			}
			if exp.After(a.Expires) {
				a.Expires = exp
			}
		}
	}
}

// SetAddr calls mgr.SetAddrs(p, addr, ttl)
func (mab *memoryAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
	mab.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
}

// SetAddrs sets the ttl on addresses. This clears any TTL there previously.
// This is used when we receive the best estimate of the validity of an address.
func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
	s := mab.segments.get(p)
	s.Lock()
	defer s.Unlock()

	amap, ok := s.addrs[p]
	if !ok {
		amap = make(map[string]*expiringAddr)
		s.addrs[p] = amap
	}

	exp := mab.clock.Now().Add(ttl)
	for _, addr := range addrs {
		addr, addrPid := peer.SplitAddr(addr)
		if addr == nil {
			log.Warnw("was passed nil multiaddr", "peer", p)
			continue
		}
		if addrPid != "" && addrPid != p {
			log.Warnf("was passed p2p address with a different peerId, found: %s wanted: %s", addrPid, p)
			continue
		}
		aBytes := addr.Bytes()
		key := string(aBytes)

		// re-set all of them for new ttl.
		if ttl > 0 {
			amap[key] = &expiringAddr{Addr: addr, Expires: exp, TTL: ttl}
			mab.subManager.BroadcastAddr(p, addr)
		} else {
			delete(amap, key)
		}
	}
}

// UpdateAddrs updates the addresses associated with the given peer that have
// the given oldTTL to have the given newTTL.
func (mab *memoryAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
	s := mab.segments.get(p)
	s.Lock()
	defer s.Unlock()
	exp := mab.clock.Now().Add(newTTL)
	amap, found := s.addrs[p]
	if !found {
		return
	}

	for k, a := range amap {
		if oldTTL == a.TTL {
			if newTTL == 0 {
				delete(amap, k)
			} else {
				a.TTL = newTTL
				a.Expires = exp
				amap[k] = a
			}
		}
	}
}

// Addrs returns all known (and valid) addresses for a given peer
func (mab *memoryAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
	s := mab.segments.get(p)
	s.RLock()
	defer s.RUnlock()

	return validAddrs(mab.clock.Now(), s.addrs[p])
}

func validAddrs(now time.Time, amap map[string]*expiringAddr) []ma.Multiaddr {
	good := make([]ma.Multiaddr, 0, len(amap))
	if amap == nil {
		return good
	}
	for _, m := range amap {
		if !m.ExpiredBy(now) {
			good = append(good, m.Addr)
		}
	}

	return good
}

// GetPeerRecord returns a Envelope containing a PeerRecord for the
// given peer id, if one exists.
// Returns nil if no signed PeerRecord exists for the peer.
func (mab *memoryAddrBook) GetPeerRecord(p peer.ID) *record.Envelope {
	s := mab.segments.get(p)
	s.RLock()
	defer s.RUnlock()

	// although the signed record gets garbage collected when all addrs inside it are expired,
	// we may be in between the expiration time and the GC interval
	// so, we check to see if we have any valid signed addrs before returning the record
	if len(validAddrs(mab.clock.Now(), s.addrs[p])) == 0 {
		return nil
	}

	state := s.signedPeerRecords[p]
	if state == nil {
		return nil
	}
	return state.Envelope
}

// ClearAddrs removes all previously stored addresses
func (mab *memoryAddrBook) ClearAddrs(p peer.ID) {
	s := mab.segments.get(p)
	s.Lock()
	defer s.Unlock()

	delete(s.addrs, p)
	delete(s.signedPeerRecords, p)
}

// AddrStream returns a channel on which all new addresses discovered for a
// given peer ID will be published.
func (mab *memoryAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
	s := mab.segments.get(p)
	s.RLock()
	defer s.RUnlock()

	baseaddrslice := s.addrs[p]
	initial := make([]ma.Multiaddr, 0, len(baseaddrslice))
	for _, a := range baseaddrslice {
		initial = append(initial, a.Addr)
	}

	return mab.subManager.AddrStream(ctx, p, initial)
}

type addrSub struct {
	pubch chan ma.Multiaddr
	ctx   context.Context
}

func (s *addrSub) pubAddr(a ma.Multiaddr) {
	select {
	case s.pubch <- a:
	case <-s.ctx.Done():
	}
}

// An abstracted, pub-sub manager for address streams. Extracted from
// memoryAddrBook in order to support additional implementations.
type AddrSubManager struct {
	mu   sync.RWMutex
	subs map[peer.ID][]*addrSub
}

// NewAddrSubManager initializes an AddrSubManager.
func NewAddrSubManager() *AddrSubManager {
	return &AddrSubManager{
		subs: make(map[peer.ID][]*addrSub),
	}
}

// Used internally by the address stream coroutine to remove a subscription
// from the manager.
func (mgr *AddrSubManager) removeSub(p peer.ID, s *addrSub) {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()

	subs := mgr.subs[p]
	if len(subs) == 1 {
		if subs[0] != s {
			return
		}
		delete(mgr.subs, p)
		return
	}

	for i, v := range subs {
		if v == s {
			subs[i] = subs[len(subs)-1]
			subs[len(subs)-1] = nil
			mgr.subs[p] = subs[:len(subs)-1]
			return
		}
	}
}

// BroadcastAddr broadcasts a new address to all subscribed streams.
func (mgr *AddrSubManager) BroadcastAddr(p peer.ID, addr ma.Multiaddr) {
	mgr.mu.RLock()
	defer mgr.mu.RUnlock()

	if subs, ok := mgr.subs[p]; ok {
		for _, sub := range subs {
			sub.pubAddr(addr)
		}
	}
}

// AddrStream creates a new subscription for a given peer ID, pre-populating the
// channel with any addresses we might already have on file.
func (mgr *AddrSubManager) AddrStream(ctx context.Context, p peer.ID, initial []ma.Multiaddr) <-chan ma.Multiaddr {
	sub := &addrSub{pubch: make(chan ma.Multiaddr), ctx: ctx}
	out := make(chan ma.Multiaddr)

	mgr.mu.Lock()
	mgr.subs[p] = append(mgr.subs[p], sub)
	mgr.mu.Unlock()

	sort.Sort(addrList(initial))

	go func(buffer []ma.Multiaddr) {
		defer close(out)

		sent := make(map[string]struct{}, len(buffer))
		for _, a := range buffer {
			sent[string(a.Bytes())] = struct{}{}
		}

		var outch chan ma.Multiaddr
		var next ma.Multiaddr
		if len(buffer) > 0 {
			next = buffer[0]
			buffer = buffer[1:]
			outch = out
		}

		for {
			select {
			case outch <- next:
				if len(buffer) > 0 {
					next = buffer[0]
					buffer = buffer[1:]
				} else {
					outch = nil
					next = nil
				}
			case naddr := <-sub.pubch:
				if _, ok := sent[string(naddr.Bytes())]; ok {
					continue
				}
				sent[string(naddr.Bytes())] = struct{}{}

				if next == nil {
					next = naddr
					outch = out
				} else {
					buffer = append(buffer, naddr)
				}
			case <-ctx.Done():
				mgr.removeSub(p, sub)
				return
			}
		}
	}(initial)

	return out
}
97 vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/keybook.go (generated, vendored, Normal file)
@@ -0,0 +1,97 @@
package pstoremem

import (
	"errors"
	"sync"

	ic "github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	pstore "github.com/libp2p/go-libp2p/core/peerstore"
)

type memoryKeyBook struct {
	sync.RWMutex // same lock. wont happen a ton.
	pks map[peer.ID]ic.PubKey
	sks map[peer.ID]ic.PrivKey
}

var _ pstore.KeyBook = (*memoryKeyBook)(nil)

func NewKeyBook() *memoryKeyBook {
	return &memoryKeyBook{
		pks: map[peer.ID]ic.PubKey{},
		sks: map[peer.ID]ic.PrivKey{},
	}
}

func (mkb *memoryKeyBook) PeersWithKeys() peer.IDSlice {
	mkb.RLock()
	ps := make(peer.IDSlice, 0, len(mkb.pks)+len(mkb.sks))
	for p := range mkb.pks {
		ps = append(ps, p)
	}
	for p := range mkb.sks {
		if _, found := mkb.pks[p]; !found {
			ps = append(ps, p)
		}
	}
	mkb.RUnlock()
	return ps
}

func (mkb *memoryKeyBook) PubKey(p peer.ID) ic.PubKey {
	mkb.RLock()
	pk := mkb.pks[p]
	mkb.RUnlock()
	if pk != nil {
		return pk
	}
	pk, err := p.ExtractPublicKey()
	if err == nil {
		mkb.Lock()
		mkb.pks[p] = pk
		mkb.Unlock()
	}
	return pk
}

func (mkb *memoryKeyBook) AddPubKey(p peer.ID, pk ic.PubKey) error {
	// check it's correct first
	if !p.MatchesPublicKey(pk) {
		return errors.New("ID does not match PublicKey")
	}

	mkb.Lock()
	mkb.pks[p] = pk
	mkb.Unlock()
	return nil
}

func (mkb *memoryKeyBook) PrivKey(p peer.ID) ic.PrivKey {
	mkb.RLock()
	defer mkb.RUnlock()
	return mkb.sks[p]
}

func (mkb *memoryKeyBook) AddPrivKey(p peer.ID, sk ic.PrivKey) error {
	if sk == nil {
		return errors.New("sk is nil (PrivKey)")
	}

	// check it's correct first
	if !p.MatchesPrivateKey(sk) {
		return errors.New("ID does not match PrivateKey")
	}

	mkb.Lock()
	mkb.sks[p] = sk
	mkb.Unlock()
	return nil
}

func (mkb *memoryKeyBook) RemovePeer(p peer.ID) {
	mkb.Lock()
	delete(mkb.sks, p)
	delete(mkb.pks, p)
	mkb.Unlock()
}
54 vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/metadata.go (generated, vendored, Normal file)
@@ -0,0 +1,54 @@
package pstoremem

import (
	"sync"

	"github.com/libp2p/go-libp2p/core/peer"
	pstore "github.com/libp2p/go-libp2p/core/peerstore"
)

type memoryPeerMetadata struct {
	// store other data, like versions
	ds     map[peer.ID]map[string]interface{}
	dslock sync.RWMutex
}

var _ pstore.PeerMetadata = (*memoryPeerMetadata)(nil)

func NewPeerMetadata() *memoryPeerMetadata {
	return &memoryPeerMetadata{
		ds: make(map[peer.ID]map[string]interface{}),
	}
}

func (ps *memoryPeerMetadata) Put(p peer.ID, key string, val interface{}) error {
	ps.dslock.Lock()
	defer ps.dslock.Unlock()
	m, ok := ps.ds[p]
	if !ok {
		m = make(map[string]interface{})
		ps.ds[p] = m
	}
	m[key] = val
	return nil
}

func (ps *memoryPeerMetadata) Get(p peer.ID, key string) (interface{}, error) {
	ps.dslock.RLock()
	defer ps.dslock.RUnlock()
	m, ok := ps.ds[p]
	if !ok {
		return nil, pstore.ErrNotFound
	}
	val, ok := m[key]
	if !ok {
		return nil, pstore.ErrNotFound
	}
	return val, nil
}

func (ps *memoryPeerMetadata) RemovePeer(p peer.ID) {
	ps.dslock.Lock()
	delete(ps.ds, p)
	ps.dslock.Unlock()
}
114 vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/peerstore.go (generated, vendored, Normal file)
@@ -0,0 +1,114 @@
package pstoremem

import (
	"fmt"
	"io"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore"
)

type pstoremem struct {
	peerstore.Metrics

	*memoryKeyBook
	*memoryAddrBook
	*memoryProtoBook
	*memoryPeerMetadata
}

var _ peerstore.Peerstore = &pstoremem{}

type Option interface{}

// NewPeerstore creates an in-memory thread-safe collection of peers.
// It's the caller's responsibility to call RemovePeer to ensure
// that memory consumption of the peerstore doesn't grow unboundedly.
func NewPeerstore(opts ...Option) (ps *pstoremem, err error) {
	ab := NewAddrBook()
	defer func() {
		if err != nil {
			ab.Close()
		}
	}()

	var protoBookOpts []ProtoBookOption
	for _, opt := range opts {
		switch o := opt.(type) {
		case ProtoBookOption:
			protoBookOpts = append(protoBookOpts, o)
		case AddrBookOption:
			o(ab)
		default:
			return nil, fmt.Errorf("unexpected peer store option: %v", o)
		}
	}
	pb, err := NewProtoBook(protoBookOpts...)
	if err != nil {
		return nil, err
	}
	return &pstoremem{
		Metrics:            pstore.NewMetrics(),
		memoryKeyBook:      NewKeyBook(),
		memoryAddrBook:     ab,
		memoryProtoBook:    pb,
		memoryPeerMetadata: NewPeerMetadata(),
	}, nil
}

func (ps *pstoremem) Close() (err error) {
	var errs []error
	weakClose := func(name string, c interface{}) {
		if cl, ok := c.(io.Closer); ok {
			if err = cl.Close(); err != nil {
				errs = append(errs, fmt.Errorf("%s error: %s", name, err))
			}
		}
	}
	weakClose("keybook", ps.memoryKeyBook)
	weakClose("addressbook", ps.memoryAddrBook)
	weakClose("protobook", ps.memoryProtoBook)
	weakClose("peermetadata", ps.memoryPeerMetadata)

	if len(errs) > 0 {
		return fmt.Errorf("failed while closing peerstore; err(s): %q", errs)
	}
	return nil
}

func (ps *pstoremem) Peers() peer.IDSlice {
	set := map[peer.ID]struct{}{}
	for _, p := range ps.PeersWithKeys() {
		set[p] = struct{}{}
	}
	for _, p := range ps.PeersWithAddrs() {
		set[p] = struct{}{}
	}

	pps := make(peer.IDSlice, 0, len(set))
	for p := range set {
		pps = append(pps, p)
	}
	return pps
}

func (ps *pstoremem) PeerInfo(p peer.ID) peer.AddrInfo {
	return peer.AddrInfo{
		ID:    p,
		Addrs: ps.memoryAddrBook.Addrs(p),
	}
}

// RemovePeer removes entries associated with a peer from:
// * the KeyBook
// * the ProtoBook
// * the PeerMetadata
// * the Metrics
// It DOES NOT remove the peer from the AddrBook.
func (ps *pstoremem) RemovePeer(p peer.ID) {
	ps.memoryKeyBook.RemovePeer(p)
	ps.memoryProtoBook.RemovePeer(p)
	ps.memoryPeerMetadata.RemovePeer(p)
	ps.Metrics.RemovePeer(p)
}
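A short usage sketch of the in-memory peerstore constructed by NewPeerstore above; the peer ID and multiaddr literals are placeholders for illustration, not values taken from this commit.

package main

import (
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	ps, err := pstoremem.NewPeerstore()
	if err != nil {
		panic(err)
	}
	defer ps.Close()

	// Placeholder ID; real code derives peer IDs from public keys.
	pid := peer.ID("placeholder-peer-id")
	addr, err := ma.NewMultiaddr("/ip4/203.0.113.7/tcp/4001")
	if err != nil {
		panic(err)
	}

	// Addresses live only for their TTL; the hourly GC in addr_book.go prunes expired ones.
	ps.AddAddrs(pid, []ma.Multiaddr{addr}, time.Hour)
	fmt.Println(ps.Addrs(pid))
}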
192 vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go (generated, vendored, Normal file)
@@ -0,0 +1,192 @@
package pstoremem

import (
	"errors"
	"sync"

	"github.com/libp2p/go-libp2p/core/peer"
	pstore "github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/protocol"
)

type protoSegment struct {
	sync.RWMutex
	protocols map[peer.ID]map[protocol.ID]struct{}
}

type protoSegments [256]*protoSegment

func (s *protoSegments) get(p peer.ID) *protoSegment {
	return s[byte(p[len(p)-1])]
}

var errTooManyProtocols = errors.New("too many protocols")

type memoryProtoBook struct {
	segments protoSegments

	maxProtos int

	lk       sync.RWMutex
	interned map[protocol.ID]protocol.ID
}

var _ pstore.ProtoBook = (*memoryProtoBook)(nil)

type ProtoBookOption func(book *memoryProtoBook) error

func WithMaxProtocols(num int) ProtoBookOption {
	return func(pb *memoryProtoBook) error {
		pb.maxProtos = num
		return nil
	}
}

func NewProtoBook(opts ...ProtoBookOption) (*memoryProtoBook, error) {
	pb := &memoryProtoBook{
		interned: make(map[protocol.ID]protocol.ID, 256),
		segments: func() (ret protoSegments) {
			for i := range ret {
				ret[i] = &protoSegment{
					protocols: make(map[peer.ID]map[protocol.ID]struct{}),
				}
			}
			return ret
		}(),
		maxProtos: 1024,
	}

	for _, opt := range opts {
		if err := opt(pb); err != nil {
			return nil, err
		}
	}
	return pb, nil
}

func (pb *memoryProtoBook) internProtocol(proto protocol.ID) protocol.ID {
	// check if it is interned with the read lock
	pb.lk.RLock()
	interned, ok := pb.interned[proto]
	pb.lk.RUnlock()

	if ok {
		return interned
	}

	// intern with the write lock
	pb.lk.Lock()
	defer pb.lk.Unlock()

	// check again in case it got interned in between locks
	interned, ok = pb.interned[proto]
	if ok {
		return interned
	}

	pb.interned[proto] = proto
	return proto
}

func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...protocol.ID) error {
	if len(protos) > pb.maxProtos {
		return errTooManyProtocols
	}

	newprotos := make(map[protocol.ID]struct{}, len(protos))
	for _, proto := range protos {
		newprotos[pb.internProtocol(proto)] = struct{}{}
	}

	s := pb.segments.get(p)
	s.Lock()
	s.protocols[p] = newprotos
	s.Unlock()

	return nil
}

func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...protocol.ID) error {
	s := pb.segments.get(p)
	s.Lock()
	defer s.Unlock()

	protomap, ok := s.protocols[p]
	if !ok {
		protomap = make(map[protocol.ID]struct{})
		s.protocols[p] = protomap
	}
	if len(protomap)+len(protos) > pb.maxProtos {
		return errTooManyProtocols
	}

	for _, proto := range protos {
		protomap[pb.internProtocol(proto)] = struct{}{}
	}
	return nil
}

func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]protocol.ID, error) {
	s := pb.segments.get(p)
	s.RLock()
	defer s.RUnlock()

	out := make([]protocol.ID, 0, len(s.protocols[p]))
	for k := range s.protocols[p] {
		out = append(out, k)
	}

	return out, nil
}

func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...protocol.ID) error {
	s := pb.segments.get(p)
	s.Lock()
	defer s.Unlock()

	protomap, ok := s.protocols[p]
	if !ok {
		// nothing to remove.
		return nil
	}

	for _, proto := range protos {
		delete(protomap, pb.internProtocol(proto))
	}
	return nil
}

func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...protocol.ID) ([]protocol.ID, error) {
	s := pb.segments.get(p)
	s.RLock()
	defer s.RUnlock()

	out := make([]protocol.ID, 0, len(protos))
	for _, proto := range protos {
		if _, ok := s.protocols[p][proto]; ok {
			out = append(out, proto)
		}
	}

	return out, nil
}

func (pb *memoryProtoBook) FirstSupportedProtocol(p peer.ID, protos ...protocol.ID) (protocol.ID, error) {
	s := pb.segments.get(p)
	s.RLock()
	defer s.RUnlock()

	for _, proto := range protos {
		if _, ok := s.protocols[p][proto]; ok {
			return proto, nil
		}
	}
	return "", nil
}

func (pb *memoryProtoBook) RemovePeer(p peer.ID) {
	s := pb.segments.get(p)
	s.Lock()
	delete(s.protocols, p)
	s.Unlock()
}
50 vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/sorting.go (generated, vendored, Normal file)
@@ -0,0 +1,50 @@
package pstoremem

import (
	"bytes"

	ma "github.com/multiformats/go-multiaddr"
	mafmt "github.com/multiformats/go-multiaddr-fmt"
	manet "github.com/multiformats/go-multiaddr/net"
)

func isFDCostlyTransport(a ma.Multiaddr) bool {
	return mafmt.TCP.Matches(a)
}

type addrList []ma.Multiaddr

func (al addrList) Len() int      { return len(al) }
func (al addrList) Swap(i, j int) { al[i], al[j] = al[j], al[i] }

func (al addrList) Less(i, j int) bool {
	a := al[i]
	b := al[j]

	// dial localhost addresses next, they should fail immediately
	lba := manet.IsIPLoopback(a)
	lbb := manet.IsIPLoopback(b)
	if lba && !lbb {
		return true
	}

	// dial utp and similar 'non-fd-consuming' addresses first
	fda := isFDCostlyTransport(a)
	fdb := isFDCostlyTransport(b)
	if !fda {
		return fdb
	}

	// if 'b' doesnt take a file descriptor
	if !fdb {
		return false
	}

	// if 'b' is loopback and both take file descriptors
	if lbb {
		return false
	}

	// for the rest, just sort by bytes
	return bytes.Compare(a.Bytes(), b.Bytes()) > 0
}