Integrate BACKBEAT SDK and resolve KACHING license validation
Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
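The "file-based secrets" item is the part of this change most easily illustrated in isolation. The sketch below shows only the conventional Docker-secrets pattern (read the file mounted under /run/secrets, fall back to an environment variable); the secret name, environment variable, and helper function are assumptions for illustration, not the actual CHORUS code.

```go
package config

import (
	"os"
	"strings"
)

// loadLicenseCredential is a hypothetical helper illustrating file-based
// Docker secrets: the secret is mounted as a file under /run/secrets, so we
// prefer that file and fall back to an environment variable for local runs.
func loadLicenseCredential() string {
	// Assumed secret name; the real stack may mount it under a different path.
	if data, err := os.ReadFile("/run/secrets/kaching_license"); err == nil {
		return strings.TrimSpace(string(data))
	}
	return strings.TrimSpace(os.Getenv("KACHING_LICENSE_ID"))
}
```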
vendor/github.com/libp2p/go-libp2p-kad-dht/CODEOWNERS (generated, vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
# CODEOWNERS

# default owner is the libp2p team
*.go @libp2p/go-libp2p-maintainers @guillaumemichel
/pb/ @libp2p/go-libp2p-maintainers @guillaumemichel

# dual is an application for IPFS
/dual/ @libp2p/kubo-maintainers @guillaumemichel
# fullrt is IPFS specific
/fullrt/ @libp2p/kubo-maintainers @guillaumemichel
# providers describe the IPFS specific providers
/providers/ @libp2p/kubo-maintainers @guillaumemichel
# records are IPFS specific
/records.go @libp2p/kubo-maintainers @guillaumemichel
/records_test.go @libp2p/kubo-maintainers @guillaumemichel
vendor/github.com/libp2p/go-libp2p-kad-dht/LICENSE (generated, vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 Protocol Labs, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
vendor/github.com/libp2p/go-libp2p-kad-dht/README.md (generated, vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
# go-libp2p-kad-dht

[](https://protocol.ai)
[](https://libp2p.io)
[](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht)
[](https://discuss.libp2p.io)

> A Go implementation of [libp2p Kademlia DHT specification](https://github.com/libp2p/specs/tree/master/kad-dht)

## Table of Contents

- [Install](#install)
- [Usage](#usage)
- [Optimizations](#optimizations)
- [Contribute](#contribute)
- [Maintainers](#maintainers)
- [License](#license)

## Install

```sh
go get github.com/libp2p/go-libp2p-kad-dht
```

## Optimizations

Client-side optimizations are described in [optimizations.md](./optimizations.md)

## Usage

Go to https://godoc.org/github.com/libp2p/go-libp2p-kad-dht.

## Contribute

Contributions welcome. Please check out [the issues](https://github.com/libp2p/go-libp2p-kad-dht/issues).

Check out our [contributing document](https://github.com/libp2p/community/blob/master/CONTRIBUTE.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to libp2p are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).

<!-- Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. -->

## Maintainers

- [@ipfs/kubo-maintainers](https://github.com/orgs/ipfs/teams/kubo-maintainers)
- [@libp2p/go-libp2p-maintainers](https://github.com/orgs/libp2p/teams/go-libp2p-maintainers)
- [@guillaumemichel](https://github.com/guillaumemichel)

See [CODEOWNERS](./CODEOWNERS).

## License

[MIT](LICENSE) © Protocol Labs Inc.
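The README defers usage to the godoc reference, so here is a minimal, hedged construction sketch using only APIs visible in this vendored diff (dht.New, Mode, ModeClient, Bootstrap, Close). The libp2p host setup is an assumption for the sake of a runnable example.

```go
package main

import (
	"context"
	"log"

	"github.com/libp2p/go-libp2p"
	dht "github.com/libp2p/go-libp2p-kad-dht"
)

func main() {
	ctx := context.Background()

	// Assumed host construction; any libp2p host works here.
	h, err := libp2p.New()
	if err != nil {
		log.Fatal(err)
	}

	// Build a client-mode DHT using options shown in dht_options.go below.
	kadDHT, err := dht.New(ctx, h, dht.Mode(dht.ModeClient))
	if err != nil {
		log.Fatal(err)
	}
	defer kadDHT.Close()

	// Bootstrap populates the routing table in the background.
	if err := kadDHT.Bootstrap(ctx); err != nil {
		log.Fatal(err)
	}
}
```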
vendor/github.com/libp2p/go-libp2p-kad-dht/dht.go (generated, vendored, new file, 940 lines)
@@ -0,0 +1,940 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-routing-helpers/tracing"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal/net"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/netsize"
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/providers"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/rtrefresh"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
recpb "github.com/libp2p/go-libp2p-record/pb"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
logging "github.com/ipfs/go-log"
|
||||
"github.com/multiformats/go-base32"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"go.opencensus.io/tag"
|
||||
"go.uber.org/multierr"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const tracer = tracing.Tracer("go-libp2p-kad-dht")
|
||||
const dhtName = "IpfsDHT"
|
||||
|
||||
var (
|
||||
logger = logging.Logger("dht")
|
||||
baseLogger = logger.Desugar()
|
||||
|
||||
rtFreezeTimeout = 1 * time.Minute
|
||||
)
|
||||
|
||||
const (
|
||||
// BaseConnMgrScore is the base of the score set on the connection
|
||||
// manager "kbucket" tag. It is added with the common prefix length
|
||||
// between two peer IDs.
|
||||
baseConnMgrScore = 5
|
||||
)
|
||||
|
||||
type mode int
|
||||
|
||||
const (
|
||||
modeServer mode = iota + 1
|
||||
modeClient
|
||||
)
|
||||
|
||||
const (
|
||||
kad1 protocol.ID = "/kad/1.0.0"
|
||||
)
|
||||
|
||||
const (
|
||||
kbucketTag = "kbucket"
|
||||
protectedBuckets = 2
|
||||
)
|
||||
|
||||
// IpfsDHT is an implementation of Kademlia with S/Kademlia modifications.
|
||||
// It is used to implement the base Routing module.
|
||||
type IpfsDHT struct {
|
||||
host host.Host // the network services we need
|
||||
self peer.ID // Local peer (yourself)
|
||||
selfKey kb.ID
|
||||
peerstore peerstore.Peerstore // Peer Registry
|
||||
|
||||
datastore ds.Datastore // Local data
|
||||
|
||||
routingTable *kb.RoutingTable // Array of routing tables for differently distanced nodes
|
||||
// providerStore stores & manages the provider records for this Dht peer.
|
||||
providerStore providers.ProviderStore
|
||||
|
||||
// manages Routing Table refresh
|
||||
rtRefreshManager *rtrefresh.RtRefreshManager
|
||||
|
||||
birth time.Time // When this peer started up
|
||||
|
||||
Validator record.Validator
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
|
||||
protoMessenger *pb.ProtocolMessenger
|
||||
msgSender pb.MessageSenderWithDisconnect
|
||||
|
||||
stripedPutLocks [256]sync.Mutex
|
||||
|
||||
// DHT protocols we query with. We'll only add peers to our routing
|
||||
// table if they speak these protocols.
|
||||
protocols []protocol.ID
|
||||
|
||||
// DHT protocols we can respond to.
|
||||
serverProtocols []protocol.ID
|
||||
|
||||
auto ModeOpt
|
||||
mode mode
|
||||
modeLk sync.Mutex
|
||||
|
||||
bucketSize int
|
||||
alpha int // The concurrency parameter per path
|
||||
beta int // The number of peers closest to a target that must have responded for a query path to terminate
|
||||
|
||||
queryPeerFilter QueryFilterFunc
|
||||
routingTablePeerFilter RouteTableFilterFunc
|
||||
rtPeerDiversityFilter peerdiversity.PeerIPGroupFilter
|
||||
|
||||
autoRefresh bool
|
||||
|
||||
// timeout for the lookupCheck operation
|
||||
lookupCheckTimeout time.Duration
|
||||
// number of concurrent lookupCheck operations
|
||||
lookupCheckCapacity int
|
||||
lookupChecksLk sync.Mutex
|
||||
|
||||
// A function returning a set of bootstrap peers to fallback on if all other attempts to fix
|
||||
// the routing table fail (or, e.g., this is the first time this node is
|
||||
// connecting to the network).
|
||||
bootstrapPeers func() []peer.AddrInfo
|
||||
|
||||
maxRecordAge time.Duration
|
||||
|
||||
// Allows disabling dht subsystems. These should _only_ be set on
|
||||
// "forked" DHTs (e.g., DHTs with custom protocols and/or private
|
||||
// networks).
|
||||
enableProviders, enableValues bool
|
||||
|
||||
disableFixLowPeers bool
|
||||
fixLowPeersChan chan struct{}
|
||||
|
||||
addPeerToRTChan chan peer.ID
|
||||
refreshFinishedCh chan struct{}
|
||||
|
||||
rtFreezeTimeout time.Duration
|
||||
|
||||
// network size estimator
|
||||
nsEstimator *netsize.Estimator
|
||||
enableOptProv bool
|
||||
|
||||
// a bound channel to limit asynchronicity of in-flight ADD_PROVIDER RPCs
|
||||
optProvJobsPool chan struct{}
|
||||
|
||||
// configuration variables for tests
|
||||
testAddressUpdateProcessing bool
|
||||
|
||||
// addrFilter is used to filter the addresses we put into the peer store.
|
||||
// Mostly used to filter out localhost and local addresses.
|
||||
addrFilter func([]ma.Multiaddr) []ma.Multiaddr
|
||||
}
|
||||
|
||||
// Assert that IPFS assumptions about interfaces aren't broken. These aren't a
|
||||
// guarantee, but we can use them to aid refactoring.
|
||||
var (
|
||||
_ routing.ContentRouting = (*IpfsDHT)(nil)
|
||||
_ routing.Routing = (*IpfsDHT)(nil)
|
||||
_ routing.PeerRouting = (*IpfsDHT)(nil)
|
||||
_ routing.PubKeyFetcher = (*IpfsDHT)(nil)
|
||||
_ routing.ValueStore = (*IpfsDHT)(nil)
|
||||
)
|
||||
|
||||
// New creates a new DHT with the specified host and options.
|
||||
// Please note that being connected to a DHT peer does not necessarily imply that it's also in the DHT Routing Table.
|
||||
// If the Routing Table has more than "minRTRefreshThreshold" peers, we consider a peer as a Routing Table candidate ONLY when
|
||||
// we successfully get a query response from it OR if it send us a query.
|
||||
func New(ctx context.Context, h host.Host, options ...Option) (*IpfsDHT, error) {
|
||||
var cfg dhtcfg.Config
|
||||
if err := cfg.Apply(append([]Option{dhtcfg.Defaults}, options...)...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cfg.ApplyFallbacks(h); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := cfg.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dht, err := makeDHT(h, cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create DHT, err=%s", err)
|
||||
}
|
||||
|
||||
dht.autoRefresh = cfg.RoutingTable.AutoRefresh
|
||||
|
||||
dht.maxRecordAge = cfg.MaxRecordAge
|
||||
dht.enableProviders = cfg.EnableProviders
|
||||
dht.enableValues = cfg.EnableValues
|
||||
dht.disableFixLowPeers = cfg.DisableFixLowPeers
|
||||
|
||||
dht.Validator = cfg.Validator
|
||||
dht.msgSender = net.NewMessageSenderImpl(h, dht.protocols)
|
||||
dht.protoMessenger, err = pb.NewProtocolMessenger(dht.msgSender)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dht.testAddressUpdateProcessing = cfg.TestAddressUpdateProcessing
|
||||
|
||||
dht.auto = cfg.Mode
|
||||
switch cfg.Mode {
|
||||
case ModeAuto, ModeClient:
|
||||
dht.mode = modeClient
|
||||
case ModeAutoServer, ModeServer:
|
||||
dht.mode = modeServer
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid dht mode %d", cfg.Mode)
|
||||
}
|
||||
|
||||
if dht.mode == modeServer {
|
||||
if err := dht.moveToServerMode(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// register for event bus and network notifications
|
||||
if err := dht.startNetworkSubscriber(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// go-routine to make sure we ALWAYS have RT peer addresses in the peerstore
|
||||
// since RT membership is decoupled from connectivity
|
||||
go dht.persistRTPeersInPeerStore()
|
||||
|
||||
dht.rtPeerLoop()
|
||||
|
||||
// Fill routing table with currently connected peers that are DHT servers
|
||||
for _, p := range dht.host.Network().Peers() {
|
||||
dht.peerFound(p)
|
||||
}
|
||||
|
||||
dht.rtRefreshManager.Start()
|
||||
|
||||
// listens to the fix low peers chan and tries to fix the Routing Table
|
||||
if !dht.disableFixLowPeers {
|
||||
dht.runFixLowPeersLoop()
|
||||
}
|
||||
|
||||
return dht, nil
|
||||
}
|
||||
|
||||
// NewDHT creates a new DHT object with the given peer as the 'local' host.
|
||||
// IpfsDHT's initialized with this function will respond to DHT requests,
|
||||
// whereas IpfsDHT's initialized with NewDHTClient will not.
|
||||
func NewDHT(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
|
||||
dht, err := New(ctx, h, Datastore(dstore))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dht
|
||||
}
|
||||
|
||||
// NewDHTClient creates a new DHT object with the given peer as the 'local'
|
||||
// host. IpfsDHT clients initialized with this function will not respond to DHT
|
||||
// requests. If you need a peer to respond to DHT requests, use NewDHT instead.
|
||||
func NewDHTClient(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
|
||||
dht, err := New(ctx, h, Datastore(dstore), Mode(ModeClient))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dht
|
||||
}
|
||||
|
||||
func makeDHT(h host.Host, cfg dhtcfg.Config) (*IpfsDHT, error) {
|
||||
var protocols, serverProtocols []protocol.ID
|
||||
|
||||
v1proto := cfg.ProtocolPrefix + kad1
|
||||
|
||||
if cfg.V1ProtocolOverride != "" {
|
||||
v1proto = cfg.V1ProtocolOverride
|
||||
}
|
||||
|
||||
protocols = []protocol.ID{v1proto}
|
||||
serverProtocols = []protocol.ID{v1proto}
|
||||
|
||||
dht := &IpfsDHT{
|
||||
datastore: cfg.Datastore,
|
||||
self: h.ID(),
|
||||
selfKey: kb.ConvertPeerID(h.ID()),
|
||||
peerstore: h.Peerstore(),
|
||||
host: h,
|
||||
birth: time.Now(),
|
||||
protocols: protocols,
|
||||
serverProtocols: serverProtocols,
|
||||
bucketSize: cfg.BucketSize,
|
||||
alpha: cfg.Concurrency,
|
||||
beta: cfg.Resiliency,
|
||||
lookupCheckCapacity: cfg.LookupCheckConcurrency,
|
||||
queryPeerFilter: cfg.QueryPeerFilter,
|
||||
routingTablePeerFilter: cfg.RoutingTable.PeerFilter,
|
||||
rtPeerDiversityFilter: cfg.RoutingTable.DiversityFilter,
|
||||
addrFilter: cfg.AddressFilter,
|
||||
|
||||
fixLowPeersChan: make(chan struct{}, 1),
|
||||
|
||||
addPeerToRTChan: make(chan peer.ID),
|
||||
refreshFinishedCh: make(chan struct{}),
|
||||
|
||||
enableOptProv: cfg.EnableOptimisticProvide,
|
||||
optProvJobsPool: nil,
|
||||
}
|
||||
|
||||
var maxLastSuccessfulOutboundThreshold time.Duration
|
||||
|
||||
// The threshold is calculated based on the expected amount of time that should pass before we
|
||||
// query a peer as part of our refresh cycle.
|
||||
// To grok the Math Wizardy that produced these exact equations, please be patient as a document explaining it will
|
||||
// be published soon.
|
||||
if cfg.Concurrency < cfg.BucketSize { // (alpha < K)
|
||||
l1 := math.Log(float64(1) / float64(cfg.BucketSize)) // (Log(1/K))
|
||||
l2 := math.Log(float64(1) - (float64(cfg.Concurrency) / float64(cfg.BucketSize))) // Log(1 - (alpha / K))
|
||||
maxLastSuccessfulOutboundThreshold = time.Duration(l1 / l2 * float64(cfg.RoutingTable.RefreshInterval))
|
||||
} else {
|
||||
maxLastSuccessfulOutboundThreshold = cfg.RoutingTable.RefreshInterval
|
||||
}
|
||||
|
||||
// construct routing table
|
||||
// use twice the theoritical usefulness threhold to keep older peers around longer
|
||||
rt, err := makeRoutingTable(dht, cfg, 2*maxLastSuccessfulOutboundThreshold)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct routing table,err=%s", err)
|
||||
}
|
||||
dht.routingTable = rt
|
||||
dht.bootstrapPeers = cfg.BootstrapPeers
|
||||
|
||||
dht.lookupCheckTimeout = cfg.RoutingTable.RefreshQueryTimeout
|
||||
|
||||
// init network size estimator
|
||||
dht.nsEstimator = netsize.NewEstimator(h.ID(), rt, cfg.BucketSize)
|
||||
|
||||
if dht.enableOptProv {
|
||||
dht.optProvJobsPool = make(chan struct{}, cfg.OptimisticProvideJobsPoolSize)
|
||||
}
|
||||
|
||||
// rt refresh manager
|
||||
dht.rtRefreshManager, err = makeRtRefreshManager(dht, cfg, maxLastSuccessfulOutboundThreshold)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct RT Refresh Manager,err=%s", err)
|
||||
}
|
||||
|
||||
// create a tagged context derived from the original context
|
||||
// the DHT context should be done when the process is closed
|
||||
dht.ctx, dht.cancel = context.WithCancel(dht.newContextWithLocalTags(context.Background()))
|
||||
|
||||
if cfg.ProviderStore != nil {
|
||||
dht.providerStore = cfg.ProviderStore
|
||||
} else {
|
||||
dht.providerStore, err = providers.NewProviderManager(h.ID(), dht.peerstore, cfg.Datastore)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing default provider manager (%v)", err)
|
||||
}
|
||||
}
|
||||
|
||||
dht.rtFreezeTimeout = rtFreezeTimeout
|
||||
|
||||
return dht, nil
|
||||
}
|
||||
|
||||
// lookupCheck performs a lookup request to a remote peer.ID, verifying that it is able to
|
||||
// answer it correctly
|
||||
func (dht *IpfsDHT) lookupCheck(ctx context.Context, p peer.ID) error {
|
||||
// lookup request to p requesting for its own peer.ID
|
||||
peerids, err := dht.protoMessenger.GetClosestPeers(ctx, p, p)
|
||||
// p should return at least its own peerid
|
||||
if err == nil && len(peerids) == 0 {
|
||||
return fmt.Errorf("peer %s failed to return its closest peers, got %d", p, len(peerids))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func makeRtRefreshManager(dht *IpfsDHT, cfg dhtcfg.Config, maxLastSuccessfulOutboundThreshold time.Duration) (*rtrefresh.RtRefreshManager, error) {
|
||||
keyGenFnc := func(cpl uint) (string, error) {
|
||||
p, err := dht.routingTable.GenRandPeerID(cpl)
|
||||
return string(p), err
|
||||
}
|
||||
|
||||
queryFnc := func(ctx context.Context, key string) error {
|
||||
_, err := dht.GetClosestPeers(ctx, key)
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := rtrefresh.NewRtRefreshManager(
|
||||
dht.host, dht.routingTable, cfg.RoutingTable.AutoRefresh,
|
||||
keyGenFnc,
|
||||
queryFnc,
|
||||
dht.lookupCheck,
|
||||
cfg.RoutingTable.RefreshQueryTimeout,
|
||||
cfg.RoutingTable.RefreshInterval,
|
||||
maxLastSuccessfulOutboundThreshold,
|
||||
dht.refreshFinishedCh)
|
||||
|
||||
return r, err
|
||||
}
|
||||
|
||||
func makeRoutingTable(dht *IpfsDHT, cfg dhtcfg.Config, maxLastSuccessfulOutboundThreshold time.Duration) (*kb.RoutingTable, error) {
|
||||
// make a Routing Table Diversity Filter
|
||||
var filter *peerdiversity.Filter
|
||||
if dht.rtPeerDiversityFilter != nil {
|
||||
df, err := peerdiversity.NewFilter(dht.rtPeerDiversityFilter, "rt/diversity", func(p peer.ID) int {
|
||||
return kb.CommonPrefixLen(dht.selfKey, kb.ConvertPeerID(p))
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct peer diversity filter: %w", err)
|
||||
}
|
||||
|
||||
filter = df
|
||||
}
|
||||
|
||||
rt, err := kb.NewRoutingTable(cfg.BucketSize, dht.selfKey, time.Minute, dht.host.Peerstore(), maxLastSuccessfulOutboundThreshold, filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmgr := dht.host.ConnManager()
|
||||
|
||||
rt.PeerAdded = func(p peer.ID) {
|
||||
commonPrefixLen := kb.CommonPrefixLen(dht.selfKey, kb.ConvertPeerID(p))
|
||||
if commonPrefixLen < protectedBuckets {
|
||||
cmgr.Protect(p, kbucketTag)
|
||||
} else {
|
||||
cmgr.TagPeer(p, kbucketTag, baseConnMgrScore)
|
||||
}
|
||||
}
|
||||
rt.PeerRemoved = func(p peer.ID) {
|
||||
cmgr.Unprotect(p, kbucketTag)
|
||||
cmgr.UntagPeer(p, kbucketTag)
|
||||
|
||||
// try to fix the RT
|
||||
dht.fixRTIfNeeded()
|
||||
}
|
||||
|
||||
return rt, err
|
||||
}
|
||||
|
||||
// ProviderStore returns the provider storage object for storing and retrieving provider records.
|
||||
func (dht *IpfsDHT) ProviderStore() providers.ProviderStore {
|
||||
return dht.providerStore
|
||||
}
|
||||
|
||||
// GetRoutingTableDiversityStats returns the diversity stats for the Routing Table.
|
||||
func (dht *IpfsDHT) GetRoutingTableDiversityStats() []peerdiversity.CplDiversityStats {
|
||||
return dht.routingTable.GetDiversityStats()
|
||||
}
|
||||
|
||||
// Mode allows introspection of the operation mode of the DHT
|
||||
func (dht *IpfsDHT) Mode() ModeOpt {
|
||||
return dht.auto
|
||||
}
|
||||
|
||||
// runFixLowPeersLoop manages simultaneous requests to fixLowPeers
|
||||
func (dht *IpfsDHT) runFixLowPeersLoop() {
|
||||
dht.wg.Add(1)
|
||||
go func() {
|
||||
defer dht.wg.Done()
|
||||
|
||||
dht.fixLowPeers()
|
||||
|
||||
ticker := time.NewTicker(periodicBootstrapInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-dht.fixLowPeersChan:
|
||||
case <-ticker.C:
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
dht.fixLowPeers()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// fixLowPeers tries to get more peers into the routing table if we're below the threshold
|
||||
func (dht *IpfsDHT) fixLowPeers() {
|
||||
if dht.routingTable.Size() > minRTRefreshThreshold {
|
||||
return
|
||||
}
|
||||
|
||||
// we try to add all peers we are connected to to the Routing Table
|
||||
// in case they aren't already there.
|
||||
for _, p := range dht.host.Network().Peers() {
|
||||
dht.peerFound(p)
|
||||
}
|
||||
|
||||
// TODO Active Bootstrapping
|
||||
// We should first use non-bootstrap peers we knew of from previous
|
||||
// snapshots of the Routing Table before we connect to the bootstrappers.
|
||||
// See https://github.com/libp2p/go-libp2p-kad-dht/issues/387.
|
||||
if dht.routingTable.Size() == 0 && dht.bootstrapPeers != nil {
|
||||
bootstrapPeers := dht.bootstrapPeers()
|
||||
if len(bootstrapPeers) == 0 {
|
||||
// No point in continuing, we have no peers!
|
||||
return
|
||||
}
|
||||
|
||||
found := 0
|
||||
for _, i := range rand.Perm(len(bootstrapPeers)) {
|
||||
ai := bootstrapPeers[i]
|
||||
err := dht.Host().Connect(dht.ctx, ai)
|
||||
if err == nil {
|
||||
found++
|
||||
} else {
|
||||
logger.Warnw("failed to bootstrap", "peer", ai.ID, "error", err)
|
||||
}
|
||||
|
||||
// Wait for two bootstrap peers, or try them all.
|
||||
//
|
||||
// Why two? In theory, one should be enough
|
||||
// normally. However, if the network were to
|
||||
// restart and everyone connected to just one
|
||||
// bootstrapper, we'll end up with a mostly
|
||||
// partitioned network.
|
||||
//
|
||||
// So we always bootstrap with two random peers.
|
||||
if found == maxNBoostrappers {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we still don't have peers in our routing table(probably because Identify hasn't completed),
|
||||
// there is no point in triggering a Refresh.
|
||||
if dht.routingTable.Size() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if dht.autoRefresh {
|
||||
dht.rtRefreshManager.RefreshNoWait()
|
||||
}
|
||||
}
|
||||
|
||||
// TODO This is hacky, horrible and the programmer needs to have his mother called a hamster.
|
||||
// SHOULD be removed once https://github.com/libp2p/go-libp2p/issues/800 goes in.
|
||||
func (dht *IpfsDHT) persistRTPeersInPeerStore() {
|
||||
tickr := time.NewTicker(peerstore.RecentlyConnectedAddrTTL / 3)
|
||||
defer tickr.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-tickr.C:
|
||||
ps := dht.routingTable.ListPeers()
|
||||
for _, p := range ps {
|
||||
dht.peerstore.UpdateAddrs(p, peerstore.RecentlyConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL)
|
||||
}
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getLocal attempts to retrieve the value from the datastore.
|
||||
//
|
||||
// returns nil, nil when either nothing is found or the value found doesn't properly validate.
|
||||
// returns nil, some_error when there's a *datastore* error (i.e., something goes very wrong)
|
||||
func (dht *IpfsDHT) getLocal(ctx context.Context, key string) (*recpb.Record, error) {
|
||||
logger.Debugw("finding value in datastore", "key", internal.LoggableRecordKeyString(key))
|
||||
|
||||
rec, err := dht.getRecordFromDatastore(ctx, mkDsKey(key))
|
||||
if err != nil {
|
||||
logger.Warnw("get local failed", "key", internal.LoggableRecordKeyString(key), "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Double check the key. Can't hurt.
|
||||
if rec != nil && string(rec.GetKey()) != key {
|
||||
logger.Errorw("BUG: found a DHT record that didn't match it's key", "expected", internal.LoggableRecordKeyString(key), "got", rec.GetKey())
|
||||
return nil, nil
|
||||
|
||||
}
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
// putLocal stores the key value pair in the datastore
|
||||
func (dht *IpfsDHT) putLocal(ctx context.Context, key string, rec *recpb.Record) error {
|
||||
data, err := proto.Marshal(rec)
|
||||
if err != nil {
|
||||
logger.Warnw("failed to put marshal record for local put", "error", err, "key", internal.LoggableRecordKeyString(key))
|
||||
return err
|
||||
}
|
||||
|
||||
return dht.datastore.Put(ctx, mkDsKey(key), data)
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) rtPeerLoop() {
|
||||
dht.wg.Add(1)
|
||||
go func() {
|
||||
defer dht.wg.Done()
|
||||
|
||||
var bootstrapCount uint
|
||||
var isBootsrapping bool
|
||||
var timerCh <-chan time.Time
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timerCh:
|
||||
dht.routingTable.MarkAllPeersIrreplaceable()
|
||||
case p := <-dht.addPeerToRTChan:
|
||||
if dht.routingTable.Size() == 0 {
|
||||
isBootsrapping = true
|
||||
bootstrapCount = 0
|
||||
timerCh = nil
|
||||
}
|
||||
// queryPeer set to true as we only try to add queried peers to the RT
|
||||
newlyAdded, err := dht.routingTable.TryAddPeer(p, true, isBootsrapping)
|
||||
if err != nil {
|
||||
// peer not added.
|
||||
continue
|
||||
}
|
||||
if newlyAdded {
|
||||
// peer was added to the RT, it can now be fixed if needed.
|
||||
dht.fixRTIfNeeded()
|
||||
} else {
|
||||
// the peer is already in our RT, but we just successfully queried it and so let's give it a
|
||||
// bump on the query time so we don't ping it too soon for a liveliness check.
|
||||
dht.routingTable.UpdateLastSuccessfulOutboundQueryAt(p, time.Now())
|
||||
}
|
||||
case <-dht.refreshFinishedCh:
|
||||
bootstrapCount = bootstrapCount + 1
|
||||
if bootstrapCount == 2 {
|
||||
timerCh = time.NewTimer(dht.rtFreezeTimeout).C
|
||||
}
|
||||
|
||||
old := isBootsrapping
|
||||
isBootsrapping = false
|
||||
if old {
|
||||
dht.rtRefreshManager.RefreshNoWait()
|
||||
}
|
||||
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// peerFound verifies whether the found peer advertises DHT protocols
|
||||
// and probe it to make sure it answers DHT queries as expected. If
|
||||
// it fails to answer, it isn't added to the routingTable.
|
||||
func (dht *IpfsDHT) peerFound(p peer.ID) {
|
||||
// if the peer is already in the routing table or the appropriate bucket is
|
||||
// already full, don't try to add the new peer.ID
|
||||
if !dht.routingTable.UsefulNewPeer(p) {
|
||||
return
|
||||
}
|
||||
|
||||
// verify whether the remote peer advertises the right dht protocol
|
||||
b, err := dht.validRTPeer(p)
|
||||
if err != nil {
|
||||
logger.Errorw("failed to validate if peer is a DHT peer", "peer", p, "error", err)
|
||||
} else if b {
|
||||
|
||||
// check if the maximal number of concurrent lookup checks is reached
|
||||
dht.lookupChecksLk.Lock()
|
||||
if dht.lookupCheckCapacity == 0 {
|
||||
dht.lookupChecksLk.Unlock()
|
||||
// drop the new peer.ID if the maximal number of concurrent lookup
|
||||
// checks is reached
|
||||
return
|
||||
}
|
||||
dht.lookupCheckCapacity--
|
||||
dht.lookupChecksLk.Unlock()
|
||||
|
||||
go func() {
|
||||
livelinessCtx, cancel := context.WithTimeout(dht.ctx, dht.lookupCheckTimeout)
|
||||
defer cancel()
|
||||
|
||||
// performing a FIND_NODE query
|
||||
err := dht.lookupCheck(livelinessCtx, p)
|
||||
|
||||
dht.lookupChecksLk.Lock()
|
||||
dht.lookupCheckCapacity++
|
||||
dht.lookupChecksLk.Unlock()
|
||||
|
||||
if err != nil {
|
||||
logger.Debugw("connected peer not answering DHT request as expected", "peer", p, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
// if the FIND_NODE succeeded, the peer is considered as valid
|
||||
dht.validPeerFound(p)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// validPeerFound signals the routingTable that we've found a peer that
|
||||
// supports the DHT protocol, and just answered correctly to a DHT FindPeers
|
||||
func (dht *IpfsDHT) validPeerFound(p peer.ID) {
|
||||
if c := baseLogger.Check(zap.DebugLevel, "peer found"); c != nil {
|
||||
c.Write(zap.String("peer", p.String()))
|
||||
}
|
||||
|
||||
select {
|
||||
case dht.addPeerToRTChan <- p:
|
||||
case <-dht.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// peerStoppedDHT signals the routing table that a peer is unable to responsd to DHT queries anymore.
|
||||
func (dht *IpfsDHT) peerStoppedDHT(p peer.ID) {
|
||||
logger.Debugw("peer stopped dht", "peer", p)
|
||||
// A peer that does not support the DHT protocol is dead for us.
|
||||
// There's no point in talking to anymore till it starts supporting the DHT protocol again.
|
||||
dht.routingTable.RemovePeer(p)
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) fixRTIfNeeded() {
|
||||
select {
|
||||
case dht.fixLowPeersChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// FindLocal looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in.
|
||||
func (dht *IpfsDHT) FindLocal(ctx context.Context, id peer.ID) peer.AddrInfo {
|
||||
_, span := internal.StartSpan(ctx, "IpfsDHT.FindLocal", trace.WithAttributes(attribute.Stringer("PeerID", id)))
|
||||
defer span.End()
|
||||
|
||||
switch dht.host.Network().Connectedness(id) {
|
||||
case network.Connected, network.CanConnect:
|
||||
return dht.peerstore.PeerInfo(id)
|
||||
default:
|
||||
return peer.AddrInfo{}
|
||||
}
|
||||
}
|
||||
|
||||
// nearestPeersToQuery returns the routing tables closest peers.
|
||||
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID {
|
||||
closer := dht.routingTable.NearestPeers(kb.ConvertKey(string(pmes.GetKey())), count)
|
||||
return closer
|
||||
}
|
||||
|
||||
// betterPeersToQuery returns nearestPeersToQuery with some additional filtering
|
||||
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, from peer.ID, count int) []peer.ID {
|
||||
closer := dht.nearestPeersToQuery(pmes, count)
|
||||
|
||||
// no node? nil
|
||||
if closer == nil {
|
||||
logger.Infow("no closer peers to send", from)
|
||||
return nil
|
||||
}
|
||||
|
||||
filtered := make([]peer.ID, 0, len(closer))
|
||||
for _, clp := range closer {
|
||||
|
||||
// == to self? thats bad
|
||||
if clp == dht.self {
|
||||
logger.Error("BUG betterPeersToQuery: attempted to return self! this shouldn't happen...")
|
||||
return nil
|
||||
}
|
||||
// Dont send a peer back themselves
|
||||
if clp == from {
|
||||
continue
|
||||
}
|
||||
|
||||
filtered = append(filtered, clp)
|
||||
}
|
||||
|
||||
// ok seems like closer nodes
|
||||
return filtered
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) setMode(m mode) error {
|
||||
dht.modeLk.Lock()
|
||||
defer dht.modeLk.Unlock()
|
||||
|
||||
if m == dht.mode {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch m {
|
||||
case modeServer:
|
||||
return dht.moveToServerMode()
|
||||
case modeClient:
|
||||
return dht.moveToClientMode()
|
||||
default:
|
||||
return fmt.Errorf("unrecognized dht mode: %d", m)
|
||||
}
|
||||
}
|
||||
|
||||
// moveToServerMode advertises (via libp2p identify updates) that we are able to respond to DHT queries and sets the appropriate stream handlers.
|
||||
// Note: We may support responding to queries with protocols aside from our primary ones in order to support
|
||||
// interoperability with older versions of the DHT protocol.
|
||||
func (dht *IpfsDHT) moveToServerMode() error {
|
||||
dht.mode = modeServer
|
||||
for _, p := range dht.serverProtocols {
|
||||
dht.host.SetStreamHandler(p, dht.handleNewStream)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// moveToClientMode stops advertising (and rescinds advertisements via libp2p identify updates) that we are able to
|
||||
// respond to DHT queries and removes the appropriate stream handlers. We also kill all inbound streams that were
|
||||
// utilizing the handled protocols.
|
||||
// Note: We may support responding to queries with protocols aside from our primary ones in order to support
|
||||
// interoperability with older versions of the DHT protocol.
|
||||
func (dht *IpfsDHT) moveToClientMode() error {
|
||||
dht.mode = modeClient
|
||||
for _, p := range dht.serverProtocols {
|
||||
dht.host.RemoveStreamHandler(p)
|
||||
}
|
||||
|
||||
pset := make(map[protocol.ID]bool)
|
||||
for _, p := range dht.serverProtocols {
|
||||
pset[p] = true
|
||||
}
|
||||
|
||||
for _, c := range dht.host.Network().Conns() {
|
||||
for _, s := range c.GetStreams() {
|
||||
if pset[s.Protocol()] {
|
||||
if s.Stat().Direction == network.DirInbound {
|
||||
_ = s.Reset()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) getMode() mode {
|
||||
dht.modeLk.Lock()
|
||||
defer dht.modeLk.Unlock()
|
||||
return dht.mode
|
||||
}
|
||||
|
||||
// Context returns the DHT's context.
|
||||
func (dht *IpfsDHT) Context() context.Context {
|
||||
return dht.ctx
|
||||
}
|
||||
|
||||
// RoutingTable returns the DHT's routingTable.
|
||||
func (dht *IpfsDHT) RoutingTable() *kb.RoutingTable {
|
||||
return dht.routingTable
|
||||
}
|
||||
|
||||
// Close calls Process Close.
|
||||
func (dht *IpfsDHT) Close() error {
|
||||
dht.cancel()
|
||||
dht.wg.Wait()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
closes := [...]func() error{
|
||||
dht.rtRefreshManager.Close,
|
||||
dht.providerStore.Close,
|
||||
}
|
||||
var errors [len(closes)]error
|
||||
wg.Add(len(errors))
|
||||
for i, c := range closes {
|
||||
go func(i int, c func() error) {
|
||||
defer wg.Done()
|
||||
errors[i] = c()
|
||||
}(i, c)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return multierr.Combine(errors[:]...)
|
||||
}
|
||||
|
||||
func mkDsKey(s string) ds.Key {
|
||||
return ds.NewKey(base32.RawStdEncoding.EncodeToString([]byte(s)))
|
||||
}
|
||||
|
||||
// PeerID returns the DHT node's Peer ID.
|
||||
func (dht *IpfsDHT) PeerID() peer.ID {
|
||||
return dht.self
|
||||
}
|
||||
|
||||
// PeerKey returns a DHT key, converted from the DHT node's Peer ID.
|
||||
func (dht *IpfsDHT) PeerKey() []byte {
|
||||
return kb.ConvertPeerID(dht.self)
|
||||
}
|
||||
|
||||
// Host returns the libp2p host this DHT is operating with.
|
||||
func (dht *IpfsDHT) Host() host.Host {
|
||||
return dht.host
|
||||
}
|
||||
|
||||
// Ping sends a ping message to the passed peer and waits for a response.
|
||||
func (dht *IpfsDHT) Ping(ctx context.Context, p peer.ID) error {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.Ping", trace.WithAttributes(attribute.Stringer("PeerID", p)))
|
||||
defer span.End()
|
||||
return dht.protoMessenger.Ping(ctx, p)
|
||||
}
|
||||
|
||||
// NetworkSize returns the most recent estimation of the DHT network size.
|
||||
// EXPERIMENTAL: We do not provide any guarantees that this method will
|
||||
// continue to exist in the codebase. Use it at your own risk.
|
||||
func (dht *IpfsDHT) NetworkSize() (int32, error) {
|
||||
return dht.nsEstimator.NetworkSize()
|
||||
}
|
||||
|
||||
// newContextWithLocalTags returns a new context.Context with the InstanceID and
|
||||
// PeerID keys populated. It will also take any extra tags that need adding to
|
||||
// the context as tag.Mutators.
|
||||
func (dht *IpfsDHT) newContextWithLocalTags(ctx context.Context, extraTags ...tag.Mutator) context.Context {
|
||||
extraTags = append(
|
||||
extraTags,
|
||||
tag.Upsert(metrics.KeyPeerID, dht.self.String()),
|
||||
tag.Upsert(metrics.KeyInstanceID, fmt.Sprintf("%p", dht)),
|
||||
)
|
||||
ctx, _ = tag.New(
|
||||
ctx,
|
||||
extraTags...,
|
||||
) // ignoring error as it is unrelated to the actual function of this code.
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) maybeAddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
||||
// Don't add addresses for self or our connected peers. We have better ones.
|
||||
if p == dht.self || dht.host.Network().Connectedness(p) == network.Connected {
|
||||
return
|
||||
}
|
||||
dht.peerstore.AddAddrs(p, dht.filterAddrs(addrs), ttl)
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) filterAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
if f := dht.addrFilter; f != nil {
|
||||
return f(addrs)
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
vendor/github.com/libp2p/go-libp2p-kad-dht/dht_bootstrap.go (generated, vendored, new file, 84 lines)
@@ -0,0 +1,84 @@
package dht

import (
	"context"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/multiformats/go-multiaddr"
)

// DefaultBootstrapPeers is a set of public DHT bootstrap peers provided by libp2p.
var DefaultBootstrapPeers []multiaddr.Multiaddr

// Minimum number of peers in the routing table. If we drop below this and we
// see a new peer, we trigger a bootstrap round.
var minRTRefreshThreshold = 10

const (
	periodicBootstrapInterval = 2 * time.Minute
	maxNBoostrappers          = 2
)

func init() {
	for _, s := range []string{
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
		"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
		"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", // mars.i.ipfs.io
	} {
		ma, err := multiaddr.NewMultiaddr(s)
		if err != nil {
			panic(err)
		}
		DefaultBootstrapPeers = append(DefaultBootstrapPeers, ma)
	}
}

// GetDefaultBootstrapPeerAddrInfos returns the peer.AddrInfos for the default
// bootstrap peers so we can use these for initializing the DHT by passing these to the
// BootstrapPeers(...) option.
func GetDefaultBootstrapPeerAddrInfos() []peer.AddrInfo {
	ds := make([]peer.AddrInfo, 0, len(DefaultBootstrapPeers))

	for i := range DefaultBootstrapPeers {
		info, err := peer.AddrInfoFromP2pAddr(DefaultBootstrapPeers[i])
		if err != nil {
			logger.Errorw("failed to convert bootstrapper address to peer addr info", "address",
				DefaultBootstrapPeers[i].String(), err, "err")
			continue
		}
		ds = append(ds, *info)
	}
	return ds
}

// Bootstrap tells the DHT to get into a bootstrapped state satisfying the
// IpfsRouter interface.
func (dht *IpfsDHT) Bootstrap(ctx context.Context) (err error) {
	_, end := tracer.Bootstrap(dhtName, ctx)
	defer func() { end(err) }()

	dht.fixRTIfNeeded()
	dht.rtRefreshManager.RefreshNoWait()
	return nil
}

// RefreshRoutingTable tells the DHT to refresh it's routing tables.
//
// The returned channel will block until the refresh finishes, then yield the
// error and close. The channel is buffered and safe to ignore.
func (dht *IpfsDHT) RefreshRoutingTable() <-chan error {
	return dht.rtRefreshManager.Refresh(false)
}

// ForceRefresh acts like RefreshRoutingTable but forces the DHT to refresh all
// buckets in the Routing Table irrespective of when they were last refreshed.
//
// The returned channel will block until the refresh finishes, then yield the
// error and close. The channel is buffered and safe to ignore.
func (dht *IpfsDHT) ForceRefresh() <-chan error {
	return dht.rtRefreshManager.Refresh(true)
}
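GetDefaultBootstrapPeerAddrInfos above is intended to feed the BootstrapPeers(...) option named in its doc comment. A short hedged sketch of that wiring follows; the helper function is hypothetical and only the option and methods shown in this diff are relied upon.

```go
package main

import (
	"context"

	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/host"
)

// newBootstrappedDHT is a hypothetical helper: it seeds the DHT with libp2p's
// default bootstrappers via the BootstrapPeers(...) option, then triggers an
// initial bootstrap round.
func newBootstrappedDHT(ctx context.Context, h host.Host) (*dht.IpfsDHT, error) {
	d, err := dht.New(ctx, h,
		dht.BootstrapPeers(dht.GetDefaultBootstrapPeerAddrInfos()...),
	)
	if err != nil {
		return nil, err
	}
	// Kick off routing table population in the background.
	if err := d.Bootstrap(ctx); err != nil {
		return nil, err
	}
	return d, nil
}
```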
vendor/github.com/libp2p/go-libp2p-kad-dht/dht_filters.go (generated, vendored, new file, 240 lines)
@@ -0,0 +1,240 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/google/gopacket/routing"
|
||||
netroute "github.com/libp2p/go-netroute"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
|
||||
dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
)
|
||||
|
||||
// QueryFilterFunc is a filter applied when considering peers to dial when querying
|
||||
type QueryFilterFunc = dhtcfg.QueryFilterFunc
|
||||
|
||||
// RouteTableFilterFunc is a filter applied when considering connections to keep in
|
||||
// the local route table.
|
||||
type RouteTableFilterFunc = dhtcfg.RouteTableFilterFunc
|
||||
|
||||
var publicCIDR6 = "2000::/3"
|
||||
var public6 *net.IPNet
|
||||
|
||||
func init() {
|
||||
_, public6, _ = net.ParseCIDR(publicCIDR6)
|
||||
}
|
||||
|
||||
// isPublicAddr follows the logic of manet.IsPublicAddr, except it uses
|
||||
// a stricter definition of "public" for ipv6: namely "is it in 2000::/3"?
|
||||
func isPublicAddr(a ma.Multiaddr) bool {
|
||||
ip, err := manet.ToIP(a)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return !inAddrRange(ip, manet.Private4) && !inAddrRange(ip, manet.Unroutable4)
|
||||
}
|
||||
|
||||
return public6.Contains(ip)
|
||||
}
|
||||
|
||||
// isPrivateAddr follows the logic of manet.IsPrivateAddr, except that
|
||||
// it uses a stricter definition of "public" for ipv6
|
||||
func isPrivateAddr(a ma.Multiaddr) bool {
|
||||
ip, err := manet.ToIP(a)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if ip.To4() != nil {
|
||||
return inAddrRange(ip, manet.Private4)
|
||||
}
|
||||
|
||||
return !public6.Contains(ip) && !inAddrRange(ip, manet.Unroutable6)
|
||||
}
|
||||
|
||||
// PublicQueryFilter returns true if the peer is suspected of being publicly accessible
|
||||
func PublicQueryFilter(_ interface{}, ai peer.AddrInfo) bool {
|
||||
if len(ai.Addrs) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hasPublicAddr bool
|
||||
for _, a := range ai.Addrs {
|
||||
if !isRelayAddr(a) && isPublicAddr(a) {
|
||||
hasPublicAddr = true
|
||||
}
|
||||
}
|
||||
return hasPublicAddr
|
||||
}
|
||||
|
||||
type hasHost interface {
|
||||
Host() host.Host
|
||||
}
|
||||
|
||||
var _ QueryFilterFunc = PublicQueryFilter
|
||||
|
||||
// PublicRoutingTableFilter allows a peer to be added to the routing table if the connections to that peer indicate
|
||||
// that it is on a public network
|
||||
func PublicRoutingTableFilter(dht interface{}, p peer.ID) bool {
|
||||
d := dht.(hasHost)
|
||||
|
||||
conns := d.Host().Network().ConnsToPeer(p)
|
||||
if len(conns) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Do we have a public address for this peer?
|
||||
id := conns[0].RemotePeer()
|
||||
known := d.Host().Peerstore().PeerInfo(id)
|
||||
for _, a := range known.Addrs {
|
||||
if !isRelayAddr(a) && isPublicAddr(a) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
var _ RouteTableFilterFunc = PublicRoutingTableFilter
|
||||
|
||||
// PrivateQueryFilter doens't currently restrict which peers we are willing to query from the local DHT.
|
||||
func PrivateQueryFilter(_ interface{}, ai peer.AddrInfo) bool {
|
||||
return len(ai.Addrs) > 0
|
||||
}
|
||||
|
||||
var _ QueryFilterFunc = PrivateQueryFilter
|
||||
|
||||
// We call this very frequently but routes can technically change at runtime.
|
||||
// Cache it for two minutes.
|
||||
const routerCacheTime = 2 * time.Minute
|
||||
|
||||
var routerCache struct {
|
||||
sync.RWMutex
|
||||
router routing.Router
|
||||
expires time.Time
|
||||
}
|
||||
|
||||
func getCachedRouter() routing.Router {
|
||||
routerCache.RLock()
|
||||
router := routerCache.router
|
||||
expires := routerCache.expires
|
||||
routerCache.RUnlock()
|
||||
|
||||
if time.Now().Before(expires) {
|
||||
return router
|
||||
}
|
||||
|
||||
routerCache.Lock()
|
||||
defer routerCache.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
if now.Before(routerCache.expires) {
|
||||
return router
|
||||
}
|
||||
routerCache.router, _ = netroute.New()
|
||||
routerCache.expires = now.Add(routerCacheTime)
|
||||
return router
|
||||
}
|
||||
|
||||
// PrivateRoutingTableFilter allows a peer to be added to the routing table if the connections to that peer indicate
|
||||
// that it is on a private network
|
||||
func PrivateRoutingTableFilter(dht interface{}, p peer.ID) bool {
|
||||
d := dht.(hasHost)
|
||||
conns := d.Host().Network().ConnsToPeer(p)
|
||||
return privRTFilter(d, conns)
|
||||
}
|
||||
|
||||
func privRTFilter(dht interface{}, conns []network.Conn) bool {
|
||||
d := dht.(hasHost)
|
||||
h := d.Host()
|
||||
|
||||
router := getCachedRouter()
|
||||
myAdvertisedIPs := make([]net.IP, 0)
|
||||
for _, a := range h.Addrs() {
|
||||
if isPublicAddr(a) && !isRelayAddr(a) {
|
||||
ip, err := manet.ToIP(a)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
myAdvertisedIPs = append(myAdvertisedIPs, ip)
|
||||
}
|
||||
}
|
||||
|
||||
for _, c := range conns {
|
||||
ra := c.RemoteMultiaddr()
|
||||
if isPrivateAddr(ra) && !isRelayAddr(ra) {
|
||||
return true
|
||||
}
|
||||
|
||||
if isPublicAddr(ra) {
|
||||
ip, err := manet.ToIP(ra)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// if the ip is the same as one of the local host's public advertised IPs - then consider it local
|
||||
for _, i := range myAdvertisedIPs {
|
||||
if i.Equal(ip) {
|
||||
return true
|
||||
}
|
||||
if ip.To4() == nil {
|
||||
if i.To4() == nil && isEUI(ip) && sameV6Net(i, ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if there's no gateway - a direct host in the OS routing table - then consider it local
|
||||
// This is relevant in particular to ipv6 networks where the addresses may all be public,
|
||||
// but the nodes are aware of direct links between each other.
|
||||
if router != nil {
|
||||
_, gw, _, err := router.Route(ip)
|
||||
if gw == nil && err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
var _ RouteTableFilterFunc = PrivateRoutingTableFilter
|
||||
|
||||
func isEUI(ip net.IP) bool {
|
||||
// per rfc 2373
|
||||
return len(ip) == net.IPv6len && ip[11] == 0xff && ip[12] == 0xfe
|
||||
}
|
||||
|
||||
func sameV6Net(a, b net.IP) bool {
|
||||
//lint:ignore SA1021 We're comparing only parts of the IP address here.
|
||||
return len(a) == net.IPv6len && len(b) == net.IPv6len && bytes.Equal(a[0:8], b[0:8]) //nolint
|
||||
}
|
||||
|
||||
func isRelayAddr(a ma.Multiaddr) bool {
|
||||
found := false
|
||||
ma.ForEach(a, func(c ma.Component) bool {
|
||||
found = c.Protocol().Code == ma.P_CIRCUIT
|
||||
return !found
|
||||
})
|
||||
return found
|
||||
}
|
||||
|
||||
func inAddrRange(ip net.IP, ipnets []*net.IPNet) bool {
|
||||
for _, ipnet := range ipnets {
|
||||
if ipnet.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
vendor/github.com/libp2p/go-libp2p-kad-dht/dht_net.go (generated, vendored, new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal/net"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
|
||||
"github.com/libp2p/go-msgio"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/tag"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var dhtStreamIdleTimeout = 1 * time.Minute
|
||||
|
||||
// ErrReadTimeout is an error that occurs when no message is read within the timeout period.
|
||||
var ErrReadTimeout = net.ErrReadTimeout
|
||||
|
||||
// handleNewStream implements the network.StreamHandler
|
||||
func (dht *IpfsDHT) handleNewStream(s network.Stream) {
|
||||
if dht.handleNewMessage(s) {
|
||||
// If we exited without error, close gracefully.
|
||||
_ = s.Close()
|
||||
} else {
|
||||
// otherwise, send an error.
|
||||
_ = s.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true on orderly completion of writes (so we can Close the stream).
|
||||
func (dht *IpfsDHT) handleNewMessage(s network.Stream) bool {
|
||||
ctx := dht.ctx
|
||||
r := msgio.NewVarintReaderSize(s, network.MessageSizeMax)
|
||||
|
||||
mPeer := s.Conn().RemotePeer()
|
||||
|
||||
timer := time.AfterFunc(dhtStreamIdleTimeout, func() { _ = s.Reset() })
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
if dht.getMode() != modeServer {
|
||||
logger.Debugf("ignoring incoming dht message while not in server mode")
|
||||
return false
|
||||
}
|
||||
|
||||
var req pb.Message
|
||||
msgbytes, err := r.ReadMsg()
|
||||
msgLen := len(msgbytes)
|
||||
if err != nil {
|
||||
r.ReleaseMsg(msgbytes)
|
||||
if err == io.EOF {
|
||||
return true
|
||||
}
|
||||
// This string test is necessary because there isn't a single stream reset error
|
||||
// instance in use.
|
||||
if c := baseLogger.Check(zap.DebugLevel, "error reading message"); c != nil && err.Error() != "stream reset" {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
if msgLen > 0 {
|
||||
_ = stats.RecordWithTags(ctx,
|
||||
[]tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")},
|
||||
metrics.ReceivedMessages.M(1),
|
||||
metrics.ReceivedMessageErrors.M(1),
|
||||
metrics.ReceivedBytes.M(int64(msgLen)),
|
||||
)
|
||||
}
|
||||
return false
|
||||
}
|
||||
err = req.Unmarshal(msgbytes)
|
||||
r.ReleaseMsg(msgbytes)
|
||||
if err != nil {
|
||||
if c := baseLogger.Check(zap.DebugLevel, "error unmarshaling message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
_ = stats.RecordWithTags(ctx,
|
||||
[]tag.Mutator{tag.Upsert(metrics.KeyMessageType, "UNKNOWN")},
|
||||
metrics.ReceivedMessages.M(1),
|
||||
metrics.ReceivedMessageErrors.M(1),
|
||||
metrics.ReceivedBytes.M(int64(msgLen)),
|
||||
)
|
||||
return false
|
||||
}
|
||||
|
||||
timer.Reset(dhtStreamIdleTimeout)
|
||||
|
||||
startTime := time.Now()
|
||||
ctx, _ := tag.New(ctx,
|
||||
tag.Upsert(metrics.KeyMessageType, req.GetType().String()),
|
||||
)
|
||||
|
||||
stats.Record(ctx,
|
||||
metrics.ReceivedMessages.M(1),
|
||||
metrics.ReceivedBytes.M(int64(msgLen)),
|
||||
)
|
||||
|
||||
handler := dht.handlerForMsgType(req.GetType())
|
||||
if handler == nil {
|
||||
stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
|
||||
if c := baseLogger.Check(zap.DebugLevel, "can't handle received message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if c := baseLogger.Check(zap.DebugLevel, "handling message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()))
|
||||
}
|
||||
resp, err := handler(ctx, mPeer, &req)
|
||||
if err != nil {
|
||||
stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
|
||||
if c := baseLogger.Check(zap.DebugLevel, "error handling message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()),
|
||||
zap.Error(err))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if c := baseLogger.Check(zap.DebugLevel, "handled message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()),
|
||||
zap.Duration("time", time.Since(startTime)))
|
||||
}
|
||||
|
||||
if resp == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// send out response msg
|
||||
err = net.WriteMsg(s, resp)
|
||||
if err != nil {
|
||||
stats.Record(ctx, metrics.ReceivedMessageErrors.M(1))
|
||||
if c := baseLogger.Check(zap.DebugLevel, "error writing response"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()),
|
||||
zap.Error(err))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
elapsedTime := time.Since(startTime)
|
||||
|
||||
if c := baseLogger.Check(zap.DebugLevel, "responded to message"); c != nil {
|
||||
c.Write(zap.String("from", mPeer.String()),
|
||||
zap.Int32("type", int32(req.GetType())),
|
||||
zap.Binary("key", req.GetKey()),
|
||||
zap.Duration("time", elapsedTime))
|
||||
}
|
||||
|
||||
latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
|
||||
stats.Record(ctx, metrics.InboundRequestLatency.M(latencyMillis))
|
||||
}
|
||||
}
|
||||
vendor/github.com/libp2p/go-libp2p-kad-dht/dht_options.go (generated, vendored, new file, 358 lines)
@@ -0,0 +1,358 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/providers"
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// ModeOpt describes what mode the dht should operate in
|
||||
type ModeOpt = dhtcfg.ModeOpt
|
||||
|
||||
const (
|
||||
// ModeAuto utilizes EvtLocalReachabilityChanged events sent over the event bus to dynamically switch the DHT
|
||||
// between Client and Server modes based on network conditions
|
||||
ModeAuto ModeOpt = iota
|
||||
// ModeClient operates the DHT as a client only, it cannot respond to incoming queries
|
||||
ModeClient
|
||||
// ModeServer operates the DHT as a server, it can both send and respond to queries
|
||||
ModeServer
|
||||
// ModeAutoServer operates in the same way as ModeAuto, but acts as a server when reachability is unknown
|
||||
ModeAutoServer
|
||||
)
|
||||
|
||||
// DefaultPrefix is the application specific prefix attached to all DHT protocols by default.
|
||||
const DefaultPrefix protocol.ID = "/ipfs"
|
||||
|
||||
type Option = dhtcfg.Option
|
||||
|
||||
// ProviderStore sets the provider storage manager.
|
||||
func ProviderStore(ps providers.ProviderStore) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.ProviderStore = ps
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTableLatencyTolerance sets the maximum acceptable latency for peers
|
||||
// in the routing table's cluster.
|
||||
func RoutingTableLatencyTolerance(latency time.Duration) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.LatencyTolerance = latency
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTableRefreshQueryTimeout sets the timeout for routing table refresh
|
||||
// queries.
|
||||
func RoutingTableRefreshQueryTimeout(timeout time.Duration) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.RefreshQueryTimeout = timeout
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTableRefreshPeriod sets the period for refreshing buckets in the
|
||||
// routing table. The DHT will refresh buckets every period by:
|
||||
//
|
||||
// 1. First searching for nearby peers to figure out how many buckets we should try to fill.
|
||||
// 1. Then searching for a random key in each bucket that hasn't been queried in
|
||||
// the last refresh period.
|
||||
func RoutingTableRefreshPeriod(period time.Duration) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.RefreshInterval = period
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Datastore configures the DHT to use the specified datastore.
|
||||
//
|
||||
// Defaults to an in-memory (temporary) map.
|
||||
func Datastore(ds ds.Batching) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Datastore = ds
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Mode configures which mode the DHT operates in (Client, Server, Auto).
|
||||
//
|
||||
// Defaults to ModeAuto.
|
||||
func Mode(m ModeOpt) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Mode = m
|
||||
return nil
|
||||
}
|
||||
}
|
||||
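These functional options are consumed by the package constructor. A hedged sketch of wiring a few of them together; it assumes the exported dht.New(ctx, host, ...Option) constructor and a default libp2p host, and trims production concerns:

package main

import (
	"context"
	"log"

	"github.com/libp2p/go-libp2p"
	dht "github.com/libp2p/go-libp2p-kad-dht"
)

func main() {
	ctx := context.Background()

	// A bare libp2p host; real deployments configure transports, security and
	// listen addresses explicitly.
	h, err := libp2p.New()
	if err != nil {
		log.Fatal(err)
	}
	defer h.Close()

	// Each option mutates the internal config before the node starts.
	kad, err := dht.New(ctx, h,
		dht.Mode(dht.ModeAuto),
		dht.BucketSize(20),
		dht.Concurrency(10),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer kad.Close()
}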
|
||||
// Validator configures the DHT to use the specified validator.
|
||||
//
|
||||
// Defaults to a namespaced validator that can validate both public key (under the "pk"
|
||||
// namespace) and IPNS records (under the "ipns" namespace). Setting the validator
|
||||
// implies that the user wants to control the validators and therefore the default
|
||||
// public key and IPNS validators will not be added.
|
||||
func Validator(v record.Validator) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Validator = v
|
||||
c.ValidatorChanged = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NamespacedValidator adds a validator namespaced under `ns`. This option fails
|
||||
// if the DHT is not using a `record.NamespacedValidator` as its validator (it
|
||||
// uses one by default but this can be overridden with the `Validator` option).
|
||||
// Adding a namespaced validator without changing the `Validator` will result in
|
||||
// adding a new validator in addition to the default public key and IPNS validators.
|
||||
// The "pk" and "ipns" namespaces cannot be overridden here unless a new `Validator`
|
||||
// has been set first.
|
||||
//
|
||||
// Example: Given a validator registered as `NamespacedValidator("ipns",
|
||||
// myValidator)`, all records with keys starting with `/ipns/` will be validated
|
||||
// with `myValidator`.
|
||||
func NamespacedValidator(ns string, v record.Validator) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
nsval, ok := c.Validator.(record.NamespacedValidator)
|
||||
if !ok {
|
||||
return fmt.Errorf("can only add namespaced validators to a NamespacedValidator")
|
||||
}
|
||||
nsval[ns] = v
|
||||
return nil
|
||||
}
|
||||
}
|
||||
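For illustration, a hedged sketch of a validator that could be plugged in through NamespacedValidator; the namespace name and acceptance rules are invented for the example:

package example

import (
	"fmt"

	record "github.com/libp2p/go-libp2p-record"
)

// exampleValidator accepts any non-empty value and always prefers the first
// candidate when several records compete for the same key.
type exampleValidator struct{}

var _ record.Validator = exampleValidator{}

func (exampleValidator) Validate(key string, value []byte) error {
	if len(value) == 0 {
		return fmt.Errorf("empty value for key %s", key)
	}
	return nil
}

func (exampleValidator) Select(key string, values [][]byte) (int, error) {
	if len(values) == 0 {
		return 0, fmt.Errorf("no values for key %s", key)
	}
	return 0, nil
}

// It would be registered as an extra namespace alongside the default /pk and
// /ipns validators, e.g. dht.New(ctx, h, dht.NamespacedValidator("example", exampleValidator{})).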
|
||||
// ProtocolPrefix sets an application specific prefix to be attached to all DHT protocols. For example,
|
||||
// /myapp/kad/1.0.0 instead of /ipfs/kad/1.0.0. Prefix should be of the form /myapp.
|
||||
//
|
||||
// Defaults to dht.DefaultPrefix
|
||||
func ProtocolPrefix(prefix protocol.ID) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.ProtocolPrefix = prefix
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ProtocolExtension adds an application specific protocol to the DHT protocol. For example,
|
||||
// /ipfs/lan/kad/1.0.0 instead of /ipfs/kad/1.0.0. extension should be of the form /lan.
|
||||
func ProtocolExtension(ext protocol.ID) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.ProtocolPrefix += ext
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// V1ProtocolOverride overrides the protocolID used for /kad/1.0.0 with another. This is an
|
||||
// advanced feature, and should only be used to handle legacy networks that have not been
|
||||
// using protocolIDs of the form /app/kad/1.0.0.
|
||||
//
|
||||
// This option will override and ignore the ProtocolPrefix and ProtocolExtension options
|
||||
func V1ProtocolOverride(proto protocol.ID) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.V1ProtocolOverride = proto
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// BucketSize configures the bucket size (k in the Kademlia paper) of the routing table.
|
||||
//
|
||||
// The default value is 20.
|
||||
func BucketSize(bucketSize int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.BucketSize = bucketSize
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Concurrency configures the number of concurrent requests (alpha in the Kademlia paper) for a given query path.
|
||||
//
|
||||
// The default value is 10.
|
||||
func Concurrency(alpha int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Concurrency = alpha
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Resiliency configures the number of peers closest to a target that must have responded in order for a given query
|
||||
// path to complete.
|
||||
//
|
||||
// The default value is 3.
|
||||
func Resiliency(beta int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.Resiliency = beta
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// LookupCheckConcurrency configures the maximum number of goroutines that can be used to
// perform a lookup check operation before adding a new node to the routing table.
|
||||
func LookupCheckConcurrency(n int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.LookupCheckConcurrency = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// MaxRecordAge specifies the maximum time that any node will hold onto a record ("PutValue record")
// from the time it is received. This does not apply to any other forms of validity that
// the record may contain.
// For example, a record may contain an IPNS entry with an EOL saying it is valid
// until the year 2020. For that record to stick around,
// it must be rebroadcast more frequently than once every 'MaxRecordAge'.
|
||||
func MaxRecordAge(maxAge time.Duration) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.MaxRecordAge = maxAge
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DisableAutoRefresh completely disables 'auto-refresh' on the DHT routing
|
||||
// table. This means that we will neither refresh the routing table periodically
|
||||
// nor when the routing table size goes below the minimum threshold.
|
||||
func DisableAutoRefresh() Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.AutoRefresh = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DisableProviders disables storing and retrieving provider records.
|
||||
//
|
||||
// Defaults to enabled.
|
||||
//
|
||||
// WARNING: do not change this unless you're using a forked DHT (i.e., a private
|
||||
// network and/or distinct DHT protocols with the `Protocols` option).
|
||||
func DisableProviders() Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.EnableProviders = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DisableValues disables storing and retrieving value records (including
|
||||
// public keys).
|
||||
//
|
||||
// Defaults to enabled.
|
||||
//
|
||||
// WARNING: do not change this unless you're using a forked DHT (i.e., a private
|
||||
// network and/or distinct DHT protocols with the `Protocols` option).
|
||||
func DisableValues() Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.EnableValues = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// QueryFilter sets a function that approves which peers may be dialed in a query
|
||||
func QueryFilter(filter QueryFilterFunc) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.QueryPeerFilter = filter
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTableFilter sets a function that approves which peers may be added to the routing table. The host should
|
||||
// already have at least one connection to the peer under consideration.
|
||||
func RoutingTableFilter(filter RouteTableFilterFunc) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.PeerFilter = filter
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// BootstrapPeers configures the bootstrap nodes that we will connect to in order to seed
// and refresh our Routing Table if it becomes empty.
|
||||
func BootstrapPeers(bootstrappers ...peer.AddrInfo) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.BootstrapPeers = func() []peer.AddrInfo {
|
||||
return bootstrappers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// BootstrapPeersFunc configures the function that returns the bootstrap nodes that we will
// connect to in order to seed and refresh our Routing Table if it becomes empty.
|
||||
func BootstrapPeersFunc(getBootstrapPeers func() []peer.AddrInfo) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.BootstrapPeers = getBootstrapPeers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RoutingTablePeerDiversityFilter configures the implementation of the `PeerIPGroupFilter` that will be used
|
||||
// to construct the diversity filter for the Routing Table.
|
||||
// Please see the docs for `peerdiversity.PeerIPGroupFilter` AND `peerdiversity.Filter` for more details.
|
||||
func RoutingTablePeerDiversityFilter(pg peerdiversity.PeerIPGroupFilter) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.RoutingTable.DiversityFilter = pg
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// disableFixLowPeersRoutine disables the "fixLowPeers" routine in the DHT.
|
||||
// This is ONLY for tests.
|
||||
func disableFixLowPeersRoutine(t *testing.T) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.DisableFixLowPeers = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// forceAddressUpdateProcessing forces the DHT to handle changes to the host's addresses.
|
||||
// This occurs even when AutoRefresh has been disabled.
|
||||
// This is ONLY for tests.
|
||||
func forceAddressUpdateProcessing(t *testing.T) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.TestAddressUpdateProcessing = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// EnableOptimisticProvide enables an optimization that skips the last hops of the provide process.
|
||||
// This works by using the network size estimator (which uses the keyspace density of queries)
|
||||
// to optimistically send ADD_PROVIDER requests when we most likely have found the last hop.
|
||||
// It will also run some ADD_PROVIDER requests asynchronously in the background after returning;
// this allows the provide call to return earlier once a threshold number of RPCs have succeeded.
|
||||
// The number of background/in-flight queries can be configured with the OptimisticProvideJobsPoolSize
|
||||
// option.
|
||||
//
|
||||
// EXPERIMENTAL: This is an experimental option and might be removed in the future. Use at your own risk.
|
||||
func EnableOptimisticProvide() Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.EnableOptimisticProvide = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptimisticProvideJobsPoolSize configures the asynchronicity limit for in-flight ADD_PROVIDER RPCs.
|
||||
// It makes sense to set it to a multiple of optProvReturnRatio * BucketSize. Check the description of
|
||||
// EnableOptimisticProvide for more details.
|
||||
//
|
||||
// EXPERIMENTAL: This is an experimental option and might be removed in the future. Use at your own risk.
|
||||
func OptimisticProvideJobsPoolSize(size int) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.OptimisticProvideJobsPoolSize = size
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// AddressFilter configures the address filtering function.
|
||||
// This function is run before addresses are added to the peerstore.
|
||||
// It is most useful to avoid adding localhost / local addresses.
|
||||
func AddressFilter(f func([]ma.Multiaddr) []ma.Multiaddr) Option {
|
||||
return func(c *dhtcfg.Config) error {
|
||||
c.AddressFilter = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
3 vendor/github.com/libp2p/go-libp2p-kad-dht/doc.go generated vendored Normal file
@@ -0,0 +1,3 @@
// Package dht implements a distributed hash table that satisfies the ipfs routing
// interface. This DHT is modeled after Kademlia with S/Kademlia modifications.
package dht
247 vendor/github.com/libp2p/go-libp2p-kad-dht/events.go generated vendored Normal file
@@ -0,0 +1,247 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
kbucket "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// KeyKadID contains the Kademlia key in string and binary form.
|
||||
type KeyKadID struct {
|
||||
Key string
|
||||
Kad kbucket.ID
|
||||
}
|
||||
|
||||
// NewKeyKadID creates a KeyKadID from a string Kademlia ID.
|
||||
func NewKeyKadID(k string) *KeyKadID {
|
||||
return &KeyKadID{
|
||||
Key: k,
|
||||
Kad: kbucket.ConvertKey(k),
|
||||
}
|
||||
}
|
||||
|
||||
// PeerKadID contains a libp2p Peer ID and a binary Kademlia ID.
|
||||
type PeerKadID struct {
|
||||
Peer peer.ID
|
||||
Kad kbucket.ID
|
||||
}
|
||||
|
||||
// NewPeerKadID creates a PeerKadID from a libp2p Peer ID.
|
||||
func NewPeerKadID(p peer.ID) *PeerKadID {
|
||||
return &PeerKadID{
|
||||
Peer: p,
|
||||
Kad: kbucket.ConvertPeerID(p),
|
||||
}
|
||||
}
|
||||
|
||||
// NewPeerKadIDSlice creates a slice of PeerKadID from the passed slice of libp2p Peer IDs.
|
||||
func NewPeerKadIDSlice(p []peer.ID) []*PeerKadID {
|
||||
r := make([]*PeerKadID, len(p))
|
||||
for i := range p {
|
||||
r[i] = NewPeerKadID(p[i])
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// OptPeerKadID returns a pointer to a PeerKadID, or nil if the passed Peer ID is its default value.
|
||||
func OptPeerKadID(p peer.ID) *PeerKadID {
|
||||
if p == "" {
|
||||
return nil
|
||||
}
|
||||
return NewPeerKadID(p)
|
||||
}
|
||||
|
||||
// NewLookupEvent creates a LookupEvent automatically converting the node
|
||||
// libp2p Peer ID to a PeerKadID and the string Kademlia key to a KeyKadID.
|
||||
func NewLookupEvent(
|
||||
node peer.ID,
|
||||
id uuid.UUID,
|
||||
key string,
|
||||
request *LookupUpdateEvent,
|
||||
response *LookupUpdateEvent,
|
||||
terminate *LookupTerminateEvent,
|
||||
) *LookupEvent {
|
||||
return &LookupEvent{
|
||||
Node: NewPeerKadID(node),
|
||||
ID: id,
|
||||
Key: NewKeyKadID(key),
|
||||
Request: request,
|
||||
Response: response,
|
||||
Terminate: terminate,
|
||||
}
|
||||
}
|
||||
|
||||
// LookupEvent is emitted for every notable event that happens during a DHT lookup.
|
||||
// LookupEvent supports JSON marshalling because all of its fields do, recursively.
|
||||
type LookupEvent struct {
|
||||
// Node is the ID of the node performing the lookup.
|
||||
Node *PeerKadID
|
||||
// ID is a unique identifier for the lookup instance.
|
||||
ID uuid.UUID
|
||||
// Key is the Kademlia key used as a lookup target.
|
||||
Key *KeyKadID
|
||||
// Request, if not nil, describes a state update event, associated with an outgoing query request.
|
||||
Request *LookupUpdateEvent
|
||||
// Response, if not nil, describes a state update event, associated with an outgoing query response.
|
||||
Response *LookupUpdateEvent
|
||||
// Terminate, if not nil, describes a termination event.
|
||||
Terminate *LookupTerminateEvent
|
||||
}
|
||||
|
||||
// NewLookupUpdateEvent creates a new lookup update event, automatically converting the passed peer IDs to peer Kad IDs.
|
||||
func NewLookupUpdateEvent(
|
||||
cause peer.ID,
|
||||
source peer.ID,
|
||||
heard []peer.ID,
|
||||
waiting []peer.ID,
|
||||
queried []peer.ID,
|
||||
unreachable []peer.ID,
|
||||
) *LookupUpdateEvent {
|
||||
return &LookupUpdateEvent{
|
||||
Cause: OptPeerKadID(cause),
|
||||
Source: OptPeerKadID(source),
|
||||
Heard: NewPeerKadIDSlice(heard),
|
||||
Waiting: NewPeerKadIDSlice(waiting),
|
||||
Queried: NewPeerKadIDSlice(queried),
|
||||
Unreachable: NewPeerKadIDSlice(unreachable),
|
||||
}
|
||||
}
|
||||
|
||||
// LookupUpdateEvent describes a lookup state update event.
|
||||
type LookupUpdateEvent struct {
|
||||
// Cause is the peer whose response (or lack of response) caused the update event.
|
||||
// If Cause is nil, this is the first update event in the lookup, caused by the seeding.
|
||||
Cause *PeerKadID
|
||||
// Source is the peer who informed us about the peer IDs in this update (below).
|
||||
Source *PeerKadID
|
||||
// Heard is a set of peers whose state in the lookup's peerset is being set to "heard".
|
||||
Heard []*PeerKadID
|
||||
// Waiting is a set of peers whose state in the lookup's peerset is being set to "waiting".
|
||||
Waiting []*PeerKadID
|
||||
// Queried is a set of peers whose state in the lookup's peerset is being set to "queried".
|
||||
Queried []*PeerKadID
|
||||
// Unreachable is a set of peers whose state in the lookup's peerset is being set to "unreachable".
|
||||
Unreachable []*PeerKadID
|
||||
}
|
||||
|
||||
// LookupTerminateEvent describes a lookup termination event.
|
||||
type LookupTerminateEvent struct {
|
||||
// Reason is the reason for lookup termination.
|
||||
Reason LookupTerminationReason
|
||||
}
|
||||
|
||||
// NewLookupTerminateEvent creates a new lookup termination event with a given reason.
|
||||
func NewLookupTerminateEvent(reason LookupTerminationReason) *LookupTerminateEvent {
|
||||
return &LookupTerminateEvent{Reason: reason}
|
||||
}
|
||||
|
||||
// LookupTerminationReason captures reasons for terminating a lookup.
|
||||
type LookupTerminationReason int
|
||||
|
||||
// MarshalJSON returns the JSON encoding of the passed lookup termination reason.
|
||||
func (r LookupTerminationReason) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(r.String())
|
||||
}
|
||||
|
||||
func (r LookupTerminationReason) String() string {
|
||||
switch r {
|
||||
case LookupStopped:
|
||||
return "stopped"
|
||||
case LookupCancelled:
|
||||
return "cancelled"
|
||||
case LookupStarvation:
|
||||
return "starvation"
|
||||
case LookupCompleted:
|
||||
return "completed"
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
const (
|
||||
// LookupStopped indicates that the lookup was aborted by the user's stopFn.
|
||||
LookupStopped LookupTerminationReason = iota
|
||||
// LookupCancelled indicates that the lookup was aborted by the context.
|
||||
LookupCancelled
|
||||
// LookupStarvation indicates that the lookup terminated due to lack of unqueried peers.
|
||||
LookupStarvation
|
||||
// LookupCompleted indicates that the lookup terminated successfully, reaching the Kademlia end condition.
|
||||
LookupCompleted
|
||||
)
|
||||
|
||||
type routingLookupKey struct{}
|
||||
|
||||
// TODO: lookupEventChannel copies the implementation of eventChannel.
|
||||
// The two should be refactored to use a common event channel implementation.
|
||||
// A common implementation needs to rethink the signature of RegisterForEvents,
|
||||
// because returning a typed channel cannot be made polymorphic without creating
|
||||
// additional "adapter" channels. This will be easier to handle when Go
|
||||
// introduces generics.
|
||||
type lookupEventChannel struct {
|
||||
mu sync.Mutex
|
||||
ctx context.Context
|
||||
ch chan<- *LookupEvent
|
||||
}
|
||||
|
||||
// waitThenClose is spawned in a goroutine when the channel is registered. This
|
||||
// safely cleans up the channel when the context has been canceled.
|
||||
func (e *lookupEventChannel) waitThenClose() {
|
||||
<-e.ctx.Done()
|
||||
e.mu.Lock()
|
||||
close(e.ch)
|
||||
// 1. Signals that we're done.
|
||||
// 2. Frees memory (in case we end up hanging on to this for a while).
|
||||
e.ch = nil
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
// send sends an event on the event channel, aborting if either the passed or
|
||||
// the internal context expire.
|
||||
func (e *lookupEventChannel) send(ctx context.Context, ev *LookupEvent) {
|
||||
e.mu.Lock()
|
||||
// Closed.
|
||||
if e.ch == nil {
|
||||
e.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// in case the passed context is unrelated, wait on both.
|
||||
select {
|
||||
case e.ch <- ev:
|
||||
case <-e.ctx.Done():
|
||||
case <-ctx.Done():
|
||||
}
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
// RegisterForLookupEvents registers a lookup event channel with the given context.
|
||||
// The returned context can be passed to DHT queries to receive lookup events on
|
||||
// the returned channels.
|
||||
//
|
||||
// The passed context MUST be canceled when the caller is no longer interested
|
||||
// in query events.
|
||||
func RegisterForLookupEvents(ctx context.Context) (context.Context, <-chan *LookupEvent) {
|
||||
ch := make(chan *LookupEvent, LookupEventBufferSize)
|
||||
ech := &lookupEventChannel{ch: ch, ctx: ctx}
|
||||
go ech.waitThenClose()
|
||||
return context.WithValue(ctx, routingLookupKey{}, ech), ch
|
||||
}
|
||||
|
||||
// LookupEventBufferSize is the number of events to buffer.
|
||||
var LookupEventBufferSize = 16
|
||||
|
||||
// PublishLookupEvent publishes a query event to the query event channel
|
||||
// associated with the given context, if any.
|
||||
func PublishLookupEvent(ctx context.Context, ev *LookupEvent) {
|
||||
ich := ctx.Value(routingLookupKey{})
|
||||
if ich == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// We *want* to panic here.
|
||||
ech := ich.(*lookupEventChannel)
|
||||
ech.send(ctx, ev)
|
||||
}
|
||||
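A hedged sketch of consuming these events from the public routing API; it assumes an already-constructed *IpfsDHT and uses its FindPeer method, with everything else invented for the example:

package example

import (
	"context"
	"fmt"

	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/peer"
)

// traceLookup runs FindPeer with lookup-event tracing enabled.
func traceLookup(ctx context.Context, node *dht.IpfsDHT, target peer.ID) {
	// Cancelling lctx is what eventually closes the returned channel
	// (see waitThenClose above).
	lctx, cancel := context.WithCancel(ctx)
	defer cancel()

	lctx, events := dht.RegisterForLookupEvents(lctx)

	go func() {
		for ev := range events {
			switch {
			case ev.Request != nil:
				fmt.Println("lookup", ev.ID, "request update from", ev.Request.Source)
			case ev.Terminate != nil:
				fmt.Println("lookup", ev.ID, "terminated:", ev.Terminate.Reason)
			}
		}
	}()

	// Any query run with lctx publishes its lookup events to the channel.
	if _, err := node.FindPeer(lctx, target); err != nil {
		fmt.Println("find peer:", err)
	}
}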
382 vendor/github.com/libp2p/go-libp2p-kad-dht/handlers.go generated vendored Normal file
@@ -0,0 +1,382 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
u "github.com/ipfs/boxo/util"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
recpb "github.com/libp2p/go-libp2p-record/pb"
|
||||
"github.com/multiformats/go-base32"
|
||||
)
|
||||
|
||||
// dhtHandler specifies the signature of functions that handle DHT messages.
|
||||
type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)
|
||||
|
||||
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
|
||||
switch t {
|
||||
case pb.Message_FIND_NODE:
|
||||
return dht.handleFindPeer
|
||||
case pb.Message_PING:
|
||||
return dht.handlePing
|
||||
}
|
||||
|
||||
if dht.enableValues {
|
||||
switch t {
|
||||
case pb.Message_GET_VALUE:
|
||||
return dht.handleGetValue
|
||||
case pb.Message_PUT_VALUE:
|
||||
return dht.handlePutValue
|
||||
}
|
||||
}
|
||||
|
||||
if dht.enableProviders {
|
||||
switch t {
|
||||
case pb.Message_ADD_PROVIDER:
|
||||
return dht.handleAddProvider
|
||||
case pb.Message_GET_PROVIDERS:
|
||||
return dht.handleGetProviders
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
|
||||
// first, is there even a key?
|
||||
k := pmes.GetKey()
|
||||
if len(k) == 0 {
|
||||
return nil, errors.New("handleGetValue but no key was provided")
|
||||
}
|
||||
|
||||
// setup response
|
||||
resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
|
||||
|
||||
rec, err := dht.checkLocalDatastore(ctx, k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Record = rec
|
||||
|
||||
// Find closest peer on given cluster to desired key and reply with that info
|
||||
closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize)
|
||||
if len(closer) > 0 {
|
||||
// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
|
||||
closerinfos := pstore.PeerInfos(dht.peerstore, closer)
|
||||
for _, pi := range closerinfos {
|
||||
logger.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
|
||||
if len(pi.Addrs) < 1 {
|
||||
logger.Warnw("no addresses on peer being sent",
|
||||
"local", dht.self,
|
||||
"to", p,
|
||||
"sending", pi.ID,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) checkLocalDatastore(ctx context.Context, k []byte) (*recpb.Record, error) {
|
||||
logger.Debugf("%s handleGetValue looking into ds", dht.self)
|
||||
dskey := convertToDsKey(k)
|
||||
buf, err := dht.datastore.Get(ctx, dskey)
|
||||
logger.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, buf)
|
||||
|
||||
if err == ds.ErrNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// if we got an unexpected error, bail.
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if we have the value, send it back
|
||||
logger.Debugf("%s handleGetValue success!", dht.self)
|
||||
|
||||
rec := new(recpb.Record)
|
||||
err = proto.Unmarshal(buf, rec)
|
||||
if err != nil {
|
||||
logger.Debug("failed to unmarshal DHT record from datastore")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var recordIsBad bool
|
||||
recvtime, err := u.ParseRFC3339(rec.GetTimeReceived())
|
||||
if err != nil {
|
||||
logger.Info("either no receive time set on record, or it was invalid: ", err)
|
||||
recordIsBad = true
|
||||
}
|
||||
|
||||
if time.Since(recvtime) > dht.maxRecordAge {
|
||||
logger.Debug("old record found, tossing.")
|
||||
recordIsBad = true
|
||||
}
|
||||
|
||||
// NOTE: We do not verify the record here beyond checking these timestamps.
|
||||
// we put the burden of checking the records on the requester as checking a record
|
||||
// may be computationally expensive
|
||||
|
||||
if recordIsBad {
|
||||
err := dht.datastore.Delete(ctx, dskey)
|
||||
if err != nil {
|
||||
logger.Error("Failed to delete bad record from datastore: ", err)
|
||||
}
|
||||
|
||||
return nil, nil // can treat this as not having the record at all
|
||||
}
|
||||
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
// Cleans the record (to avoid storing arbitrary data).
|
||||
func cleanRecord(rec *recpb.Record) {
|
||||
rec.TimeReceived = ""
|
||||
}
|
||||
|
||||
// Store a value in this peer's local storage.
|
||||
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
|
||||
if len(pmes.GetKey()) == 0 {
|
||||
return nil, errors.New("handleGetValue but no key was provided")
|
||||
}
|
||||
|
||||
rec := pmes.GetRecord()
|
||||
if rec == nil {
|
||||
logger.Debugw("got nil record from", "from", p)
|
||||
return nil, errors.New("nil record")
|
||||
}
|
||||
|
||||
if !bytes.Equal(pmes.GetKey(), rec.GetKey()) {
|
||||
return nil, errors.New("put key doesn't match record key")
|
||||
}
|
||||
|
||||
cleanRecord(rec)
|
||||
|
||||
// Make sure the record is valid (not expired, valid signature etc)
|
||||
if err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil {
|
||||
logger.Infow("bad dht record in PUT", "from", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey()), "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dskey := convertToDsKey(rec.GetKey())
|
||||
|
||||
// fetch the striped lock for this key
|
||||
var indexForLock byte
|
||||
if len(rec.GetKey()) == 0 {
|
||||
indexForLock = 0
|
||||
} else {
|
||||
indexForLock = rec.GetKey()[len(rec.GetKey())-1]
|
||||
}
|
||||
lk := &dht.stripedPutLocks[indexForLock]
|
||||
lk.Lock()
|
||||
defer lk.Unlock()
|
||||
|
||||
// Make sure the new record is "better" than the record we have locally.
|
||||
// This prevents a record with for example a lower sequence number from
|
||||
// overwriting a record with a higher sequence number.
|
||||
existing, err := dht.getRecordFromDatastore(ctx, dskey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if existing != nil {
|
||||
recs := [][]byte{rec.GetValue(), existing.GetValue()}
|
||||
i, err := dht.Validator.Select(string(rec.GetKey()), recs)
|
||||
if err != nil {
|
||||
logger.Warnw("dht record passed validation but failed select", "from", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey()), "error", err)
|
||||
return nil, err
|
||||
}
|
||||
if i != 0 {
|
||||
logger.Infow("DHT record in PUT older than existing record (ignoring)", "peer", p, "key", internal.LoggableRecordKeyBytes(rec.GetKey()))
|
||||
return nil, errors.New("old record")
|
||||
}
|
||||
}
|
||||
|
||||
// record the time we receive every record
|
||||
rec.TimeReceived = u.FormatRFC3339(time.Now())
|
||||
|
||||
data, err := proto.Marshal(rec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = dht.datastore.Put(ctx, dskey, data)
|
||||
return pmes, err
|
||||
}
|
||||
|
||||
// returns nil, nil when either nothing is found or the value found doesn't properly validate.
|
||||
// returns nil, some_error when there's a *datastore* error (i.e., something goes very wrong)
|
||||
func (dht *IpfsDHT) getRecordFromDatastore(ctx context.Context, dskey ds.Key) (*recpb.Record, error) {
|
||||
buf, err := dht.datastore.Get(ctx, dskey)
|
||||
if err == ds.ErrNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
logger.Errorw("error retrieving record from datastore", "key", dskey, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
rec := new(recpb.Record)
|
||||
err = proto.Unmarshal(buf, rec)
|
||||
if err != nil {
|
||||
// Bad data in datastore, log it but don't return an error, we'll just overwrite it
|
||||
logger.Errorw("failed to unmarshal record from datastore", "key", dskey, "error", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue())
|
||||
if err != nil {
|
||||
// Invalid record in datastore, probably expired but don't return an error,
|
||||
// we'll just overwrite it
|
||||
logger.Debugw("local record verify failed", "key", rec.GetKey(), "error", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
|
||||
logger.Debugf("%s Responding to ping from %s!\n", dht.self, p)
|
||||
return pmes, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, from peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
|
||||
resp := pb.NewMessage(pmes.GetType(), nil, pmes.GetClusterLevel())
|
||||
var closest []peer.ID
|
||||
|
||||
if len(pmes.GetKey()) == 0 {
|
||||
return nil, fmt.Errorf("handleFindPeer with empty key")
|
||||
}
|
||||
|
||||
// if looking for self... special case where we send it on CloserPeers.
|
||||
targetPid := peer.ID(pmes.GetKey())
|
||||
if targetPid == dht.self {
|
||||
closest = []peer.ID{dht.self}
|
||||
} else {
|
||||
closest = dht.betterPeersToQuery(pmes, from, dht.bucketSize)
|
||||
|
||||
// Never tell a peer about itself.
|
||||
if targetPid != from {
|
||||
// Add the target peer to the set of closest peers if
|
||||
// not already present in our routing table.
|
||||
//
|
||||
// Later, when we lookup known addresses for all peers
|
||||
// in this set, we'll prune this peer if we don't
|
||||
// _actually_ know where it is.
|
||||
found := false
|
||||
for _, p := range closest {
|
||||
if targetPid == p {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
closest = append(closest, targetPid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if closest == nil {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
|
||||
closestinfos := pstore.PeerInfos(dht.peerstore, closest)
|
||||
// possibly an over-allocation, but this slice is temporary anyway.
|
||||
withAddresses := make([]peer.AddrInfo, 0, len(closestinfos))
|
||||
for _, pi := range closestinfos {
|
||||
if len(pi.Addrs) > 0 {
|
||||
withAddresses = append(withAddresses, pi)
|
||||
}
|
||||
}
|
||||
|
||||
resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
|
||||
key := pmes.GetKey()
|
||||
if len(key) > 80 {
|
||||
return nil, fmt.Errorf("handleGetProviders key size too large")
|
||||
} else if len(key) == 0 {
|
||||
return nil, fmt.Errorf("handleGetProviders key is empty")
|
||||
}
|
||||
|
||||
resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
|
||||
|
||||
// setup providers
|
||||
providers, err := dht.providerStore.GetProviders(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filtered := make([]peer.AddrInfo, len(providers))
|
||||
for i, provider := range providers {
|
||||
filtered[i] = peer.AddrInfo{
|
||||
ID: provider.ID,
|
||||
Addrs: dht.filterAddrs(provider.Addrs),
|
||||
}
|
||||
}
|
||||
|
||||
resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), filtered)
|
||||
|
||||
// Also send closer peers.
|
||||
closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize)
|
||||
if closer != nil {
|
||||
// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
|
||||
infos := pstore.PeerInfos(dht.peerstore, closer)
|
||||
resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
|
||||
key := pmes.GetKey()
|
||||
if len(key) > 80 {
|
||||
return nil, fmt.Errorf("handleAddProvider key size too large")
|
||||
} else if len(key) == 0 {
|
||||
return nil, fmt.Errorf("handleAddProvider key is empty")
|
||||
}
|
||||
|
||||
logger.Debugw("adding provider", "from", p, "key", internal.LoggableProviderRecordBytes(key))
|
||||
|
||||
// add provider should use the address given in the message
|
||||
pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
|
||||
for _, pi := range pinfos {
|
||||
if pi.ID != p {
|
||||
// we should ignore this provider record! not from originator.
|
||||
// (we should sign them and check signature later...)
|
||||
logger.Debugw("received provider from wrong peer", "from", p, "peer", pi.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(pi.Addrs) < 1 {
|
||||
logger.Debugw("no valid addresses for provider", "from", p)
|
||||
continue
|
||||
}
|
||||
|
||||
// We run the addrs filter after checking for the length,
|
||||
// this allows transient nodes with varying /p2p-circuit addresses to still have their announcement go through.
|
||||
addrs := dht.filterAddrs(pi.Addrs)
|
||||
dht.providerStore.AddProvider(ctx, key, peer.AddrInfo{ID: pi.ID, Addrs: addrs})
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func convertToDsKey(s []byte) ds.Key {
|
||||
return ds.NewKey(base32.RawStdEncoding.EncodeToString(s))
|
||||
}
|
||||
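The provider handlers above are the server side of content routing. A hedged client-side sketch, assuming a running *IpfsDHT (server or auto mode) and a CID to announce:

package example

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
	dht "github.com/libp2p/go-libp2p-kad-dht"
)

// announceAndFind sends ADD_PROVIDER records for c and then issues
// GET_PROVIDERS queries for the same key, which is exactly what the
// handlers above answer on the remote side.
func announceAndFind(ctx context.Context, node *dht.IpfsDHT, c cid.Cid) error {
	// true asks the DHT to broadcast the provider record to the network.
	if err := node.Provide(ctx, c, true); err != nil {
		return err
	}

	// Stream back up to 5 provider records as they are discovered.
	for info := range node.FindProvidersAsync(ctx, c, 5) {
		fmt.Println("provider:", info.ID, info.Addrs)
	}
	return nil
}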
172 vendor/github.com/libp2p/go-libp2p-kad-dht/internal/config/config.go generated vendored Normal file
@@ -0,0 +1,172 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/boxo/ipns"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
dssync "github.com/ipfs/go-datastore/sync"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/providers"
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// DefaultPrefix is the application specific prefix attached to all DHT protocols by default.
|
||||
const DefaultPrefix protocol.ID = "/ipfs"
|
||||
|
||||
const defaultBucketSize = 20
|
||||
|
||||
// ModeOpt describes what mode the dht should operate in
|
||||
type ModeOpt int
|
||||
|
||||
// QueryFilterFunc is a filter applied when considering peers to dial when querying
|
||||
type QueryFilterFunc func(dht interface{}, ai peer.AddrInfo) bool
|
||||
|
||||
// RouteTableFilterFunc is a filter applied when considering connections to keep in
|
||||
// the local route table.
|
||||
type RouteTableFilterFunc func(dht interface{}, p peer.ID) bool
|
||||
|
||||
// Config is a structure containing all the options that can be used when constructing a DHT.
|
||||
type Config struct {
|
||||
Datastore ds.Batching
|
||||
Validator record.Validator
|
||||
ValidatorChanged bool // if true implies that the validator has been changed and that Defaults should not be used
|
||||
Mode ModeOpt
|
||||
ProtocolPrefix protocol.ID
|
||||
V1ProtocolOverride protocol.ID
|
||||
BucketSize int
|
||||
Concurrency int
|
||||
Resiliency int
|
||||
MaxRecordAge time.Duration
|
||||
EnableProviders bool
|
||||
EnableValues bool
|
||||
ProviderStore providers.ProviderStore
|
||||
QueryPeerFilter QueryFilterFunc
|
||||
LookupCheckConcurrency int
|
||||
|
||||
RoutingTable struct {
|
||||
RefreshQueryTimeout time.Duration
|
||||
RefreshInterval time.Duration
|
||||
AutoRefresh bool
|
||||
LatencyTolerance time.Duration
|
||||
CheckInterval time.Duration
|
||||
PeerFilter RouteTableFilterFunc
|
||||
DiversityFilter peerdiversity.PeerIPGroupFilter
|
||||
}
|
||||
|
||||
BootstrapPeers func() []peer.AddrInfo
|
||||
AddressFilter func([]ma.Multiaddr) []ma.Multiaddr
|
||||
|
||||
// test specific Config options
|
||||
DisableFixLowPeers bool
|
||||
TestAddressUpdateProcessing bool
|
||||
|
||||
EnableOptimisticProvide bool
|
||||
OptimisticProvideJobsPoolSize int
|
||||
}
|
||||
|
||||
func EmptyQueryFilter(_ interface{}, ai peer.AddrInfo) bool { return true }
|
||||
func EmptyRTFilter(_ interface{}, p peer.ID) bool { return true }
|
||||
|
||||
// Apply applies the given options to this Config.
|
||||
func (c *Config) Apply(opts ...Option) error {
|
||||
for i, opt := range opts {
|
||||
if err := opt(c); err != nil {
|
||||
return fmt.Errorf("dht option %d failed: %s", i, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ApplyFallbacks sets default values that could not be applied during config creation since they are dependent
|
||||
// on other configuration parameters (e.g. optA is by default 2x optB) and/or on the Host
|
||||
func (c *Config) ApplyFallbacks(h host.Host) error {
|
||||
if !c.ValidatorChanged {
|
||||
nsval, ok := c.Validator.(record.NamespacedValidator)
|
||||
if ok {
|
||||
if _, pkFound := nsval["pk"]; !pkFound {
|
||||
nsval["pk"] = record.PublicKeyValidator{}
|
||||
}
|
||||
if _, ipnsFound := nsval["ipns"]; !ipnsFound {
|
||||
nsval["ipns"] = ipns.Validator{KeyBook: h.Peerstore()}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("the default Validator was changed without being marked as changed")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Option DHT option type.
|
||||
type Option func(*Config) error
|
||||
|
||||
// Defaults are the default DHT options. This option will be automatically
|
||||
// prepended to any options you pass to the DHT constructor.
|
||||
var Defaults = func(o *Config) error {
|
||||
o.Validator = record.NamespacedValidator{}
|
||||
o.Datastore = dssync.MutexWrap(ds.NewMapDatastore())
|
||||
o.ProtocolPrefix = DefaultPrefix
|
||||
o.EnableProviders = true
|
||||
o.EnableValues = true
|
||||
o.QueryPeerFilter = EmptyQueryFilter
|
||||
|
||||
o.RoutingTable.LatencyTolerance = 10 * time.Second
|
||||
o.RoutingTable.RefreshQueryTimeout = 10 * time.Second
|
||||
o.RoutingTable.RefreshInterval = 10 * time.Minute
|
||||
o.RoutingTable.AutoRefresh = true
|
||||
o.RoutingTable.PeerFilter = EmptyRTFilter
|
||||
|
||||
o.MaxRecordAge = providers.ProvideValidity
|
||||
|
||||
o.BucketSize = defaultBucketSize
|
||||
o.Concurrency = 10
|
||||
o.Resiliency = 3
|
||||
o.LookupCheckConcurrency = 256
|
||||
|
||||
// MAGIC: It makes sense to set it to a multiple of OptProvReturnRatio * BucketSize. We chose a multiple of 4.
|
||||
o.OptimisticProvideJobsPoolSize = 60
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) Validate() error {
|
||||
if c.ProtocolPrefix != DefaultPrefix {
|
||||
return nil
|
||||
}
|
||||
if c.BucketSize != defaultBucketSize {
|
||||
return fmt.Errorf("protocol prefix %s must use bucket size %d", DefaultPrefix, defaultBucketSize)
|
||||
}
|
||||
if !c.EnableProviders {
|
||||
return fmt.Errorf("protocol prefix %s must have providers enabled", DefaultPrefix)
|
||||
}
|
||||
if !c.EnableValues {
|
||||
return fmt.Errorf("protocol prefix %s must have values enabled", DefaultPrefix)
|
||||
}
|
||||
|
||||
nsval, isNSVal := c.Validator.(record.NamespacedValidator)
|
||||
if !isNSVal {
|
||||
return fmt.Errorf("protocol prefix %s must use a namespaced Validator", DefaultPrefix)
|
||||
}
|
||||
|
||||
if len(nsval) != 2 {
|
||||
return fmt.Errorf("protocol prefix %s must have exactly two namespaced validators - /pk and /ipns", DefaultPrefix)
|
||||
}
|
||||
|
||||
if pkVal, pkValFound := nsval["pk"]; !pkValFound {
|
||||
return fmt.Errorf("protocol prefix %s must support the /pk namespaced Validator", DefaultPrefix)
|
||||
} else if _, ok := pkVal.(record.PublicKeyValidator); !ok {
|
||||
return fmt.Errorf("protocol prefix %s must use the record.PublicKeyValidator for the /pk namespace", DefaultPrefix)
|
||||
}
|
||||
|
||||
if ipnsVal, ipnsValFound := nsval["ipns"]; !ipnsValFound {
|
||||
return fmt.Errorf("protocol prefix %s must support the /ipns namespaced Validator", DefaultPrefix)
|
||||
} else if _, ok := ipnsVal.(ipns.Validator); !ok {
|
||||
return fmt.Errorf("protocol prefix %s must use ipns.Validator for the /ipns namespace", DefaultPrefix)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
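A hedged sketch of how this Config is normally driven: Defaults first, the caller's options on top, then fallbacks and validation in the constructor. The helper below is invented for the example:

package example

import (
	"fmt"

	dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
)

// buildConfig applies Defaults and then the caller's options, mirroring the
// construction flow this package is written for.
func buildConfig(userOpts ...dhtcfg.Option) (*dhtcfg.Config, error) {
	var cfg dhtcfg.Config
	if err := cfg.Apply(append([]dhtcfg.Option{dhtcfg.Defaults}, userOpts...)...); err != nil {
		return nil, fmt.Errorf("applying options: %w", err)
	}
	// The DHT constructor would next call cfg.ApplyFallbacks(h) to install the
	// default /pk and /ipns validators, and then cfg.Validate().
	return &cfg, nil
}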
16 vendor/github.com/libp2p/go-libp2p-kad-dht/internal/config/quorum.go generated vendored Normal file
@@ -0,0 +1,16 @@
package config

import "github.com/libp2p/go-libp2p/core/routing"

type QuorumOptionKey struct{}

const defaultQuorum = 0

// GetQuorum defaults to 0 if no option is found.
func GetQuorum(opts *routing.Options) int {
	responsesNeeded, ok := opts.Other[QuorumOptionKey{}].(int)
	if !ok {
		responsesNeeded = defaultQuorum
	}
	return responsesNeeded
}
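A hedged sketch of how a quorum value travels through routing.Options into GetQuorum; the option helper here is invented for illustration (the DHT package may expose its own helper for this):

package example

import (
	"github.com/libp2p/go-libp2p/core/routing"

	dhtcfg "github.com/libp2p/go-libp2p-kad-dht/internal/config"
)

// withQuorum is an illustrative routing.Option that records how many
// responses a value lookup should wait for.
func withQuorum(n int) routing.Option {
	return func(opts *routing.Options) error {
		if opts.Other == nil {
			opts.Other = make(map[interface{}]interface{}, 1)
		}
		opts.Other[dhtcfg.QuorumOptionKey{}] = n
		return nil
	}
}

// readQuorum applies the options the way a GetValue implementation would and
// then extracts the quorum; 0 means "use the default behaviour".
func readQuorum(opts ...routing.Option) (int, error) {
	var cfg routing.Options
	if err := cfg.Apply(opts...); err != nil {
		return 0, err
	}
	return dhtcfg.GetQuorum(&cfg), nil
}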
28 vendor/github.com/libp2p/go-libp2p-kad-dht/internal/ctx_mutex.go generated vendored Normal file
@@ -0,0 +1,28 @@
package internal

import (
	"context"
)

type CtxMutex chan struct{}

func NewCtxMutex() CtxMutex {
	return make(CtxMutex, 1)
}

func (m CtxMutex) Lock(ctx context.Context) error {
	select {
	case m <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (m CtxMutex) Unlock() {
	select {
	case <-m:
	default:
		panic("not locked")
	}
}
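A hedged usage sketch: unlike sync.Mutex, this mutex lets a caller abandon the lock attempt when its context expires. The wrapper below is invented for illustration:

package example

import (
	"context"
	"time"

	"github.com/libp2p/go-libp2p-kad-dht/internal"
)

// tryCritical runs fn under the mutex, but gives up (returning the context
// error) if the lock cannot be acquired within the caller's deadline.
func tryCritical(ctx context.Context, mu internal.CtxMutex, fn func()) error {
	ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
	defer cancel()

	if err := mu.Lock(ctx); err != nil {
		return err // cancelled or timed out while waiting for the lock
	}
	defer mu.Unlock()

	fn()
	return nil
}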
5 vendor/github.com/libp2p/go-libp2p-kad-dht/internal/errors.go generated vendored Normal file
@@ -0,0 +1,5 @@
package internal

import "errors"

var ErrIncorrectRecord = errors.New("received incorrect record")
92 vendor/github.com/libp2p/go-libp2p-kad-dht/internal/logging.go generated vendored Normal file
@@ -0,0 +1,92 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/multiformats/go-multibase"
|
||||
"github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
func multibaseB32Encode(k []byte) string {
|
||||
res, err := multibase.Encode(multibase.Base32, k)
|
||||
if err != nil {
|
||||
// Should be unreachable
|
||||
panic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func tryFormatLoggableRecordKey(k string) (string, error) {
|
||||
if len(k) == 0 {
|
||||
return "", fmt.Errorf("LoggableRecordKey is empty")
|
||||
}
|
||||
var proto, cstr string
|
||||
if k[0] == '/' {
|
||||
// it's a path (probably)
|
||||
protoEnd := strings.IndexByte(k[1:], '/')
|
||||
if protoEnd < 0 {
|
||||
return "", fmt.Errorf("LoggableRecordKey starts with '/' but is not a path: %s", multibaseB32Encode([]byte(k)))
|
||||
}
|
||||
proto = k[1 : protoEnd+1]
|
||||
cstr = k[protoEnd+2:]
|
||||
|
||||
encStr := multibaseB32Encode([]byte(cstr))
|
||||
return fmt.Sprintf("/%s/%s", proto, encStr), nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("LoggableRecordKey is not a path: %s", multibaseB32Encode([]byte(cstr)))
|
||||
}
|
||||
|
||||
type LoggableRecordKeyString string
|
||||
|
||||
func (lk LoggableRecordKeyString) String() string {
|
||||
k := string(lk)
|
||||
newKey, err := tryFormatLoggableRecordKey(k)
|
||||
if err == nil {
|
||||
return newKey
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
type LoggableRecordKeyBytes []byte
|
||||
|
||||
func (lk LoggableRecordKeyBytes) String() string {
|
||||
k := string(lk)
|
||||
newKey, err := tryFormatLoggableRecordKey(k)
|
||||
if err == nil {
|
||||
return newKey
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
type LoggableProviderRecordBytes []byte
|
||||
|
||||
func (lk LoggableProviderRecordBytes) String() string {
|
||||
newKey, err := tryFormatLoggableProviderKey(lk)
|
||||
if err == nil {
|
||||
return newKey
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func tryFormatLoggableProviderKey(k []byte) (string, error) {
|
||||
if len(k) == 0 {
|
||||
return "", fmt.Errorf("LoggableProviderKey is empty")
|
||||
}
|
||||
|
||||
encodedKey := multibaseB32Encode(k)
|
||||
|
||||
// The DHT used to provide CIDs, but now provides multihashes
|
||||
// TODO: Drop this when enough of the network has upgraded
|
||||
if _, err := cid.Cast(k); err == nil {
|
||||
return encodedKey, nil
|
||||
}
|
||||
|
||||
if _, err := multihash.Cast(k); err == nil {
|
||||
return encodedKey, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("LoggableProviderKey is not a Multihash or CID: %s", encodedKey)
|
||||
}
|
||||
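These wrapper types defer the multibase formatting until a log line is actually written. A hedged sketch of the behaviour, with made-up key values:

package example

import (
	"fmt"

	"github.com/libp2p/go-libp2p-kad-dht/internal"
)

// printKeys shows what the lazy formatters produce: a path-shaped record key
// is re-encoded as /namespace/<multibase-base32>, while anything else comes
// back as a descriptive error string instead of raw bytes.
func printKeys() {
	goodKey := internal.LoggableRecordKeyString("/ipns/some-binary-suffix")
	badKey := internal.LoggableRecordKeyString("not-a-path")

	// The conversion only happens when String() is called, i.e. when the
	// logger actually emits the line.
	fmt.Println(goodKey.String())
	fmt.Println(badKey.String())
}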
387 vendor/github.com/libp2p/go-libp2p-kad-dht/internal/net/message_manager.go generated vendored Normal file
@@ -0,0 +1,387 @@
|
||||
package net
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
logging "github.com/ipfs/go-log"
|
||||
"github.com/libp2p/go-msgio"
|
||||
|
||||
//lint:ignore SA1019 TODO migrate away from gogo pb
|
||||
"github.com/libp2p/go-msgio/protoio"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/tag"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
)
|
||||
|
||||
var dhtReadMessageTimeout = 10 * time.Second
|
||||
|
||||
// ErrReadTimeout is an error that occurs when no message is read within the timeout period.
|
||||
var ErrReadTimeout = fmt.Errorf("timed out reading response")
|
||||
|
||||
var logger = logging.Logger("dht")
|
||||
|
||||
// messageSenderImpl is responsible for sending requests and messages to peers efficiently, including reuse of streams.
|
||||
// It also tracks metrics for sent requests and messages.
|
||||
type messageSenderImpl struct {
|
||||
host host.Host // the network services we need
|
||||
smlk sync.Mutex
|
||||
strmap map[peer.ID]*peerMessageSender
|
||||
protocols []protocol.ID
|
||||
}
|
||||
|
||||
func NewMessageSenderImpl(h host.Host, protos []protocol.ID) pb.MessageSenderWithDisconnect {
|
||||
return &messageSenderImpl{
|
||||
host: h,
|
||||
strmap: make(map[peer.ID]*peerMessageSender),
|
||||
protocols: protos,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *messageSenderImpl) OnDisconnect(ctx context.Context, p peer.ID) {
|
||||
m.smlk.Lock()
|
||||
defer m.smlk.Unlock()
|
||||
ms, ok := m.strmap[p]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(m.strmap, p)
|
||||
|
||||
// Do this asynchronously as ms.lk can block for a while.
|
||||
go func() {
|
||||
if err := ms.lk.Lock(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
defer ms.lk.Unlock()
|
||||
ms.invalidate()
|
||||
}()
|
||||
}
|
||||
|
||||
// SendRequest sends out a request, but also makes sure to
|
||||
// measure the RTT for latency measurements.
|
||||
func (m *messageSenderImpl) SendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
|
||||
ctx, _ = tag.New(ctx, metrics.UpsertMessageType(pmes))
|
||||
|
||||
ms, err := m.messageSenderForPeer(ctx, p)
|
||||
if err != nil {
|
||||
stats.Record(ctx,
|
||||
metrics.SentRequests.M(1),
|
||||
metrics.SentRequestErrors.M(1),
|
||||
)
|
||||
logger.Debugw("request failed to open message sender", "error", err, "to", p)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
rpmes, err := ms.SendRequest(ctx, pmes)
|
||||
if err != nil {
|
||||
stats.Record(ctx,
|
||||
metrics.SentRequests.M(1),
|
||||
metrics.SentRequestErrors.M(1),
|
||||
)
|
||||
logger.Debugw("request failed", "error", err, "to", p)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Record(ctx,
|
||||
metrics.SentRequests.M(1),
|
||||
metrics.SentBytes.M(int64(pmes.Size())),
|
||||
metrics.OutboundRequestLatency.M(float64(time.Since(start))/float64(time.Millisecond)),
|
||||
)
|
||||
m.host.Peerstore().RecordLatency(p, time.Since(start))
|
||||
return rpmes, nil
|
||||
}
|
||||
|
||||
// SendMessage sends out a message
|
||||
func (m *messageSenderImpl) SendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
|
||||
ctx, _ = tag.New(ctx, metrics.UpsertMessageType(pmes))
|
||||
|
||||
ms, err := m.messageSenderForPeer(ctx, p)
|
||||
if err != nil {
|
||||
stats.Record(ctx,
|
||||
metrics.SentMessages.M(1),
|
||||
metrics.SentMessageErrors.M(1),
|
||||
)
|
||||
logger.Debugw("message failed to open message sender", "error", err, "to", p)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ms.SendMessage(ctx, pmes); err != nil {
|
||||
stats.Record(ctx,
|
||||
metrics.SentMessages.M(1),
|
||||
metrics.SentMessageErrors.M(1),
|
||||
)
|
||||
logger.Debugw("message failed", "error", err, "to", p)
|
||||
return err
|
||||
}
|
||||
|
||||
stats.Record(ctx,
|
||||
metrics.SentMessages.M(1),
|
||||
metrics.SentBytes.M(int64(pmes.Size())),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *messageSenderImpl) messageSenderForPeer(ctx context.Context, p peer.ID) (*peerMessageSender, error) {
|
||||
m.smlk.Lock()
|
||||
ms, ok := m.strmap[p]
|
||||
if ok {
|
||||
m.smlk.Unlock()
|
||||
return ms, nil
|
||||
}
|
||||
ms = &peerMessageSender{p: p, m: m, lk: internal.NewCtxMutex()}
|
||||
m.strmap[p] = ms
|
||||
m.smlk.Unlock()
|
||||
|
||||
if err := ms.prepOrInvalidate(ctx); err != nil {
|
||||
m.smlk.Lock()
|
||||
defer m.smlk.Unlock()
|
||||
|
||||
if msCur, ok := m.strmap[p]; ok {
|
||||
// Changed. Use the new one, old one is invalid and
|
||||
// not in the map so we can just throw it away.
|
||||
if ms != msCur {
|
||||
return msCur, nil
|
||||
}
|
||||
// Not changed, remove the now invalid stream from the
|
||||
// map.
|
||||
delete(m.strmap, p)
|
||||
}
|
||||
// Invalid but not in map. Must have been removed by a disconnect.
|
||||
return nil, err
|
||||
}
|
||||
// All ready to go.
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
// peerMessageSender is responsible for sending requests and messages to a particular peer
|
||||
type peerMessageSender struct {
|
||||
s network.Stream
|
||||
r msgio.ReadCloser
|
||||
lk internal.CtxMutex
|
||||
p peer.ID
|
||||
m *messageSenderImpl
|
||||
|
||||
invalid bool
|
||||
singleMes int
|
||||
}
|
||||
|
||||
// invalidate is called before this peerMessageSender is removed from the strmap.
|
||||
// It prevents the peerMessageSender from being reused/reinitialized and then
|
||||
// forgotten (leaving the stream open).
|
||||
func (ms *peerMessageSender) invalidate() {
|
||||
ms.invalid = true
|
||||
if ms.s != nil {
|
||||
_ = ms.s.Reset()
|
||||
ms.s = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *peerMessageSender) prepOrInvalidate(ctx context.Context) error {
|
||||
if err := ms.lk.Lock(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
defer ms.lk.Unlock()
|
||||
|
||||
if err := ms.prep(ctx); err != nil {
|
||||
ms.invalidate()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *peerMessageSender) prep(ctx context.Context) error {
|
||||
if ms.invalid {
|
||||
return fmt.Errorf("message sender has been invalidated")
|
||||
}
|
||||
if ms.s != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We only want to speak to peers using our primary protocols. We do not want to query any peer that only speaks
|
||||
// one of the secondary "server" protocols that we happen to support (e.g. older nodes that we can respond to for
|
||||
// backwards compatibility reasons).
|
||||
nstr, err := ms.m.host.NewStream(ctx, ms.p, ms.m.protocols...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms.r = msgio.NewVarintReaderSize(nstr, network.MessageSizeMax)
|
||||
ms.s = nstr
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// streamReuseTries is the number of times we will try to reuse a stream to a
|
||||
// given peer before giving up and reverting to the old one-message-per-stream
|
||||
// behaviour.
|
||||
const streamReuseTries = 3
|
||||
|
||||
func (ms *peerMessageSender) SendMessage(ctx context.Context, pmes *pb.Message) error {
|
||||
if err := ms.lk.Lock(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
defer ms.lk.Unlock()
|
||||
|
||||
retry := false
|
||||
for {
|
||||
if err := ms.prep(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ms.writeMsg(pmes); err != nil {
|
||||
_ = ms.s.Reset()
|
||||
ms.s = nil
|
||||
|
||||
if retry {
|
||||
logger.Debugw("error writing message", "error", err)
|
||||
return err
|
||||
}
|
||||
logger.Debugw("error writing message", "error", err, "retrying", true)
|
||||
retry = true
|
||||
continue
|
||||
}
|
||||
|
||||
var err error
|
||||
if ms.singleMes > streamReuseTries {
|
||||
err = ms.s.Close()
|
||||
ms.s = nil
|
||||
} else if retry {
|
||||
ms.singleMes++
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *peerMessageSender) SendRequest(ctx context.Context, pmes *pb.Message) (*pb.Message, error) {
	if err := ms.lk.Lock(ctx); err != nil {
		return nil, err
	}
	defer ms.lk.Unlock()

	retry := false
	for {
		if err := ms.prep(ctx); err != nil {
			return nil, err
		}

		if err := ms.writeMsg(pmes); err != nil {
			_ = ms.s.Reset()
			ms.s = nil

			if retry {
				logger.Debugw("error writing message", "error", err)
				return nil, err
			}
			logger.Debugw("error writing message", "error", err, "retrying", true)
			retry = true
			continue
		}

		mes := new(pb.Message)
		if err := ms.ctxReadMsg(ctx, mes); err != nil {
			_ = ms.s.Reset()
			ms.s = nil
			if err == context.Canceled {
				// retry would be same error
				return nil, err
			}
			if retry {
				logger.Debugw("error reading message", "error", err)
				return nil, err
			}
			logger.Debugw("error reading message", "error", err, "retrying", true)
			retry = true
			continue
		}

		var err error
		if ms.singleMes > streamReuseTries {
			err = ms.s.Close()
			ms.s = nil
		} else if retry {
			ms.singleMes++
		}

		return mes, err
	}
}

func (ms *peerMessageSender) writeMsg(pmes *pb.Message) error {
	return WriteMsg(ms.s, pmes)
}

func (ms *peerMessageSender) ctxReadMsg(ctx context.Context, mes *pb.Message) error {
	errc := make(chan error, 1)
	go func(r msgio.ReadCloser) {
		defer close(errc)
		bytes, err := r.ReadMsg()
		defer r.ReleaseMsg(bytes)
		if err != nil {
			errc <- err
			return
		}
		errc <- mes.Unmarshal(bytes)
	}(ms.r)

	t := time.NewTimer(dhtReadMessageTimeout)
	defer t.Stop()

	select {
	case err := <-errc:
		return err
	case <-ctx.Done():
		return ctx.Err()
	case <-t.C:
		return ErrReadTimeout
	}
}

// The Protobuf writer performs multiple small writes when writing a message.
// We need to buffer those writes, to make sure that we're not sending a new
// packet for every single write.
type bufferedDelimitedWriter struct {
	*bufio.Writer
	protoio.WriteCloser
}

var writerPool = sync.Pool{
	New: func() interface{} {
		w := bufio.NewWriter(nil)
		return &bufferedDelimitedWriter{
			Writer:      w,
			WriteCloser: protoio.NewDelimitedWriter(w),
		}
	},
}

func WriteMsg(w io.Writer, mes *pb.Message) error {
	bw := writerPool.Get().(*bufferedDelimitedWriter)
	bw.Reset(w)
	err := bw.WriteMsg(mes)
	if err == nil {
		err = bw.Flush()
	}
	bw.Reset(nil)
	writerPool.Put(bw)
	return err
}

func (w *bufferedDelimitedWriter) Flush() error {
	return w.Writer.Flush()
}
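The `writerPool` above illustrates a general Go pattern: pool a `bufio.Writer` together with the framing writer that wraps it, so each message goes out as a single buffered write without reallocating buffers. Below is a minimal, self-contained sketch of the same idea using only the standard library; the 4-byte length prefix and the `writeFrame` helper are illustrative assumptions, not part of this package.

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"sync"
)

// pooledWriter pairs a reusable bufio.Writer with simple length-prefix framing,
// mirroring the bufferedDelimitedWriter + sync.Pool combination above.
type pooledWriter struct {
	*bufio.Writer
}

var framePool = sync.Pool{
	New: func() interface{} {
		return &pooledWriter{Writer: bufio.NewWriter(nil)}
	},
}

// writeFrame buffers a 4-byte big-endian length prefix plus the payload and
// flushes them to w as one write.
func writeFrame(w io.Writer, payload []byte) error {
	pw := framePool.Get().(*pooledWriter)
	pw.Reset(w)

	var lenBuf [4]byte
	binary.BigEndian.PutUint32(lenBuf[:], uint32(len(payload)))

	_, err := pw.Write(lenBuf[:])
	if err == nil {
		_, err = pw.Write(payload)
	}
	if err == nil {
		err = pw.Flush()
	}

	pw.Reset(nil) // drop the reference to w before returning the writer to the pool
	framePool.Put(pw)
	return err
}

func main() {
	var buf bytes.Buffer
	if err := writeFrame(&buf, []byte("hello")); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 00 00 00 05 68 65 6c 6c 6f
}
```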
32
vendor/github.com/libp2p/go-libp2p-kad-dht/internal/tracing.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
package internal

import (
	"context"
	"fmt"
	"unicode/utf8"

	"github.com/multiformats/go-multibase"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
	return otel.Tracer("go-libp2p-kad-dht").Start(ctx, fmt.Sprintf("KademliaDHT.%s", name), opts...)
}

// KeyAsAttribute formats a DHT key into a suitable tracing attribute.
// DHT keys can be either valid utf-8 or binary, when they are derived from, for example, a multihash.
// Tracing (and notably OpenTelemetry+grpc exporter) requires valid utf-8 for string attributes.
func KeyAsAttribute(name string, key string) attribute.KeyValue {
	b := []byte(key)
	if utf8.Valid(b) {
		return attribute.String(name, key)
	}
	encoded, err := multibase.Encode(multibase.Base58BTC, b)
	if err != nil {
		// should be unreachable
		panic(err)
	}
	return attribute.String(name, encoded)
}
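For orientation, here is a hedged, standalone sketch of the pattern these helpers support: recording a possibly-binary DHT key on an OpenTelemetry span as a UTF-8-safe string attribute. The tracer name, span name, and key bytes are placeholders for the example.

```go
package main

import (
	"context"
	"unicode/utf8"

	"github.com/multiformats/go-multibase"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

// keyAttr mirrors KeyAsAttribute: binary keys are base58-encoded so the
// attribute value is always valid UTF-8.
func keyAttr(name, key string) attribute.KeyValue {
	if utf8.Valid([]byte(key)) {
		return attribute.String(name, key)
	}
	encoded, err := multibase.Encode(multibase.Base58BTC, []byte(key))
	if err != nil {
		panic(err) // only reachable with an unknown encoding constant
	}
	return attribute.String(name, encoded)
}

func main() {
	ctx := context.Background()
	// With no SDK configured this is a no-op tracer, which is fine for a sketch.
	_, span := otel.Tracer("example").Start(ctx, "KademliaDHT.GetValue")
	defer span.End()

	span.SetAttributes(keyAttr("Key", string([]byte{0xff, 0x01, 0x02})))
}
```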
85
vendor/github.com/libp2p/go-libp2p-kad-dht/lookup.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
package dht

import (
	"context"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p-kad-dht/internal"
	"github.com/libp2p/go-libp2p-kad-dht/metrics"
	"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
	kb "github.com/libp2p/go-libp2p-kbucket"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/routing"
	"go.opentelemetry.io/otel/trace"
)

// GetClosestPeers is a Kademlia 'node lookup' operation. Returns the K closest
// peers to the given key.
//
// If the context is canceled, this function will return the context error along
// with the closest K peers it has found so far.
func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key string) ([]peer.ID, error) {
	ctx, span := internal.StartSpan(ctx, "IpfsDHT.GetClosestPeers", trace.WithAttributes(internal.KeyAsAttribute("Key", key)))
	defer span.End()

	if key == "" {
		return nil, fmt.Errorf("can't lookup empty key")
	}

	//TODO: I can break the interface! return []peer.ID
	lookupRes, err := dht.runLookupWithFollowup(ctx, key, dht.pmGetClosestPeers(key), func(*qpeerset.QueryPeerset) bool { return false })

	if err != nil {
		return nil, err
	}

	if err := ctx.Err(); err != nil || !lookupRes.completed {
		return lookupRes.peers, err
	}

	// tracking lookup results for network size estimator
	if err = dht.nsEstimator.Track(key, lookupRes.closest); err != nil {
		logger.Warnf("network size estimator track peers: %s", err)
	}

	if ns, err := dht.nsEstimator.NetworkSize(); err == nil {
		metrics.NetworkSize.M(int64(ns))
	}

	// refresh the cpl for this key as the query was successful
	dht.routingTable.ResetCplRefreshedAtForID(kb.ConvertKey(key), time.Now())

	return lookupRes.peers, nil
}

// pmGetClosestPeers is the protocol messenger version of the GetClosestPeers queryFn.
func (dht *IpfsDHT) pmGetClosestPeers(key string) queryFn {
	return func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
		// For DHT query command
		routing.PublishQueryEvent(ctx, &routing.QueryEvent{
			Type: routing.SendingQuery,
			ID:   p,
		})

		peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, peer.ID(key))
		if err != nil {
			logger.Debugf("error getting closer peers: %s", err)
			routing.PublishQueryEvent(ctx, &routing.QueryEvent{
				Type:  routing.QueryError,
				ID:    p,
				Extra: err.Error(),
			})
			return nil, err
		}

		// For DHT query command
		routing.PublishQueryEvent(ctx, &routing.QueryEvent{
			Type:      routing.PeerResponse,
			ID:        p,
			Responses: peers,
		})

		return peers, err
	}
}
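For orientation, a minimal caller of `GetClosestPeers` might look like the sketch below. The host construction and bootstrap details are elided and the key is an arbitrary placeholder, so treat this as an outline rather than a complete program.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p"
	dht "github.com/libp2p/go-libp2p-kad-dht"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// In practice the DHT also needs bootstrap peers before lookups succeed.
	kad, err := dht.New(ctx, h)
	if err != nil {
		panic(err)
	}

	peers, err := kad.GetClosestPeers(ctx, "example-key")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	for _, p := range peers {
		fmt.Println("close peer:", p)
	}
}
```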
313
vendor/github.com/libp2p/go-libp2p-kad-dht/lookup_optim.go
generated
vendored
Normal file
@@ -0,0 +1,313 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/metrics"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/netsize"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multihash"
|
||||
ks "github.com/whyrusleeping/go-keyspace"
|
||||
"gonum.org/v1/gonum/mathext"
|
||||
)
|
||||
|
||||
const (
|
||||
// optProvIndividualThresholdCertainty describes how sure we want to be that an individual peer that
|
||||
// we find during walking the DHT actually belongs to the k-closest peers based on the current network size
|
||||
// estimation.
|
||||
optProvIndividualThresholdCertainty = 0.9
|
||||
|
||||
// optProvSetThresholdStrictness describes the probability that the set of closest peers is actually further
|
||||
// away then the calculated set threshold. Put differently, what is the probability that we are too strict and
|
||||
// don't terminate the process early because we can't find any closer peers.
|
||||
optProvSetThresholdStrictness = 0.1
|
||||
|
||||
// optProvReturnRatio corresponds to how many ADD_PROVIDER RPCs must have completed (regardless of success)
|
||||
// before we return to the user. The ratio of 0.75 equals 15 RPC as it is based on the Kademlia bucket size.
|
||||
optProvReturnRatio = 0.75
|
||||
)
|
||||
|
||||
type addProviderRPCState int
|
||||
|
||||
const (
|
||||
scheduled addProviderRPCState = iota + 1
|
||||
success
|
||||
failure
|
||||
)
|
||||
|
||||
type optimisticState struct {
|
||||
// context for all ADD_PROVIDER RPCs
|
||||
putCtx context.Context
|
||||
|
||||
// reference to the DHT
|
||||
dht *IpfsDHT
|
||||
|
||||
// the most recent network size estimation
|
||||
networkSize int32
|
||||
|
||||
// a channel indicating when an ADD_PROVIDER RPC completed (successful or not)
|
||||
doneChan chan struct{}
|
||||
|
||||
// tracks which peers we have stored the provider records with
|
||||
peerStatesLk sync.RWMutex
|
||||
peerStates map[peer.ID]addProviderRPCState
|
||||
|
||||
// the key to provide
|
||||
key string
|
||||
|
||||
// the key to provide transformed into the Kademlia key space
|
||||
ksKey ks.Key
|
||||
|
||||
// distance threshold for individual peers. If peers are closer than this number we store
|
||||
// the provider records right away.
|
||||
individualThreshold float64
|
||||
|
||||
// distance threshold for the set of bucketSize closest peers. If the average distance of the bucketSize
|
||||
// closest peers is below this number we stop the DHT walk and store the remaining provider records.
|
||||
// "remaining" because we have likely already stored some on peers that were below the individualThreshold.
|
||||
setThreshold float64
|
||||
|
||||
// number of completed (regardless of success) ADD_PROVIDER RPCs before we return control back to the user.
|
||||
returnThreshold int
|
||||
|
||||
// putProvDone counts the ADD_PROVIDER RPCs that have completed (successful and unsuccessful)
|
||||
putProvDone atomic.Int32
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) newOptimisticState(ctx context.Context, key string) (*optimisticState, error) {
|
||||
// get network size and err out if there is no reasonable estimate
|
||||
networkSize, err := dht.nsEstimator.NetworkSize()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
individualThreshold := mathext.GammaIncRegInv(float64(dht.bucketSize), 1-optProvIndividualThresholdCertainty) / float64(networkSize)
|
||||
setThreshold := mathext.GammaIncRegInv(float64(dht.bucketSize)/2.0+1, 1-optProvSetThresholdStrictness) / float64(networkSize)
|
||||
returnThreshold := int(math.Ceil(float64(dht.bucketSize) * optProvReturnRatio))
|
||||
|
||||
return &optimisticState{
|
||||
putCtx: ctx,
|
||||
dht: dht,
|
||||
key: key,
|
||||
doneChan: make(chan struct{}, returnThreshold), // buffered channel to not miss events
|
||||
ksKey: ks.XORKeySpace.Key([]byte(key)),
|
||||
networkSize: networkSize,
|
||||
peerStates: map[peer.ID]addProviderRPCState{},
|
||||
individualThreshold: individualThreshold,
|
||||
setThreshold: setThreshold,
|
||||
returnThreshold: returnThreshold,
|
||||
putProvDone: atomic.Int32{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) optimisticProvide(outerCtx context.Context, keyMH multihash.Multihash) error {
|
||||
key := string(keyMH)
|
||||
|
||||
if key == "" {
|
||||
return fmt.Errorf("can't lookup empty key")
|
||||
}
|
||||
|
||||
// initialize new context for all putProvider operations.
|
||||
// We don't want to give the outer context to the put operations as we return early before all
|
||||
// put operations have finished to avoid the long tail of the latency distribution. If we
|
||||
// provided the outer context the put operations may be cancelled depending on what happens
|
||||
// with the context on the user side.
|
||||
putCtx, putCtxCancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
|
||||
es, err := dht.newOptimisticState(putCtx, key)
|
||||
if err != nil {
|
||||
putCtxCancel()
|
||||
return err
|
||||
}
|
||||
|
||||
// initialize context that finishes when this function returns
|
||||
innerCtx, innerCtxCancel := context.WithCancel(outerCtx)
|
||||
defer innerCtxCancel()
|
||||
|
||||
go func() {
|
||||
select {
|
||||
case <-outerCtx.Done():
|
||||
// If the outer context gets cancelled while we're still in this function. We stop all
|
||||
// pending put operations.
|
||||
putCtxCancel()
|
||||
case <-innerCtx.Done():
|
||||
// We have returned from this function. Ignore cancellations of the outer context and continue
|
||||
// with the remaining put operations.
|
||||
}
|
||||
}()
|
||||
|
||||
lookupRes, err := dht.runLookupWithFollowup(outerCtx, key, dht.pmGetClosestPeers(key), es.stopFn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Store the provider records with all the closest peers we haven't already contacted/scheduled interaction with.
|
||||
es.peerStatesLk.Lock()
|
||||
for _, p := range lookupRes.peers {
|
||||
if _, found := es.peerStates[p]; found {
|
||||
continue
|
||||
}
|
||||
|
||||
go es.putProviderRecord(p)
|
||||
es.peerStates[p] = scheduled
|
||||
}
|
||||
es.peerStatesLk.Unlock()
|
||||
|
||||
// wait until a threshold number of RPCs have completed
|
||||
es.waitForRPCs()
|
||||
|
||||
if err := outerCtx.Err(); err != nil || !lookupRes.completed { // likely the "completed" field is false but that's not a given
|
||||
return err
|
||||
}
|
||||
|
||||
// tracking lookup results for network size estimator as "completed" is true
|
||||
if err = dht.nsEstimator.Track(key, lookupRes.closest); err != nil {
|
||||
logger.Warnf("network size estimator track peers: %s", err)
|
||||
}
|
||||
|
||||
if ns, err := dht.nsEstimator.NetworkSize(); err == nil {
|
||||
metrics.NetworkSize.M(int64(ns))
|
||||
}
|
||||
|
||||
// refresh the cpl for this key as the query was successful
|
||||
dht.routingTable.ResetCplRefreshedAtForID(kb.ConvertKey(key), time.Now())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (os *optimisticState) stopFn(qps *qpeerset.QueryPeerset) bool {
|
||||
os.peerStatesLk.Lock()
|
||||
defer os.peerStatesLk.Unlock()
|
||||
|
||||
// get currently known closest peers and check if any of them is already very close.
|
||||
// If so -> store provider records straight away.
|
||||
closest := qps.GetClosestNInStates(os.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried)
|
||||
distances := make([]float64, os.dht.bucketSize)
|
||||
for i, p := range closest {
|
||||
// calculate distance of peer p to the target key
|
||||
distances[i] = netsize.NormedDistance(p, os.ksKey)
|
||||
|
||||
// Check if we have already scheduled interaction or have actually interacted with that peer
|
||||
if _, found := os.peerStates[p]; found {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if peer is close enough to store the provider record with
|
||||
if distances[i] > os.individualThreshold {
|
||||
continue
|
||||
}
|
||||
|
||||
// peer is indeed very close already -> store the provider record directly with it!
|
||||
go os.putProviderRecord(p)
|
||||
|
||||
// keep track that we've scheduled storing a provider record with that peer
|
||||
os.peerStates[p] = scheduled
|
||||
}
|
||||
|
||||
// count number of peers we have scheduled to contact or have already successfully contacted via the above method
|
||||
scheduledAndSuccessCount := 0
|
||||
for _, s := range os.peerStates {
|
||||
if s == scheduled || s == success {
|
||||
scheduledAndSuccessCount += 1
|
||||
}
|
||||
}
|
||||
|
||||
// if we have already contacted/scheduled the RPC for more than bucketSize peers stop the procedure
|
||||
if scheduledAndSuccessCount >= os.dht.bucketSize {
|
||||
return true
|
||||
}
|
||||
|
||||
// calculate average distance of the set of closest peers
|
||||
sum := 0.0
|
||||
for _, d := range distances {
|
||||
sum += d
|
||||
}
|
||||
avg := sum / float64(len(distances))
|
||||
|
||||
// if the average is below the set threshold stop the procedure
|
||||
return avg < os.setThreshold
|
||||
}
|
||||
|
||||
func (os *optimisticState) putProviderRecord(pid peer.ID) {
|
||||
err := os.dht.protoMessenger.PutProviderAddrs(os.putCtx, pid, []byte(os.key), peer.AddrInfo{
|
||||
ID: os.dht.self,
|
||||
Addrs: os.dht.filterAddrs(os.dht.host.Addrs()),
|
||||
})
|
||||
os.peerStatesLk.Lock()
|
||||
if err != nil {
|
||||
os.peerStates[pid] = failure
|
||||
} else {
|
||||
os.peerStates[pid] = success
|
||||
}
|
||||
os.peerStatesLk.Unlock()
|
||||
|
||||
// indicate that this ADD_PROVIDER RPC has completed
|
||||
os.doneChan <- struct{}{}
|
||||
}
|
||||
|
||||
// waitForRPCs waits for a subset of ADD_PROVIDER RPCs to complete and then acquire a lease on
|
||||
// a bound channel to return early back to the user and prevent unbound asynchronicity. If
|
||||
// there are already too many requests in-flight we are just waiting for our current set to
|
||||
// finish.
|
||||
func (os *optimisticState) waitForRPCs() {
|
||||
os.peerStatesLk.RLock()
|
||||
rpcCount := len(os.peerStates)
|
||||
os.peerStatesLk.RUnlock()
|
||||
|
||||
// returnThreshold can't be larger than the total number issued RPCs
|
||||
if os.returnThreshold > rpcCount {
|
||||
os.returnThreshold = rpcCount
|
||||
}
|
||||
|
||||
// Wait until returnThreshold ADD_PROVIDER RPCs have returned
|
||||
for range os.doneChan {
|
||||
if int(os.putProvDone.Add(1)) == os.returnThreshold {
|
||||
break
|
||||
}
|
||||
}
|
||||
// At this point only a subset of all ADD_PROVIDER RPCs have completed.
|
||||
// We want to give control back to the user as soon as possible because
|
||||
// it is highly likely that at least one of the remaining RPCs will time
|
||||
// out and thus slow down the whole processes. The provider records will
|
||||
// already be available with less than the total number of RPCs having
|
||||
// finished. This has been investigated here:
|
||||
// https://github.com/protocol/network-measurements/blob/master/results/rfm17-provider-record-liveness.md
|
||||
|
||||
// For the remaining ADD_PROVIDER RPCs try to acquire a lease on the optProvJobsPool channel.
|
||||
// If that worked we need to consume the doneChan and release the acquired lease on the
|
||||
// optProvJobsPool channel.
|
||||
remaining := rpcCount - int(os.putProvDone.Load())
|
||||
for i := 0; i < remaining; i++ {
|
||||
select {
|
||||
case os.dht.optProvJobsPool <- struct{}{}:
|
||||
// We were able to acquire a lease on the optProvJobsPool channel.
|
||||
// Consume doneChan to release the acquired lease again.
|
||||
go os.consumeDoneChan(rpcCount)
|
||||
case <-os.doneChan:
|
||||
// We were not able to acquire a lease but an ADD_PROVIDER RPC resolved.
|
||||
if int(os.putProvDone.Add(1)) == rpcCount {
|
||||
close(os.doneChan)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (os *optimisticState) consumeDoneChan(until int) {
|
||||
// Wait for an RPC to finish
|
||||
<-os.doneChan
|
||||
|
||||
// Release acquired lease for other's to get a spot
|
||||
<-os.dht.optProvJobsPool
|
||||
|
||||
// If all RPCs have finished, close the channel.
|
||||
if int(os.putProvDone.Add(1)) == until {
|
||||
close(os.doneChan)
|
||||
}
|
||||
}
|
||||
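To make the optimistic-provide threshold constants above concrete, here is a small hedged sketch that reproduces the calculation in `newOptimisticState` for an assumed bucket size of 20 and an assumed network size estimate of 10,000 peers (both numbers are only illustrative).

```go
package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/mathext"
)

func main() {
	const (
		bucketSize  = 20      // assumed Kademlia bucket size (k)
		networkSize = 10000.0 // assumed current network size estimate
	)

	// Same formulas as newOptimisticState above, with the constants inlined.
	individual := mathext.GammaIncRegInv(bucketSize, 1-0.9) / networkSize
	set := mathext.GammaIncRegInv(bucketSize/2.0+1, 1-0.1) / networkSize
	returnThreshold := int(math.Ceil(bucketSize * 0.75))

	fmt.Printf("individual threshold: %g\n", individual)
	fmt.Printf("set threshold:        %g\n", set)
	fmt.Printf("return after %d ADD_PROVIDER RPCs\n", returnThreshold)
}
```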
117
vendor/github.com/libp2p/go-libp2p-kad-dht/metrics/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
pb "github.com/libp2p/go-libp2p-kad-dht/pb"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/tag"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
|
||||
defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
|
||||
)
|
||||
|
||||
// Keys
|
||||
var (
|
||||
KeyMessageType, _ = tag.NewKey("message_type")
|
||||
KeyPeerID, _ = tag.NewKey("peer_id")
|
||||
// KeyInstanceID identifies a dht instance by the pointer address.
|
||||
// Useful for differentiating between different dhts that have the same peer id.
|
||||
KeyInstanceID, _ = tag.NewKey("instance_id")
|
||||
)
|
||||
|
||||
// UpsertMessageType is a convenience upserts the message type
|
||||
// of a pb.Message into the KeyMessageType.
|
||||
func UpsertMessageType(m *pb.Message) tag.Mutator {
|
||||
return tag.Upsert(KeyMessageType, m.Type.String())
|
||||
}
|
||||
|
||||
// Measures
|
||||
var (
|
||||
ReceivedMessages = stats.Int64("libp2p.io/dht/kad/received_messages", "Total number of messages received per RPC", stats.UnitDimensionless)
|
||||
ReceivedMessageErrors = stats.Int64("libp2p.io/dht/kad/received_message_errors", "Total number of errors for messages received per RPC", stats.UnitDimensionless)
|
||||
ReceivedBytes = stats.Int64("libp2p.io/dht/kad/received_bytes", "Total received bytes per RPC", stats.UnitBytes)
|
||||
InboundRequestLatency = stats.Float64("libp2p.io/dht/kad/inbound_request_latency", "Latency per RPC", stats.UnitMilliseconds)
|
||||
OutboundRequestLatency = stats.Float64("libp2p.io/dht/kad/outbound_request_latency", "Latency per RPC", stats.UnitMilliseconds)
|
||||
SentMessages = stats.Int64("libp2p.io/dht/kad/sent_messages", "Total number of messages sent per RPC", stats.UnitDimensionless)
|
||||
SentMessageErrors = stats.Int64("libp2p.io/dht/kad/sent_message_errors", "Total number of errors for messages sent per RPC", stats.UnitDimensionless)
|
||||
SentRequests = stats.Int64("libp2p.io/dht/kad/sent_requests", "Total number of requests sent per RPC", stats.UnitDimensionless)
|
||||
SentRequestErrors = stats.Int64("libp2p.io/dht/kad/sent_request_errors", "Total number of errors for requests sent per RPC", stats.UnitDimensionless)
|
||||
SentBytes = stats.Int64("libp2p.io/dht/kad/sent_bytes", "Total sent bytes per RPC", stats.UnitBytes)
|
||||
NetworkSize = stats.Int64("libp2p.io/dht/kad/network_size", "Network size estimation", stats.UnitDimensionless)
|
||||
)
|
||||
|
||||
// Views
|
||||
var (
|
||||
ReceivedMessagesView = &view.View{
|
||||
Measure: ReceivedMessages,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
ReceivedMessageErrorsView = &view.View{
|
||||
Measure: ReceivedMessageErrors,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
ReceivedBytesView = &view.View{
|
||||
Measure: ReceivedBytes,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: defaultBytesDistribution,
|
||||
}
|
||||
InboundRequestLatencyView = &view.View{
|
||||
Measure: InboundRequestLatency,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: defaultMillisecondsDistribution,
|
||||
}
|
||||
OutboundRequestLatencyView = &view.View{
|
||||
Measure: OutboundRequestLatency,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: defaultMillisecondsDistribution,
|
||||
}
|
||||
SentMessagesView = &view.View{
|
||||
Measure: SentMessages,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
SentMessageErrorsView = &view.View{
|
||||
Measure: SentMessageErrors,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
SentRequestsView = &view.View{
|
||||
Measure: SentRequests,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
SentRequestErrorsView = &view.View{
|
||||
Measure: SentRequestErrors,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
SentBytesView = &view.View{
|
||||
Measure: SentBytes,
|
||||
TagKeys: []tag.Key{KeyMessageType, KeyPeerID, KeyInstanceID},
|
||||
Aggregation: defaultBytesDistribution,
|
||||
}
|
||||
NetworkSizeView = &view.View{
|
||||
Measure: NetworkSize,
|
||||
TagKeys: []tag.Key{KeyPeerID, KeyInstanceID},
|
||||
Aggregation: view.Count(),
|
||||
}
|
||||
)
|
||||
|
||||
// DefaultViews with all views in it.
|
||||
var DefaultViews = []*view.View{
|
||||
ReceivedMessagesView,
|
||||
ReceivedMessageErrorsView,
|
||||
ReceivedBytesView,
|
||||
InboundRequestLatencyView,
|
||||
OutboundRequestLatencyView,
|
||||
SentMessagesView,
|
||||
SentMessageErrorsView,
|
||||
SentRequestsView,
|
||||
SentRequestErrorsView,
|
||||
SentBytesView,
|
||||
NetworkSizeView,
|
||||
}
|
||||
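These are OpenCensus measures and views; an application only gets aggregated data out of them if it registers the views, roughly as in this hedged sketch (the exporter setup, e.g. Prometheus, is omitted).

```go
package main

import (
	"log"

	"github.com/libp2p/go-libp2p-kad-dht/metrics"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register the DHT views so recorded measurements are aggregated;
	// an exporter would still need to be attached separately.
	if err := view.Register(metrics.DefaultViews...); err != nil {
		log.Fatalf("register DHT views: %v", err)
	}
}
```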
284
vendor/github.com/libp2p/go-libp2p-kad-dht/netsize/netsize.go
generated
vendored
Normal file
@@ -0,0 +1,284 @@
|
||||
package netsize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
logging "github.com/ipfs/go-log"
|
||||
kbucket "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
ks "github.com/whyrusleeping/go-keyspace"
|
||||
)
|
||||
|
||||
// invalidEstimate indicates that we currently have no valid estimate cached.
|
||||
const invalidEstimate int32 = -1
|
||||
|
||||
var (
|
||||
ErrNotEnoughData = fmt.Errorf("not enough data")
|
||||
ErrWrongNumOfPeers = fmt.Errorf("expected bucket size number of peers")
|
||||
)
|
||||
|
||||
var (
|
||||
logger = logging.Logger("dht/netsize")
|
||||
MaxMeasurementAge = 2 * time.Hour
|
||||
MinMeasurementsThreshold = 5
|
||||
MaxMeasurementsThreshold = 150
|
||||
keyspaceMaxInt, _ = new(big.Int).SetString(strings.Repeat("1", 256), 2)
|
||||
keyspaceMaxFloat = new(big.Float).SetInt(keyspaceMaxInt)
|
||||
)
|
||||
|
||||
type Estimator struct {
|
||||
localID kbucket.ID
|
||||
rt *kbucket.RoutingTable
|
||||
bucketSize int
|
||||
|
||||
measurementsLk sync.RWMutex
|
||||
measurements map[int][]measurement
|
||||
|
||||
netSizeCache int32
|
||||
}
|
||||
|
||||
func NewEstimator(localID peer.ID, rt *kbucket.RoutingTable, bucketSize int) *Estimator {
|
||||
// initialize map to hold measurement observations
|
||||
measurements := map[int][]measurement{}
|
||||
for i := 0; i < bucketSize; i++ {
|
||||
measurements[i] = []measurement{}
|
||||
}
|
||||
|
||||
return &Estimator{
|
||||
localID: kbucket.ConvertPeerID(localID),
|
||||
rt: rt,
|
||||
bucketSize: bucketSize,
|
||||
measurements: measurements,
|
||||
netSizeCache: invalidEstimate,
|
||||
}
|
||||
}
|
||||
|
||||
// NormedDistance calculates the normed XOR distance of the given keys (from 0 to 1).
|
||||
func NormedDistance(p peer.ID, k ks.Key) float64 {
|
||||
pKey := ks.XORKeySpace.Key([]byte(p))
|
||||
ksDistance := new(big.Float).SetInt(pKey.Distance(k))
|
||||
normedDist, _ := new(big.Float).Quo(ksDistance, keyspaceMaxFloat).Float64()
|
||||
return normedDist
|
||||
}
|
||||
|
||||
type measurement struct {
|
||||
distance float64
|
||||
weight float64
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
// Track tracks the list of peers for the given key to incorporate in the next network size estimate.
|
||||
// key is expected **NOT** to be in the kademlia keyspace and peers is expected to be a sorted list of
|
||||
// the closest peers to the given key (the closest first).
|
||||
// This function expects peers to have the same length as the routing table bucket size. It also
|
||||
// strips old and limits the number of data points (favouring new).
|
||||
func (e *Estimator) Track(key string, peers []peer.ID) error {
|
||||
e.measurementsLk.Lock()
|
||||
defer e.measurementsLk.Unlock()
|
||||
|
||||
// sanity check
|
||||
if len(peers) != e.bucketSize {
|
||||
return ErrWrongNumOfPeers
|
||||
}
|
||||
|
||||
logger.Debugw("Tracking peers for key", "key", key)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
// invalidate cache
|
||||
atomic.StoreInt32(&e.netSizeCache, invalidEstimate)
|
||||
|
||||
// Calculate weight for the peer distances.
|
||||
weight := e.calcWeight(key, peers)
|
||||
|
||||
// Map given key to the Kademlia key space (hash it)
|
||||
ksKey := ks.XORKeySpace.Key([]byte(key))
|
||||
|
||||
// the maximum age timestamp of the measurement data points
|
||||
maxAgeTs := now.Add(-MaxMeasurementAge)
|
||||
|
||||
for i, p := range peers {
|
||||
// Construct measurement struct
|
||||
m := measurement{
|
||||
distance: NormedDistance(p, ksKey),
|
||||
weight: weight,
|
||||
timestamp: now,
|
||||
}
|
||||
|
||||
measurements := append(e.measurements[i], m)
|
||||
|
||||
// find the smallest index of a measurement that is still in the allowed time window
|
||||
// all measurements with a lower index should be discarded as they are too old
|
||||
n := len(measurements)
|
||||
idx := sort.Search(n, func(j int) bool {
|
||||
return measurements[j].timestamp.After(maxAgeTs)
|
||||
})
|
||||
|
||||
// if measurements are outside the allowed time window remove them.
|
||||
// idx == n - there is no measurement in the allowed time window -> reset slice
|
||||
// idx == 0 - the normal case where we only have valid entries
|
||||
// idx != 0 - there is a mix of valid and obsolete entries
|
||||
if idx != 0 {
|
||||
x := make([]measurement, n-idx)
|
||||
copy(x, measurements[idx:])
|
||||
measurements = x
|
||||
}
|
||||
|
||||
// if the number of data points exceed the max threshold, strip oldest measurement data points.
|
||||
if len(measurements) > MaxMeasurementsThreshold {
|
||||
measurements = measurements[len(measurements)-MaxMeasurementsThreshold:]
|
||||
}
|
||||
|
||||
e.measurements[i] = measurements
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NetworkSize instructs the Estimator to calculate the current network size estimate.
|
||||
func (e *Estimator) NetworkSize() (int32, error) {
|
||||
|
||||
// return cached calculation lock-free (fast path)
|
||||
if estimate := atomic.LoadInt32(&e.netSizeCache); estimate != invalidEstimate {
|
||||
logger.Debugw("Cached network size estimation", "estimate", estimate)
|
||||
return estimate, nil
|
||||
}
|
||||
|
||||
e.measurementsLk.Lock()
|
||||
defer e.measurementsLk.Unlock()
|
||||
|
||||
// Check a second time. This is needed because we maybe had to wait on another goroutine doing the computation.
|
||||
// Then the computation was just finished by the other goroutine, and we don't need to redo it.
|
||||
if estimate := e.netSizeCache; estimate != invalidEstimate {
|
||||
logger.Debugw("Cached network size estimation", "estimate", estimate)
|
||||
return estimate, nil
|
||||
}
|
||||
|
||||
// remove obsolete data points
|
||||
e.garbageCollect()
|
||||
|
||||
// initialize slices for linear fit
|
||||
xs := make([]float64, e.bucketSize)
|
||||
ys := make([]float64, e.bucketSize)
|
||||
yerrs := make([]float64, e.bucketSize)
|
||||
|
||||
for i := 0; i < e.bucketSize; i++ {
|
||||
observationCount := len(e.measurements[i])
|
||||
|
||||
// If we don't have enough data to reasonably calculate the network size, return early
|
||||
if observationCount < MinMeasurementsThreshold {
|
||||
return 0, ErrNotEnoughData
|
||||
}
|
||||
|
||||
// Calculate Average Distance
|
||||
sumDistances := 0.0
|
||||
sumWeights := 0.0
|
||||
for _, m := range e.measurements[i] {
|
||||
sumDistances += m.weight * m.distance
|
||||
sumWeights += m.weight
|
||||
}
|
||||
distanceAvg := sumDistances / sumWeights
|
||||
|
||||
// Calculate standard deviation
|
||||
sumWeightedDiffs := 0.0
|
||||
for _, m := range e.measurements[i] {
|
||||
diff := m.distance - distanceAvg
|
||||
sumWeightedDiffs += m.weight * diff * diff
|
||||
}
|
||||
variance := sumWeightedDiffs / (float64(observationCount-1) / float64(observationCount) * sumWeights)
|
||||
distanceStd := math.Sqrt(variance)
|
||||
|
||||
// Track calculations
|
||||
xs[i] = float64(i + 1)
|
||||
ys[i] = distanceAvg
|
||||
yerrs[i] = distanceStd
|
||||
}
|
||||
|
||||
// Calculate linear regression (assumes the line goes through the origin)
|
||||
var x2Sum, xySum float64
|
||||
for i, xi := range xs {
|
||||
yi := ys[i]
|
||||
xySum += yerrs[i] * xi * yi
|
||||
x2Sum += yerrs[i] * xi * xi
|
||||
}
|
||||
slope := xySum / x2Sum
|
||||
|
||||
// calculate final network size
|
||||
netSize := int32(1/slope - 1)
|
||||
|
||||
// cache network size estimation
|
||||
atomic.StoreInt32(&e.netSizeCache, netSize)
|
||||
|
||||
logger.Debugw("New network size estimation", "estimate", netSize)
|
||||
return netSize, nil
|
||||
}
|
||||
|
||||
// calcWeight weighs data points exponentially less if they fall into a non-full bucket.
|
||||
// It weighs distance estimates based on their CPLs and bucket levels.
|
||||
// Bucket Level: 20 -> 1/2^0 -> weight: 1
|
||||
// Bucket Level: 17 -> 1/2^3 -> weight: 1/8
|
||||
// Bucket Level: 10 -> 1/2^10 -> weight: 1/1024
|
||||
//
|
||||
// It can happen that the routing table doesn't have a full bucket, but we are tracking here
|
||||
// a list of peers that would theoretically have been suitable for that bucket. Let's imagine
|
||||
// there are only 13 peers in bucket 3 although there is space for 20. Now, the Track function
|
||||
// gets a peers list (len 20) where all peers fall into bucket 3. The weight of this set of peers
|
||||
// should be 1 instead of 1/2^7.
|
||||
// I actually thought this cannot happen as peers would have been added to the routing table before
|
||||
// the Track function gets called. But they seem sometimes not to be added.
|
||||
func (e *Estimator) calcWeight(key string, peers []peer.ID) float64 {
|
||||
|
||||
cpl := kbucket.CommonPrefixLen(kbucket.ConvertKey(key), e.localID)
|
||||
bucketLevel := e.rt.NPeersForCpl(uint(cpl))
|
||||
|
||||
if bucketLevel < e.bucketSize {
|
||||
// routing table doesn't have a full bucket. Check how many peers would fit into that bucket
|
||||
peerLevel := 0
|
||||
for _, p := range peers {
|
||||
if cpl == kbucket.CommonPrefixLen(kbucket.ConvertPeerID(p), e.localID) {
|
||||
peerLevel += 1
|
||||
}
|
||||
}
|
||||
|
||||
if peerLevel > bucketLevel {
|
||||
return math.Pow(2, float64(peerLevel-e.bucketSize))
|
||||
}
|
||||
}
|
||||
|
||||
return math.Pow(2, float64(bucketLevel-e.bucketSize))
|
||||
}
|
||||
|
||||
// garbageCollect removes all measurements from the list that fell out of the measurement time window.
|
||||
func (e *Estimator) garbageCollect() {
|
||||
logger.Debug("Running garbage collection")
|
||||
|
||||
// the maximum age timestamp of the measurement data points
|
||||
maxAgeTs := time.Now().Add(-MaxMeasurementAge)
|
||||
|
||||
for i := 0; i < e.bucketSize; i++ {
|
||||
|
||||
// find the smallest index of a measurement that is still in the allowed time window
|
||||
// all measurements with a lower index should be discarded as they are too old
|
||||
n := len(e.measurements[i])
|
||||
idx := sort.Search(n, func(j int) bool {
|
||||
return e.measurements[i][j].timestamp.After(maxAgeTs)
|
||||
})
|
||||
|
||||
// if measurements are outside the allowed time window remove them.
|
||||
// idx == n - there is no measurement in the allowed time window -> reset slice
|
||||
// idx == 0 - the normal case where we only have valid entries
|
||||
// idx != 0 - there is a mix of valid and obsolete entries
|
||||
if idx == n {
|
||||
e.measurements[i] = []measurement{}
|
||||
} else if idx != 0 {
|
||||
e.measurements[i] = e.measurements[i][idx:]
|
||||
}
|
||||
}
|
||||
}
|
||||
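As a quick illustration of the normalized XOR distance the estimator works with, the following sketch computes `NormedDistance` for an arbitrary peer ID and key. Both values are placeholders, and the snippet only demonstrates the call shape shown above.

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p-kad-dht/netsize"
	"github.com/libp2p/go-libp2p/core/peer"
	ks "github.com/whyrusleeping/go-keyspace"
)

func main() {
	// Placeholder peer ID and key purely for illustration.
	p := peer.ID("example-peer-id")
	key := ks.XORKeySpace.Key([]byte("example-key"))

	// The distance is normalized against the full 256-bit keyspace, so it
	// falls roughly in [0, 1]: small values mean the peer is close to the key.
	d := netsize.NormedDistance(p, key)
	fmt.Printf("normalized distance: %f\n", d)
}
```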
7
vendor/github.com/libp2p/go-libp2p-kad-dht/optimizations.md
generated
vendored
Normal file
@@ -0,0 +1,7 @@
# Client-side optimizations

This document reflects client-side optimizations that are implemented in this repository. Client-side optimizations are not part of the [Kademlia spec](https://github.com/libp2p/specs/tree/master/kad-dht), and are not required to be implemented on all clients.

## Checking before Adding

A Kademlia server should try to add remote peers querying it to its routing table. However, the Kademlia server has no guarantee that remote peers issuing requests are able to answer Kademlia requests correctly, even though they advertise speaking the Kademlia server protocol. It is important that only server nodes able to answer Kademlia requests end up in other peers' routing tables. Hence, before adding a remote peer to the Kademlia server's routing table, the Kademlia server will send a trivial `FIND_NODE` request to the remote peer, and add it to its routing table only if it is able to provide a valid response.
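A rough sketch of what this check can look like is given below. The helper name `usefulForRoutingTable` and its wiring are purely illustrative assumptions, not the actual API of this package; the real implementation lives elsewhere in the repository. The idea is the same, though: issue a cheap FIND_NODE-style query (`GetClosestPeers`) to the candidate and only admit it to the routing table if it answers.

```go
// Illustrative sketch only: the method name and timeout are assumptions.
// It issues a trivial FIND_NODE-style query and treats any valid, non-empty
// response as evidence that the peer can answer Kademlia requests.
func (dht *IpfsDHT) usefulForRoutingTable(ctx context.Context, p peer.ID) bool {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	// Ask the candidate for the peers closest to ourselves.
	peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, dht.self)
	if err != nil {
		return false // no (or invalid) response: do not add the peer
	}
	return len(peers) > 0
}
```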
11
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/Makefile
generated
vendored
Normal file
@@ -0,0 +1,11 @@
PB = $(wildcard *.proto)
GO = $(PB:.proto=.pb.go)

all: $(GO)

%.pb.go: %.proto
	protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $<

clean:
	rm -f *.pb.go
	rm -f *.go
42
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/bytestring.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
package dht_pb

import (
	"encoding/json"
)

type byteString string

func (b byteString) Marshal() ([]byte, error) {
	return []byte(b), nil
}

func (b *byteString) MarshalTo(data []byte) (int, error) {
	return copy(data, *b), nil
}

func (b *byteString) Unmarshal(data []byte) error {
	*b = byteString(data)
	return nil
}

func (b *byteString) Size() int {
	return len(*b)
}

func (b byteString) MarshalJSON() ([]byte, error) {
	return json.Marshal([]byte(b))
}

func (b *byteString) UnmarshalJSON(data []byte) error {
	var buf []byte
	err := json.Unmarshal(data, &buf)
	if err != nil {
		return err
	}
	*b = byteString(buf)
	return nil
}

func (b byteString) Equal(other byteString) bool {
	return b == other
}
959
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/dht.pb.go
generated
vendored
Normal file
@@ -0,0 +1,959 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: dht.proto
|
||||
|
||||
package dht_pb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
pb "github.com/libp2p/go-libp2p-record/pb"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type Message_MessageType int32
|
||||
|
||||
const (
|
||||
Message_PUT_VALUE Message_MessageType = 0
|
||||
Message_GET_VALUE Message_MessageType = 1
|
||||
Message_ADD_PROVIDER Message_MessageType = 2
|
||||
Message_GET_PROVIDERS Message_MessageType = 3
|
||||
Message_FIND_NODE Message_MessageType = 4
|
||||
Message_PING Message_MessageType = 5
|
||||
)
|
||||
|
||||
var Message_MessageType_name = map[int32]string{
|
||||
0: "PUT_VALUE",
|
||||
1: "GET_VALUE",
|
||||
2: "ADD_PROVIDER",
|
||||
3: "GET_PROVIDERS",
|
||||
4: "FIND_NODE",
|
||||
5: "PING",
|
||||
}
|
||||
|
||||
var Message_MessageType_value = map[string]int32{
|
||||
"PUT_VALUE": 0,
|
||||
"GET_VALUE": 1,
|
||||
"ADD_PROVIDER": 2,
|
||||
"GET_PROVIDERS": 3,
|
||||
"FIND_NODE": 4,
|
||||
"PING": 5,
|
||||
}
|
||||
|
||||
func (x Message_MessageType) String() string {
|
||||
return proto.EnumName(Message_MessageType_name, int32(x))
|
||||
}
|
||||
|
||||
func (Message_MessageType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_616a434b24c97ff4, []int{0, 0}
|
||||
}
|
||||
|
||||
type Message_ConnectionType int32
|
||||
|
||||
const (
|
||||
// sender does not have a connection to peer, and no extra information (default)
|
||||
Message_NOT_CONNECTED Message_ConnectionType = 0
|
||||
// sender has a live connection to peer
|
||||
Message_CONNECTED Message_ConnectionType = 1
|
||||
// sender recently connected to peer
|
||||
Message_CAN_CONNECT Message_ConnectionType = 2
|
||||
// sender recently tried to connect to peer repeatedly but failed to connect
|
||||
// ("try" here is loose, but this should signal "made strong effort, failed")
|
||||
Message_CANNOT_CONNECT Message_ConnectionType = 3
|
||||
)
|
||||
|
||||
var Message_ConnectionType_name = map[int32]string{
|
||||
0: "NOT_CONNECTED",
|
||||
1: "CONNECTED",
|
||||
2: "CAN_CONNECT",
|
||||
3: "CANNOT_CONNECT",
|
||||
}
|
||||
|
||||
var Message_ConnectionType_value = map[string]int32{
|
||||
"NOT_CONNECTED": 0,
|
||||
"CONNECTED": 1,
|
||||
"CAN_CONNECT": 2,
|
||||
"CANNOT_CONNECT": 3,
|
||||
}
|
||||
|
||||
func (x Message_ConnectionType) String() string {
|
||||
return proto.EnumName(Message_ConnectionType_name, int32(x))
|
||||
}
|
||||
|
||||
func (Message_ConnectionType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_616a434b24c97ff4, []int{0, 1}
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
// defines what type of message it is.
|
||||
Type Message_MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=dht.pb.Message_MessageType" json:"type,omitempty"`
|
||||
// defines what coral cluster level this query/response belongs to.
|
||||
// in case we want to implement coral's cluster rings in the future.
|
||||
ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=clusterLevelRaw,proto3" json:"clusterLevelRaw,omitempty"`
|
||||
// Used to specify the key associated with this message.
|
||||
// PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
|
||||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
// Used to return a value
|
||||
// PUT_VALUE, GET_VALUE
|
||||
Record *pb.Record `protobuf:"bytes,3,opt,name=record,proto3" json:"record,omitempty"`
|
||||
// Used to return peers closer to a key in a query
|
||||
// GET_VALUE, GET_PROVIDERS, FIND_NODE
|
||||
CloserPeers []Message_Peer `protobuf:"bytes,8,rep,name=closerPeers,proto3" json:"closerPeers"`
|
||||
// Used to return Providers
|
||||
// GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
|
||||
ProviderPeers []Message_Peer `protobuf:"bytes,9,rep,name=providerPeers,proto3" json:"providerPeers"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Message) Reset() { *m = Message{} }
|
||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message) ProtoMessage() {}
|
||||
func (*Message) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_616a434b24c97ff4, []int{0}
|
||||
}
|
||||
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Message) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Message.Merge(m, src)
|
||||
}
|
||||
func (m *Message) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Message) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Message.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Message proto.InternalMessageInfo
|
||||
|
||||
func (m *Message) GetType() Message_MessageType {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return Message_PUT_VALUE
|
||||
}
|
||||
|
||||
func (m *Message) GetClusterLevelRaw() int32 {
|
||||
if m != nil {
|
||||
return m.ClusterLevelRaw
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Message) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetRecord() *pb.Record {
|
||||
if m != nil {
|
||||
return m.Record
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetCloserPeers() []Message_Peer {
|
||||
if m != nil {
|
||||
return m.CloserPeers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetProviderPeers() []Message_Peer {
|
||||
if m != nil {
|
||||
return m.ProviderPeers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Message_Peer struct {
|
||||
// ID of a given peer.
|
||||
Id byteString `protobuf:"bytes,1,opt,name=id,proto3,customtype=byteString" json:"id"`
|
||||
// multiaddrs for a given peer
|
||||
Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"`
|
||||
// used to signal the sender's connection capabilities to the peer
|
||||
Connection Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Message_Peer) Reset() { *m = Message_Peer{} }
|
||||
func (m *Message_Peer) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message_Peer) ProtoMessage() {}
|
||||
func (*Message_Peer) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_616a434b24c97ff4, []int{0, 0}
|
||||
}
|
||||
func (m *Message_Peer) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Message_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Message_Peer.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Message_Peer) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Message_Peer.Merge(m, src)
|
||||
}
|
||||
func (m *Message_Peer) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Message_Peer) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Message_Peer.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Message_Peer proto.InternalMessageInfo
|
||||
|
||||
func (m *Message_Peer) GetAddrs() [][]byte {
|
||||
if m != nil {
|
||||
return m.Addrs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message_Peer) GetConnection() Message_ConnectionType {
|
||||
if m != nil {
|
||||
return m.Connection
|
||||
}
|
||||
return Message_NOT_CONNECTED
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("dht.pb.Message_MessageType", Message_MessageType_name, Message_MessageType_value)
|
||||
proto.RegisterEnum("dht.pb.Message_ConnectionType", Message_ConnectionType_name, Message_ConnectionType_value)
|
||||
proto.RegisterType((*Message)(nil), "dht.pb.Message")
|
||||
proto.RegisterType((*Message_Peer)(nil), "dht.pb.Message.Peer")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("dht.proto", fileDescriptor_616a434b24c97ff4) }
|
||||
|
||||
var fileDescriptor_616a434b24c97ff4 = []byte{
|
||||
// 469 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x6f, 0x9b, 0x40,
|
||||
0x18, 0xc5, 0x73, 0x80, 0xdd, 0xf8, 0x03, 0x3b, 0xe4, 0x94, 0x01, 0xb9, 0x92, 0x83, 0x3c, 0xd1,
|
||||
0xc1, 0x20, 0xd1, 0xb5, 0xaa, 0x6a, 0x03, 0x8d, 0x2c, 0xa5, 0xd8, 0xba, 0x38, 0xe9, 0x68, 0x19,
|
||||
0xb8, 0x12, 0x54, 0xd7, 0x87, 0x00, 0xa7, 0xf2, 0xd6, 0x3f, 0x2f, 0x63, 0xe7, 0x0e, 0x51, 0xe5,
|
||||
0xa9, 0x7f, 0x46, 0xc5, 0x11, 0x5a, 0xec, 0x25, 0x13, 0xef, 0x7d, 0xf7, 0x7e, 0xe2, 0xdd, 0xa7,
|
||||
0x83, 0x4e, 0x74, 0x5f, 0x98, 0x69, 0xc6, 0x0a, 0x86, 0xdb, 0x5c, 0x06, 0x7d, 0x3b, 0x4e, 0x8a,
|
||||
0xfb, 0x6d, 0x60, 0x86, 0xec, 0x9b, 0xb5, 0x4e, 0x82, 0xd4, 0x4e, 0xad, 0x98, 0x8d, 0x2a, 0x35,
|
||||
0xca, 0x68, 0xc8, 0xb2, 0xc8, 0x4a, 0x03, 0xab, 0x52, 0x15, 0xdb, 0x1f, 0x35, 0x98, 0x98, 0xc5,
|
||||
0xcc, 0xe2, 0xe3, 0x60, 0xfb, 0x85, 0x3b, 0x6e, 0xb8, 0xaa, 0xe2, 0xc3, 0x3f, 0x12, 0xbc, 0xfa,
|
||||
0x44, 0xf3, 0x7c, 0x15, 0x53, 0x6c, 0x81, 0x54, 0xec, 0x52, 0xaa, 0x21, 0x1d, 0x19, 0x3d, 0xfb,
|
||||
0xb5, 0x59, 0xb5, 0x30, 0x9f, 0x8f, 0xeb, 0xef, 0x62, 0x97, 0x52, 0xc2, 0x83, 0xd8, 0x80, 0xb3,
|
||||
0x70, 0xbd, 0xcd, 0x0b, 0x9a, 0x5d, 0xd3, 0x07, 0xba, 0x26, 0xab, 0xef, 0x1a, 0xe8, 0xc8, 0x68,
|
||||
0x91, 0xe3, 0x31, 0x56, 0x41, 0xfc, 0x4a, 0x77, 0x9a, 0xa0, 0x23, 0x43, 0x21, 0xa5, 0xc4, 0x6f,
|
||||
0xa0, 0x5d, 0xf5, 0xd6, 0x44, 0x1d, 0x19, 0xb2, 0x7d, 0x6e, 0xd6, 0xd7, 0x08, 0x4c, 0xc2, 0x15,
|
||||
0x79, 0x0e, 0xe0, 0x77, 0x20, 0x87, 0x6b, 0x96, 0xd3, 0x6c, 0x4e, 0x69, 0x96, 0x6b, 0xa7, 0xba,
|
||||
0x68, 0xc8, 0xf6, 0xc5, 0x71, 0xbd, 0xf2, 0x70, 0x22, 0x3d, 0x3e, 0x5d, 0x9e, 0x90, 0x66, 0x1c,
|
||||
0x7f, 0x80, 0x6e, 0x9a, 0xb1, 0x87, 0x24, 0xaa, 0xf9, 0xce, 0x8b, 0xfc, 0x21, 0xd0, 0xff, 0x81,
|
||||
0x40, 0x2a, 0x15, 0x1e, 0x82, 0x90, 0x44, 0x7c, 0x3d, 0xca, 0x04, 0x97, 0xc9, 0x5f, 0x4f, 0x97,
|
||||
0x10, 0xec, 0x0a, 0x7a, 0x53, 0x64, 0xc9, 0x26, 0x26, 0x42, 0x12, 0xe1, 0x0b, 0x68, 0xad, 0xa2,
|
||||
0x28, 0xcb, 0x35, 0x41, 0x17, 0x0d, 0x85, 0x54, 0x06, 0xbf, 0x07, 0x08, 0xd9, 0x66, 0x43, 0xc3,
|
||||
0x22, 0x61, 0x1b, 0x7e, 0xe3, 0x9e, 0x3d, 0x38, 0x6e, 0xe0, 0xfc, 0x4b, 0xf0, 0x1d, 0x37, 0x88,
|
||||
0x61, 0x02, 0x72, 0x63, 0xfd, 0xb8, 0x0b, 0x9d, 0xf9, 0xed, 0x62, 0x79, 0x37, 0xbe, 0xbe, 0xf5,
|
||||
0xd4, 0x93, 0xd2, 0x5e, 0x79, 0xb5, 0x45, 0x58, 0x05, 0x65, 0xec, 0xba, 0xcb, 0x39, 0x99, 0xdd,
|
||||
0x4d, 0x5d, 0x8f, 0xa8, 0x02, 0x3e, 0x87, 0x6e, 0x19, 0xa8, 0x27, 0x37, 0xaa, 0x58, 0x32, 0x1f,
|
||||
0xa7, 0xbe, 0xbb, 0xf4, 0x67, 0xae, 0xa7, 0x4a, 0xf8, 0x14, 0xa4, 0xf9, 0xd4, 0xbf, 0x52, 0x5b,
|
||||
0xc3, 0xcf, 0xd0, 0x3b, 0x2c, 0x52, 0xd2, 0xfe, 0x6c, 0xb1, 0x74, 0x66, 0xbe, 0xef, 0x39, 0x0b,
|
||||
0xcf, 0xad, 0xfe, 0xf8, 0xdf, 0x22, 0x7c, 0x06, 0xb2, 0x33, 0xf6, 0xeb, 0x84, 0x2a, 0x60, 0x0c,
|
||||
0x3d, 0x67, 0xec, 0x37, 0x28, 0x55, 0x9c, 0x28, 0x8f, 0xfb, 0x01, 0xfa, 0xb9, 0x1f, 0xa0, 0xdf,
|
||||
0xfb, 0x01, 0x0a, 0xda, 0xfc, 0xfd, 0xbd, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x1a, 0xa1,
|
||||
0xbe, 0xf7, 0x02, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *Message) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Message) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.ClusterLevelRaw != 0 {
|
||||
i = encodeVarintDht(dAtA, i, uint64(m.ClusterLevelRaw))
|
||||
i--
|
||||
dAtA[i] = 0x50
|
||||
}
|
||||
if len(m.ProviderPeers) > 0 {
|
||||
for iNdEx := len(m.ProviderPeers) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.ProviderPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDht(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x4a
|
||||
}
|
||||
}
|
||||
if len(m.CloserPeers) > 0 {
|
||||
for iNdEx := len(m.CloserPeers) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.CloserPeers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDht(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x42
|
||||
}
|
||||
}
|
||||
if m.Record != nil {
|
||||
{
|
||||
size, err := m.Record.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintDht(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
}
|
||||
if len(m.Key) > 0 {
|
||||
i -= len(m.Key)
|
||||
copy(dAtA[i:], m.Key)
|
||||
i = encodeVarintDht(dAtA, i, uint64(len(m.Key)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if m.Type != 0 {
|
||||
i = encodeVarintDht(dAtA, i, uint64(m.Type))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Message_Peer) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Message_Peer) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Message_Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.Connection != 0 {
|
||||
i = encodeVarintDht(dAtA, i, uint64(m.Connection))
|
||||
i--
|
||||
dAtA[i] = 0x18
|
||||
}
|
||||
if len(m.Addrs) > 0 {
|
||||
for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- {
|
||||
i -= len(m.Addrs[iNdEx])
|
||||
copy(dAtA[i:], m.Addrs[iNdEx])
|
||||
i = encodeVarintDht(dAtA, i, uint64(len(m.Addrs[iNdEx])))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
{
|
||||
size := m.Id.Size()
|
||||
i -= size
|
||||
if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i = encodeVarintDht(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintDht(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovDht(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *Message) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Type != 0 {
|
||||
n += 1 + sovDht(uint64(m.Type))
|
||||
}
|
||||
l = len(m.Key)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
if m.Record != nil {
|
||||
l = m.Record.Size()
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
if len(m.CloserPeers) > 0 {
|
||||
for _, e := range m.CloserPeers {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.ProviderPeers) > 0 {
|
||||
for _, e := range m.ProviderPeers {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.ClusterLevelRaw != 0 {
|
||||
n += 1 + sovDht(uint64(m.ClusterLevelRaw))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Message_Peer) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = m.Id.Size()
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
if len(m.Addrs) > 0 {
|
||||
for _, b := range m.Addrs {
|
||||
l = len(b)
|
||||
n += 1 + l + sovDht(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Connection != 0 {
|
||||
n += 1 + sovDht(uint64(m.Connection))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovDht(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozDht(x uint64) (n int) {
|
||||
return sovDht(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *Message) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Message: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
|
||||
}
|
||||
m.Type = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Type |= Message_MessageType(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Key == nil {
|
||||
m.Key = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Record == nil {
|
||||
m.Record = &pb.Record{}
|
||||
}
|
||||
if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 8:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field CloserPeers", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.CloserPeers = append(m.CloserPeers, Message_Peer{})
|
||||
if err := m.CloserPeers[len(m.CloserPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 9:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ProviderPeers", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ProviderPeers = append(m.ProviderPeers, Message_Peer{})
|
||||
if err := m.ProviderPeers[len(m.ProviderPeers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 10:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ClusterLevelRaw", wireType)
|
||||
}
|
||||
m.ClusterLevelRaw = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.ClusterLevelRaw |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDht(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *Message_Peer) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Peer: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx))
|
||||
copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType)
|
||||
}
|
||||
m.Connection = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Connection |= Message_ConnectionType(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDht(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthDht
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipDht(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDht
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDht
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupDht
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDht
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthDht = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDht = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupDht = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
||||
72
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/dht.proto
generated
vendored
Normal file
72
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/dht.proto
generated
vendored
Normal file
@@ -0,0 +1,72 @@
// In order to re-generate the golang packages for `Message` you will need...
// 1. Protobuf binary (tested with protoc 3.0.0). - https://github.com/gogo/protobuf/releases
// 2. Gogo Protobuf (tested with gogo 0.3). - https://github.com/gogo/protobuf
// 3. To have cloned `libp2p/go-libp2p-{record,kad-dht}` under the same directory.
// Now from `libp2p/go-libp2p-kad-dht/pb` you can run...
// `protoc --gogo_out=. --proto_path=../../go-libp2p-record/pb/ --proto_path=./ dht.proto`

syntax = "proto3";
package dht.pb;

import "github.com/libp2p/go-libp2p-record/pb/record.proto";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";

message Message {
	enum MessageType {
		PUT_VALUE = 0;
		GET_VALUE = 1;
		ADD_PROVIDER = 2;
		GET_PROVIDERS = 3;
		FIND_NODE = 4;
		PING = 5;
	}

	enum ConnectionType {
		// sender does not have a connection to peer, and no extra information (default)
		NOT_CONNECTED = 0;

		// sender has a live connection to peer
		CONNECTED = 1;

		// sender recently connected to peer
		CAN_CONNECT = 2;

		// sender recently tried to connect to peer repeatedly but failed to connect
		// ("try" here is loose, but this should signal "made strong effort, failed")
		CANNOT_CONNECT = 3;
	}

	message Peer {
		// ID of a given peer.
		bytes id = 1 [(gogoproto.customtype) = "byteString", (gogoproto.nullable) = false];

		// multiaddrs for a given peer
		repeated bytes addrs = 2;

		// used to signal the sender's connection capabilities to the peer
		ConnectionType connection = 3;
	}

	// defines what type of message it is.
	MessageType type = 1;

	// defines what coral cluster level this query/response belongs to.
	// in case we want to implement coral's cluster rings in the future.
	int32 clusterLevelRaw = 10;

	// Used to specify the key associated with this message.
	// PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
	bytes key = 2;

	// Used to return a value
	// PUT_VALUE, GET_VALUE
	record.pb.Record record = 3;

	// Used to return peers closer to a key in a query
	// GET_VALUE, GET_PROVIDERS, FIND_NODE
	repeated Peer closerPeers = 8 [(gogoproto.nullable) = false];

	// Used to return Providers
	// GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
	repeated Peer providerPeers = 9 [(gogoproto.nullable) = false];
}
|
||||
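The wire format above is what the generated Go code earlier in this diff implements. As a minimal illustrative sketch (not part of the vendored sources; it only assumes the package is imported as dht_pb), a Message can be round-tripped through the generated Marshal/Unmarshal:

package main

import (
	"fmt"

	dht_pb "github.com/libp2p/go-libp2p-kad-dht/pb"
)

func main() {
	// Build a FIND_NODE request for an arbitrary target key.
	msg := dht_pb.NewMessage(dht_pb.Message_FIND_NODE, []byte("target-key"), 0)

	// Marshal uses the gogo-generated encoder shown above (field 1 = type, field 2 = key, ...).
	raw, err := msg.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal reverses it; unknown fields are preserved in XXX_unrecognized.
	var decoded dht_pb.Message
	if err := decoded.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetType(), len(decoded.GetKey()))
}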
171
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/message.go
generated
vendored
Normal file
171
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/message.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
package dht_pb
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
logging "github.com/ipfs/go-log"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var log = logging.Logger("dht.pb")
|
||||
|
||||
type PeerRoutingInfo struct {
|
||||
peer.AddrInfo
|
||||
network.Connectedness
|
||||
}
|
||||
|
||||
// NewMessage constructs a new dht message with given type, key, and level
|
||||
func NewMessage(typ Message_MessageType, key []byte, level int) *Message {
|
||||
m := &Message{
|
||||
Type: typ,
|
||||
Key: key,
|
||||
}
|
||||
m.SetClusterLevel(level)
|
||||
return m
|
||||
}
|
||||
|
||||
func peerRoutingInfoToPBPeer(p PeerRoutingInfo) Message_Peer {
|
||||
var pbp Message_Peer
|
||||
|
||||
pbp.Addrs = make([][]byte, len(p.Addrs))
|
||||
for i, maddr := range p.Addrs {
|
||||
pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed.
|
||||
}
|
||||
pbp.Id = byteString(p.ID)
|
||||
pbp.Connection = ConnectionType(p.Connectedness)
|
||||
return pbp
|
||||
}
|
||||
|
||||
func peerInfoToPBPeer(p peer.AddrInfo) Message_Peer {
|
||||
var pbp Message_Peer
|
||||
|
||||
pbp.Addrs = make([][]byte, len(p.Addrs))
|
||||
for i, maddr := range p.Addrs {
|
||||
pbp.Addrs[i] = maddr.Bytes() // Bytes, not String. Compressed.
|
||||
}
|
||||
pbp.Id = byteString(p.ID)
|
||||
return pbp
|
||||
}
|
||||
|
||||
// PBPeerToPeerInfo turns a Message_Peer into its peer.AddrInfo counterpart
|
||||
func PBPeerToPeerInfo(pbp Message_Peer) peer.AddrInfo {
|
||||
return peer.AddrInfo{
|
||||
ID: peer.ID(pbp.Id),
|
||||
Addrs: pbp.Addresses(),
|
||||
}
|
||||
}
|
||||
|
||||
// RawPeerInfosToPBPeers converts a slice of peer.AddrInfos into a slice of Message_Peers,
|
||||
// ready to go out on the wire.
|
||||
func RawPeerInfosToPBPeers(peers []peer.AddrInfo) []Message_Peer {
|
||||
pbpeers := make([]Message_Peer, len(peers))
|
||||
for i, p := range peers {
|
||||
pbpeers[i] = peerInfoToPBPeer(p)
|
||||
}
|
||||
return pbpeers
|
||||
}
|
||||
|
||||
// PeerInfosToPBPeers converts a given []peer.AddrInfo into a set of []Message_Peer,
// which can be written to a message and sent out. The key thing this function
// does (in addition to RawPeerInfosToPBPeers) is set the ConnectionType with
// information from the given network.Network.
|
||||
func PeerInfosToPBPeers(n network.Network, peers []peer.AddrInfo) []Message_Peer {
|
||||
pbps := RawPeerInfosToPBPeers(peers)
|
||||
for i := range pbps {
c := ConnectionType(n.Connectedness(peers[i].ID))
pbps[i].Connection = c
|
||||
}
|
||||
return pbps
|
||||
}
|
||||
|
||||
func PeerRoutingInfosToPBPeers(peers []PeerRoutingInfo) []Message_Peer {
|
||||
pbpeers := make([]Message_Peer, len(peers))
|
||||
for i, p := range peers {
|
||||
pbpeers[i] = peerRoutingInfoToPBPeer(p)
|
||||
}
|
||||
return pbpeers
|
||||
}
|
||||
|
||||
// PBPeersToPeerInfos converts a given []Message_Peer into []*peer.AddrInfo.
|
||||
// Invalid addresses will be silently omitted.
|
||||
func PBPeersToPeerInfos(pbps []Message_Peer) []*peer.AddrInfo {
|
||||
peers := make([]*peer.AddrInfo, 0, len(pbps))
|
||||
for _, pbp := range pbps {
|
||||
ai := PBPeerToPeerInfo(pbp)
|
||||
peers = append(peers, &ai)
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// Addresses returns the multiaddrs associated with the Message_Peer entry
|
||||
func (m *Message_Peer) Addresses() []ma.Multiaddr {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
maddrs := make([]ma.Multiaddr, 0, len(m.Addrs))
|
||||
for _, addr := range m.Addrs {
|
||||
maddr, err := ma.NewMultiaddrBytes(addr)
|
||||
if err != nil {
|
||||
log.Debugw("error decoding multiaddr for peer", "peer", peer.ID(m.Id), "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
maddrs = append(maddrs, maddr)
|
||||
}
|
||||
return maddrs
|
||||
}
|
||||
|
||||
// GetClusterLevel gets and adjusts the cluster level on the message.
|
||||
// a +/- 1 adjustment is needed to distinguish a valid first level (1) and
|
||||
// default "no value" protobuf behavior (0)
|
||||
func (m *Message) GetClusterLevel() int {
|
||||
level := m.GetClusterLevelRaw() - 1
|
||||
if level < 0 {
|
||||
return 0
|
||||
}
|
||||
return int(level)
|
||||
}
|
||||
|
||||
// SetClusterLevel adjusts and sets the cluster level on the message.
|
||||
// a +/- 1 adjustment is needed to distinguish a valid first level (1) and
|
||||
// default "no value" protobuf behavior (0)
|
||||
func (m *Message) SetClusterLevel(level int) {
|
||||
lvl := int32(level)
|
||||
m.ClusterLevelRaw = lvl + 1
|
||||
}
|
||||
|
||||
// ConnectionType returns a Message_ConnectionType associated with the
|
||||
// network.Connectedness.
|
||||
func ConnectionType(c network.Connectedness) Message_ConnectionType {
|
||||
switch c {
|
||||
default:
|
||||
return Message_NOT_CONNECTED
|
||||
case network.NotConnected:
|
||||
return Message_NOT_CONNECTED
|
||||
case network.Connected:
|
||||
return Message_CONNECTED
|
||||
case network.CanConnect:
|
||||
return Message_CAN_CONNECT
|
||||
case network.CannotConnect:
|
||||
return Message_CANNOT_CONNECT
|
||||
}
|
||||
}
|
||||
|
||||
// Connectedness returns an network.Connectedness associated with the
|
||||
// Message_ConnectionType.
|
||||
func Connectedness(c Message_ConnectionType) network.Connectedness {
|
||||
switch c {
|
||||
default:
|
||||
return network.NotConnected
|
||||
case Message_NOT_CONNECTED:
|
||||
return network.NotConnected
|
||||
case Message_CONNECTED:
|
||||
return network.Connected
|
||||
case Message_CAN_CONNECT:
|
||||
return network.CanConnect
|
||||
case Message_CANNOT_CONNECT:
|
||||
return network.CannotConnect
|
||||
}
|
||||
}
|
||||
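A minimal usage sketch of the helpers in this file (illustrative only, not part of the vendored sources; the raw-string peer ID is a placeholder, real IDs come from peer.Decode). It shows the AddrInfo <-> wire-peer conversions and the +/- 1 cluster-level adjustment described above:

package main

import (
	"fmt"

	dht_pb "github.com/libp2p/go-libp2p-kad-dht/pb"
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4001")
	ai := peer.AddrInfo{ID: peer.ID("placeholder-peer-id"), Addrs: []ma.Multiaddr{addr}}

	// AddrInfo -> wire representation (no ConnectionType set).
	pbPeers := dht_pb.RawPeerInfosToPBPeers([]peer.AddrInfo{ai})

	// Wire representation -> AddrInfo; invalid multiaddrs are dropped by Addresses().
	infos := dht_pb.PBPeersToPeerInfos(pbPeers)
	fmt.Println(len(infos), infos[0].Addrs)

	// Cluster levels are stored off-by-one so that the protobuf default 0 can mean "unset".
	m := dht_pb.NewMessage(dht_pb.Message_GET_VALUE, []byte("k"), 3)
	fmt.Println(m.ClusterLevelRaw, m.GetClusterLevel()) // 4, 3
}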
261
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/protocol_messenger.go
generated
vendored
Normal file
261
vendor/github.com/libp2p/go-libp2p-kad-dht/pb/protocol_messenger.go
generated
vendored
Normal file
@@ -0,0 +1,261 @@
|
||||
package dht_pb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
logging "github.com/ipfs/go-log"
|
||||
recpb "github.com/libp2p/go-libp2p-record/pb"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-multihash"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
)
|
||||
|
||||
var logger = logging.Logger("dht")
|
||||
|
||||
// ProtocolMessenger can be used for sending DHT messages to peers and processing their responses.
|
||||
// This decouples the wire protocol format from both the DHT protocol implementation and from the implementation of the
|
||||
// routing.Routing interface.
|
||||
//
|
||||
// Note: the ProtocolMessenger's MessageSender still needs to deal with some wire protocol details such as using
|
||||
// varint-delineated protobufs
|
||||
type ProtocolMessenger struct {
|
||||
m MessageSender
|
||||
}
|
||||
|
||||
type ProtocolMessengerOption func(*ProtocolMessenger) error
|
||||
|
||||
// NewProtocolMessenger creates a new ProtocolMessenger that is used for sending DHT messages to peers and processing
|
||||
// their responses.
|
||||
func NewProtocolMessenger(msgSender MessageSender, opts ...ProtocolMessengerOption) (*ProtocolMessenger, error) {
|
||||
pm := &ProtocolMessenger{
|
||||
m: msgSender,
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
if err := o(pm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
type MessageSenderWithDisconnect interface {
|
||||
MessageSender
|
||||
|
||||
OnDisconnect(context.Context, peer.ID)
|
||||
}
|
||||
|
||||
// MessageSender handles sending wire protocol messages to a given peer
|
||||
type MessageSender interface {
|
||||
// SendRequest sends a peer a message and waits for its response
|
||||
SendRequest(ctx context.Context, p peer.ID, pmes *Message) (*Message, error)
|
||||
// SendMessage sends a peer a message without waiting on a response
|
||||
SendMessage(ctx context.Context, p peer.ID, pmes *Message) error
|
||||
}
|
||||
|
||||
// PutValue asks a peer to store the given key/value pair.
|
||||
func (pm *ProtocolMessenger) PutValue(ctx context.Context, p peer.ID, rec *recpb.Record) (err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.PutValue")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("record", rec))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_PUT_VALUE, rec.Key, 0)
|
||||
pmes.Record = rec
|
||||
rpmes, err := pm.m.SendRequest(ctx, p, pmes)
|
||||
if err != nil {
|
||||
logger.Debugw("failed to put value to peer", "to", p, "key", internal.LoggableRecordKeyBytes(rec.Key), "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(rpmes.GetRecord().Value, pmes.GetRecord().Value) {
|
||||
const errStr = "value not put correctly"
|
||||
logger.Infow(errStr, "put-message", pmes, "get-message", rpmes)
|
||||
return errors.New(errStr)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetValue asks a peer for the value corresponding to the given key. Also returns the K closest peers to the key
|
||||
// as described in GetClosestPeers.
|
||||
func (pm *ProtocolMessenger) GetValue(ctx context.Context, p peer.ID, key string) (record *recpb.Record, closerPeers []*peer.AddrInfo, err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetValue")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), internal.KeyAsAttribute("key", key))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
} else {
|
||||
peers := make([]string, len(closerPeers))
|
||||
for i, v := range closerPeers {
|
||||
peers[i] = v.String()
|
||||
}
|
||||
span.SetAttributes(
|
||||
attribute.Stringer("record", record),
|
||||
attribute.StringSlice("closestPeers", peers),
|
||||
)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_GET_VALUE, []byte(key), 0)
|
||||
respMsg, err := pm.m.SendRequest(ctx, p, pmes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Perhaps we were given closer peers
|
||||
peers := PBPeersToPeerInfos(respMsg.GetCloserPeers())
|
||||
|
||||
if rec := respMsg.GetRecord(); rec != nil {
|
||||
// Success! We were given the value
|
||||
logger.Debug("got value")
|
||||
|
||||
// Check that record matches the one we are looking for (validation of the record does not happen here)
|
||||
if !bytes.Equal([]byte(key), rec.GetKey()) {
|
||||
logger.Debug("received incorrect record")
|
||||
return nil, nil, internal.ErrIncorrectRecord
|
||||
}
|
||||
|
||||
return rec, peers, err
|
||||
}
|
||||
|
||||
return nil, peers, nil
|
||||
}
|
||||
|
||||
// GetClosestPeers asks a peer to return the K (a DHT-wide parameter) DHT server peers closest in XOR space to the id
|
||||
// Note: If the peer happens to know another peer whose peerID exactly matches the given id it will return that peer
|
||||
// even if that peer is not a DHT server node.
|
||||
func (pm *ProtocolMessenger) GetClosestPeers(ctx context.Context, p peer.ID, id peer.ID) (closerPeers []*peer.AddrInfo, err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetClosestPeers")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", id))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
} else {
|
||||
peers := make([]string, len(closerPeers))
|
||||
for i, v := range closerPeers {
|
||||
peers[i] = v.String()
|
||||
}
|
||||
span.SetAttributes(attribute.StringSlice("peers", peers))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_FIND_NODE, []byte(id), 0)
|
||||
respMsg, err := pm.m.SendRequest(ctx, p, pmes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
peers := PBPeersToPeerInfos(respMsg.GetCloserPeers())
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
// PutProvider is deprecated please use [ProtocolMessenger.PutProviderAddrs].
|
||||
func (pm *ProtocolMessenger) PutProvider(ctx context.Context, p peer.ID, key multihash.Multihash, h host.Host) error {
|
||||
return pm.PutProviderAddrs(ctx, p, key, peer.AddrInfo{
|
||||
ID: h.ID(),
|
||||
Addrs: h.Addrs(),
|
||||
})
|
||||
}
|
||||
|
||||
// PutProviderAddrs asks a peer to store that we are a provider for the given key.
|
||||
func (pm *ProtocolMessenger) PutProviderAddrs(ctx context.Context, p peer.ID, key multihash.Multihash, self peer.AddrInfo) (err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.PutProvider")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", key))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// TODO: We may want to limit the type of addresses in our provider records
|
||||
// For example, in a WAN-only DHT prohibit sharing non-WAN addresses (e.g. 192.168.0.100)
|
||||
if len(self.Addrs) < 1 {
|
||||
return fmt.Errorf("no known addresses for self, cannot put provider")
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_ADD_PROVIDER, key, 0)
|
||||
pmes.ProviderPeers = RawPeerInfosToPBPeers([]peer.AddrInfo{self})
|
||||
|
||||
return pm.m.SendMessage(ctx, p, pmes)
|
||||
}
|
||||
|
||||
// GetProviders asks a peer for the providers it knows of for a given key. Also returns the K closest peers to the key
|
||||
// as described in GetClosestPeers.
|
||||
func (pm *ProtocolMessenger) GetProviders(ctx context.Context, p peer.ID, key multihash.Multihash) (provs []*peer.AddrInfo, closerPeers []*peer.AddrInfo, err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.GetProviders")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p), attribute.Stringer("key", key))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
} else {
|
||||
provsStr := make([]string, len(provs))
|
||||
for i, v := range provs {
|
||||
provsStr[i] = v.String()
|
||||
}
|
||||
closerPeersStr := make([]string, len(closerPeers))
for i, v := range closerPeers {
closerPeersStr[i] = v.String()
|
||||
}
|
||||
span.SetAttributes(attribute.StringSlice("provs", provsStr), attribute.StringSlice("closestPeers", closerPeersStr))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
pmes := NewMessage(Message_GET_PROVIDERS, key, 0)
|
||||
respMsg, err := pm.m.SendRequest(ctx, p, pmes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
provs = PBPeersToPeerInfos(respMsg.GetProviderPeers())
|
||||
closerPeers = PBPeersToPeerInfos(respMsg.GetCloserPeers())
|
||||
return provs, closerPeers, nil
|
||||
}
|
||||
|
||||
// Ping sends a ping message to the passed peer and waits for a response.
|
||||
func (pm *ProtocolMessenger) Ping(ctx context.Context, p peer.ID) (err error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProtocolMessenger.Ping")
|
||||
defer span.End()
|
||||
if span.IsRecording() {
|
||||
span.SetAttributes(attribute.Stringer("to", p))
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
req := NewMessage(Message_PING, nil, 0)
|
||||
resp, err := pm.m.SendRequest(ctx, p, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("sending request: %w", err)
|
||||
}
|
||||
if resp.Type != Message_PING {
|
||||
return fmt.Errorf("got unexpected response type: %v", resp.Type)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
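As a rough sketch of how this type is wired up (illustrative only; the loopback MessageSender below is hypothetical and not part of this package), a caller supplies something that satisfies MessageSender and then issues RPCs through the ProtocolMessenger:

package main

import (
	"context"
	"fmt"

	dht_pb "github.com/libp2p/go-libp2p-kad-dht/pb"
	"github.com/libp2p/go-libp2p/core/peer"
)

// loopbackSender is a hypothetical MessageSender that answers every request
// itself instead of dialing a remote peer. Real implementations send
// varint-delimited protobufs over a libp2p stream.
type loopbackSender struct{}

func (loopbackSender) SendRequest(ctx context.Context, p peer.ID, m *dht_pb.Message) (*dht_pb.Message, error) {
	return &dht_pb.Message{Type: m.Type}, nil // echo the request type back
}

func (loopbackSender) SendMessage(ctx context.Context, p peer.ID, m *dht_pb.Message) error {
	return nil
}

func main() {
	pm, err := dht_pb.NewProtocolMessenger(loopbackSender{})
	if err != nil {
		panic(err)
	}
	// Ping succeeds because the loopback sender echoes Message_PING back.
	fmt.Println(pm.Ping(context.Background(), peer.ID("remote")))
}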
12
vendor/github.com/libp2p/go-libp2p-kad-dht/protocol.go
generated
vendored
Normal file
12
vendor/github.com/libp2p/go-libp2p-kad-dht/protocol.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
package dht

import (
	"github.com/libp2p/go-libp2p/core/protocol"
)

var (
	// ProtocolDHT is the default DHT protocol.
	ProtocolDHT protocol.ID = "/ipfs/kad/1.0.0"
	// DefaultProtocols spoken by the DHT.
	DefaultProtocols = []protocol.ID{ProtocolDHT}
)
|
||||
34
vendor/github.com/libp2p/go-libp2p-kad-dht/providers/provider_set.go
generated
vendored
Normal file
34
vendor/github.com/libp2p/go-libp2p-kad-dht/providers/provider_set.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
package providers

import (
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

// A providerSet has the list of providers and the time that they were added
// It is used as an intermediary data struct between what is stored in the datastore
// and the list of providers that get passed to the consumer of a .GetProviders call
type providerSet struct {
	providers []peer.ID
	set       map[peer.ID]time.Time
}

func newProviderSet() *providerSet {
	return &providerSet{
		set: make(map[peer.ID]time.Time),
	}
}

func (ps *providerSet) Add(p peer.ID) {
	ps.setVal(p, time.Now())
}

func (ps *providerSet) setVal(p peer.ID, t time.Time) {
	_, found := ps.set[p]
	if !found {
		ps.providers = append(ps.providers, p)
	}

	ps.set[p] = t
}
|
||||
412
vendor/github.com/libp2p/go-libp2p-kad-dht/providers/providers_manager.go
generated
vendored
Normal file
412
vendor/github.com/libp2p/go-libp2p-kad-dht/providers/providers_manager.go
generated
vendored
Normal file
@@ -0,0 +1,412 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru/simplelru"
|
||||
ds "github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/autobatch"
|
||||
dsq "github.com/ipfs/go-datastore/query"
|
||||
logging "github.com/ipfs/go-log"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
peerstoreImpl "github.com/libp2p/go-libp2p/p2p/host/peerstore"
|
||||
"github.com/multiformats/go-base32"
|
||||
)
|
||||
|
||||
const (
|
||||
// ProvidersKeyPrefix is the prefix/namespace for ALL provider record
|
||||
// keys stored in the data store.
|
||||
ProvidersKeyPrefix = "/providers/"
|
||||
|
||||
// ProviderAddrTTL is the TTL to keep the multi addresses of provider
|
||||
// peers around. Those addresses are returned alongside the providers. After
|
||||
// it expires, the returned records will require an extra lookup, to
|
||||
// find the multiaddress associated with the returned peer id.
|
||||
ProviderAddrTTL = 24 * time.Hour
|
||||
)
|
||||
|
||||
// ProvideValidity is the default time that a Provider Record should last on DHT
|
||||
// This value is also known as Provider Record Expiration Interval.
|
||||
var ProvideValidity = time.Hour * 48
|
||||
var defaultCleanupInterval = time.Hour
|
||||
var lruCacheSize = 256
|
||||
var batchBufferSize = 256
|
||||
var log = logging.Logger("providers")
|
||||
|
||||
// ProviderStore represents a store that associates peers and their addresses to keys.
|
||||
type ProviderStore interface {
|
||||
AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error
|
||||
GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error)
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// ProviderManager adds and pulls providers out of the datastore,
|
||||
// caching them in between
|
||||
type ProviderManager struct {
|
||||
self peer.ID
|
||||
// all non channel fields are meant to be accessed only within
|
||||
// the run method
|
||||
cache lru.LRUCache
|
||||
pstore peerstore.Peerstore
|
||||
dstore *autobatch.Datastore
|
||||
|
||||
newprovs chan *addProv
|
||||
getprovs chan *getProv
|
||||
|
||||
cleanupInterval time.Duration
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
var _ ProviderStore = (*ProviderManager)(nil)
|
||||
|
||||
// Option is a function that sets a provider manager option.
|
||||
type Option func(*ProviderManager) error
|
||||
|
||||
func (pm *ProviderManager) applyOptions(opts ...Option) error {
|
||||
for i, opt := range opts {
|
||||
if err := opt(pm); err != nil {
|
||||
return fmt.Errorf("provider manager option %d failed: %s", i, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanupInterval sets the time between GC runs.
|
||||
// Defaults to 1h.
|
||||
func CleanupInterval(d time.Duration) Option {
|
||||
return func(pm *ProviderManager) error {
|
||||
pm.cleanupInterval = d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Cache sets the LRU cache implementation.
|
||||
// Defaults to a simple LRU cache.
|
||||
func Cache(c lru.LRUCache) Option {
|
||||
return func(pm *ProviderManager) error {
|
||||
pm.cache = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type addProv struct {
|
||||
ctx context.Context
|
||||
key []byte
|
||||
val peer.ID
|
||||
}
|
||||
|
||||
type getProv struct {
|
||||
ctx context.Context
|
||||
key []byte
|
||||
resp chan []peer.ID
|
||||
}
|
||||
|
||||
// NewProviderManager constructor
|
||||
func NewProviderManager(local peer.ID, ps peerstore.Peerstore, dstore ds.Batching, opts ...Option) (*ProviderManager, error) {
|
||||
pm := new(ProviderManager)
|
||||
pm.self = local
|
||||
pm.getprovs = make(chan *getProv)
|
||||
pm.newprovs = make(chan *addProv)
|
||||
pm.pstore = ps
|
||||
pm.dstore = autobatch.NewAutoBatching(dstore, batchBufferSize)
|
||||
cache, err := lru.NewLRU(lruCacheSize, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pm.cache = cache
|
||||
pm.cleanupInterval = defaultCleanupInterval
|
||||
if err := pm.applyOptions(opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pm.ctx, pm.cancel = context.WithCancel(context.Background())
|
||||
pm.run()
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *ProviderManager) run() {
|
||||
pm.wg.Add(1)
|
||||
go func() {
|
||||
defer pm.wg.Done()
|
||||
|
||||
var gcQuery dsq.Results
|
||||
gcTimer := time.NewTimer(pm.cleanupInterval)
|
||||
|
||||
defer func() {
|
||||
gcTimer.Stop()
|
||||
if gcQuery != nil {
|
||||
// don't really care if this fails.
|
||||
_ = gcQuery.Close()
|
||||
}
|
||||
if err := pm.dstore.Flush(context.Background()); err != nil {
|
||||
log.Error("failed to flush datastore: ", err)
|
||||
}
|
||||
}()
|
||||
|
||||
var gcQueryRes <-chan dsq.Result
|
||||
var gcSkip map[string]struct{}
|
||||
var gcTime time.Time
|
||||
for {
|
||||
select {
|
||||
case np := <-pm.newprovs:
|
||||
err := pm.addProv(np.ctx, np.key, np.val)
|
||||
if err != nil {
|
||||
log.Error("error adding new providers: ", err)
|
||||
continue
|
||||
}
|
||||
if gcSkip != nil {
|
||||
// we have a GC running; tell it to skip this provider
|
||||
// as we've updated it since the GC started.
|
||||
gcSkip[mkProvKeyFor(np.key, np.val)] = struct{}{}
|
||||
}
|
||||
case gp := <-pm.getprovs:
|
||||
provs, err := pm.getProvidersForKey(gp.ctx, gp.key)
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("error reading providers: ", err)
|
||||
}
|
||||
|
||||
// set the cap so the user can't append to this.
|
||||
gp.resp <- provs[0:len(provs):len(provs)]
|
||||
case res, ok := <-gcQueryRes:
|
||||
if !ok {
|
||||
if err := gcQuery.Close(); err != nil {
|
||||
log.Error("failed to close provider GC query: ", err)
|
||||
}
|
||||
gcTimer.Reset(pm.cleanupInterval)
|
||||
|
||||
// cleanup GC round
|
||||
gcQueryRes = nil
|
||||
gcSkip = nil
|
||||
gcQuery = nil
|
||||
continue
|
||||
}
|
||||
if res.Error != nil {
|
||||
log.Error("got error from GC query: ", res.Error)
|
||||
continue
|
||||
}
|
||||
if _, ok := gcSkip[res.Key]; ok {
|
||||
// We've updated this record since starting the
|
||||
// GC round, skip it.
|
||||
continue
|
||||
}
|
||||
|
||||
// check expiration time
|
||||
t, err := readTimeValue(res.Value)
|
||||
switch {
|
||||
case err != nil:
|
||||
// couldn't parse the time
|
||||
log.Error("parsing providers record from disk: ", err)
|
||||
fallthrough
|
||||
case gcTime.Sub(t) > ProvideValidity:
|
||||
// or expired
|
||||
err = pm.dstore.Delete(pm.ctx, ds.RawKey(res.Key))
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("failed to remove provider record from disk: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
case gcTime = <-gcTimer.C:
|
||||
// You know the wonderful thing about caches? You can
|
||||
// drop them.
|
||||
//
|
||||
// Much faster than GCing.
|
||||
pm.cache.Purge()
|
||||
|
||||
// Now, kick off a GC of the datastore.
|
||||
q, err := pm.dstore.Query(pm.ctx, dsq.Query{
|
||||
Prefix: ProvidersKeyPrefix,
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("provider record GC query failed: ", err)
|
||||
continue
|
||||
}
|
||||
gcQuery = q
|
||||
gcQueryRes = q.Next()
|
||||
gcSkip = make(map[string]struct{})
|
||||
case <-pm.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (pm *ProviderManager) Close() error {
|
||||
pm.cancel()
|
||||
pm.wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddProvider adds a provider
|
||||
func (pm *ProviderManager) AddProvider(ctx context.Context, k []byte, provInfo peer.AddrInfo) error {
|
||||
ctx, span := internal.StartSpan(ctx, "ProviderManager.AddProvider")
|
||||
defer span.End()
|
||||
|
||||
if provInfo.ID != pm.self { // don't add own addrs.
|
||||
pm.pstore.AddAddrs(provInfo.ID, provInfo.Addrs, ProviderAddrTTL)
|
||||
}
|
||||
prov := &addProv{
|
||||
ctx: ctx,
|
||||
key: k,
|
||||
val: provInfo.ID,
|
||||
}
|
||||
select {
|
||||
case pm.newprovs <- prov:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// addProv updates the cache if needed
|
||||
func (pm *ProviderManager) addProv(ctx context.Context, k []byte, p peer.ID) error {
|
||||
now := time.Now()
|
||||
if provs, ok := pm.cache.Get(string(k)); ok {
|
||||
provs.(*providerSet).setVal(p, now)
|
||||
} // else not cached, just write through
|
||||
|
||||
return writeProviderEntry(ctx, pm.dstore, k, p, now)
|
||||
}
|
||||
|
||||
// writeProviderEntry writes the provider into the datastore
|
||||
func writeProviderEntry(ctx context.Context, dstore ds.Datastore, k []byte, p peer.ID, t time.Time) error {
|
||||
dsk := mkProvKeyFor(k, p)
|
||||
|
||||
buf := make([]byte, 16)
|
||||
n := binary.PutVarint(buf, t.UnixNano())
|
||||
|
||||
return dstore.Put(ctx, ds.NewKey(dsk), buf[:n])
|
||||
}
|
||||
|
||||
func mkProvKeyFor(k []byte, p peer.ID) string {
|
||||
return mkProvKey(k) + "/" + base32.RawStdEncoding.EncodeToString([]byte(p))
|
||||
}
|
||||
|
||||
func mkProvKey(k []byte) string {
|
||||
return ProvidersKeyPrefix + base32.RawStdEncoding.EncodeToString(k)
|
||||
}
|
||||
|
||||
// GetProviders returns the set of providers for the given key.
|
||||
// This method _does not_ copy the set. Do not modify it.
|
||||
func (pm *ProviderManager) GetProviders(ctx context.Context, k []byte) ([]peer.AddrInfo, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "ProviderManager.GetProviders")
|
||||
defer span.End()
|
||||
|
||||
gp := &getProv{
|
||||
ctx: ctx,
|
||||
key: k,
|
||||
resp: make(chan []peer.ID, 1), // buffered to prevent sender from blocking
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case pm.getprovs <- gp:
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case peers := <-gp.resp:
|
||||
return peerstoreImpl.PeerInfos(pm.pstore, peers), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *ProviderManager) getProvidersForKey(ctx context.Context, k []byte) ([]peer.ID, error) {
|
||||
pset, err := pm.getProviderSetForKey(ctx, k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pset.providers, nil
|
||||
}
|
||||
|
||||
// returns the providerSet if it already exists in the cache, otherwise loads it from the datastore
|
||||
func (pm *ProviderManager) getProviderSetForKey(ctx context.Context, k []byte) (*providerSet, error) {
|
||||
cached, ok := pm.cache.Get(string(k))
|
||||
if ok {
|
||||
return cached.(*providerSet), nil
|
||||
}
|
||||
|
||||
pset, err := loadProviderSet(ctx, pm.dstore, k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(pset.providers) > 0 {
|
||||
pm.cache.Add(string(k), pset)
|
||||
}
|
||||
|
||||
return pset, nil
|
||||
}
|
||||
|
||||
// loads the ProviderSet out of the datastore
|
||||
func loadProviderSet(ctx context.Context, dstore ds.Datastore, k []byte) (*providerSet, error) {
|
||||
res, err := dstore.Query(ctx, dsq.Query{Prefix: mkProvKey(k)})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Close()
|
||||
|
||||
now := time.Now()
|
||||
out := newProviderSet()
|
||||
for {
|
||||
e, ok := res.NextSync()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if e.Error != nil {
|
||||
log.Error("got an error: ", e.Error)
|
||||
continue
|
||||
}
|
||||
|
||||
// check expiration time
|
||||
t, err := readTimeValue(e.Value)
|
||||
switch {
|
||||
case err != nil:
|
||||
// couldn't parse the time
|
||||
log.Error("parsing providers record from disk: ", err)
|
||||
fallthrough
|
||||
case now.Sub(t) > ProvideValidity:
|
||||
// or just expired
|
||||
err = dstore.Delete(ctx, ds.RawKey(e.Key))
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("failed to remove provider record from disk: ", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
lix := strings.LastIndex(e.Key, "/")
|
||||
|
||||
decstr, err := base32.RawStdEncoding.DecodeString(e.Key[lix+1:])
|
||||
if err != nil {
|
||||
log.Error("base32 decoding error: ", err)
|
||||
err = dstore.Delete(ctx, ds.RawKey(e.Key))
|
||||
if err != nil && err != ds.ErrNotFound {
|
||||
log.Error("failed to remove provider record from disk: ", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pid := peer.ID(decstr)
|
||||
|
||||
out.setVal(pid, t)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func readTimeValue(data []byte) (time.Time, error) {
|
||||
nsec, n := binary.Varint(data)
|
||||
if n <= 0 {
|
||||
return time.Time{}, fmt.Errorf("failed to parse time")
|
||||
}
|
||||
|
||||
return time.Unix(0, nsec), nil
|
||||
}
|
||||
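A minimal sketch of driving the ProviderManager (illustrative only; the in-memory peerstore and datastore constructors are assumptions about the usual companion packages, not something this file prescribes):

package main

import (
	"context"
	"fmt"

	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	"github.com/libp2p/go-libp2p-kad-dht/providers"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
)

func main() {
	ctx := context.Background()

	pstore, err := pstoremem.NewPeerstore() // assumed in-memory peerstore helper
	if err != nil {
		panic(err)
	}
	dstore := dssync.MutexWrap(ds.NewMapDatastore()) // in-memory, thread-safe datastore

	pm, err := providers.NewProviderManager(peer.ID("self"), pstore, dstore)
	if err != nil {
		panic(err)
	}
	defer pm.Close()

	key := []byte("some-multihash-bytes")
	if err := pm.AddProvider(ctx, key, peer.AddrInfo{ID: peer.ID("provider-1")}); err != nil {
		panic(err)
	}

	provs, err := pm.GetProviders(ctx, key)
	fmt.Println(len(provs), err) // expect: 1 <nil>
}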
159
vendor/github.com/libp2p/go-libp2p-kad-dht/qpeerset/qpeerset.go
generated
vendored
Normal file
159
vendor/github.com/libp2p/go-libp2p-kad-dht/qpeerset/qpeerset.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
package qpeerset
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"sort"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
ks "github.com/whyrusleeping/go-keyspace"
|
||||
)
|
||||
|
||||
// PeerState describes the state of a peer ID during the lifecycle of an individual lookup.
|
||||
type PeerState int
|
||||
|
||||
const (
|
||||
// PeerHeard is applied to peers which have not been queried yet.
|
||||
PeerHeard PeerState = iota
|
||||
// PeerWaiting is applied to peers that are currently being queried.
|
||||
PeerWaiting
|
||||
// PeerQueried is applied to peers who have been queried and a response was retrieved successfully.
|
||||
PeerQueried
|
||||
// PeerUnreachable is applied to peers who have been queried and a response was not retrieved successfully.
|
||||
PeerUnreachable
|
||||
)
|
||||
|
||||
// QueryPeerset maintains the state of a Kademlia asynchronous lookup.
|
||||
// The lookup state is a set of peers, each labeled with a peer state.
|
||||
type QueryPeerset struct {
|
||||
// the key being searched for
|
||||
key ks.Key
|
||||
|
||||
// all known peers
|
||||
all []queryPeerState
|
||||
|
||||
// sorted is true if all is currently in sorted order
|
||||
sorted bool
|
||||
}
|
||||
|
||||
type queryPeerState struct {
|
||||
id peer.ID
|
||||
distance *big.Int
|
||||
state PeerState
|
||||
referredBy peer.ID
|
||||
}
|
||||
|
||||
type sortedQueryPeerset QueryPeerset
|
||||
|
||||
func (sqp *sortedQueryPeerset) Len() int {
|
||||
return len(sqp.all)
|
||||
}
|
||||
|
||||
func (sqp *sortedQueryPeerset) Swap(i, j int) {
|
||||
sqp.all[i], sqp.all[j] = sqp.all[j], sqp.all[i]
|
||||
}
|
||||
|
||||
func (sqp *sortedQueryPeerset) Less(i, j int) bool {
|
||||
di, dj := sqp.all[i].distance, sqp.all[j].distance
|
||||
return di.Cmp(dj) == -1
|
||||
}
|
||||
|
||||
// NewQueryPeerset creates a new empty set of peers.
|
||||
// key is the target key of the lookup that this peer set is for.
|
||||
func NewQueryPeerset(key string) *QueryPeerset {
|
||||
return &QueryPeerset{
|
||||
key: ks.XORKeySpace.Key([]byte(key)),
|
||||
all: []queryPeerState{},
|
||||
sorted: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (qp *QueryPeerset) find(p peer.ID) int {
|
||||
for i := range qp.all {
|
||||
if qp.all[i].id == p {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (qp *QueryPeerset) distanceToKey(p peer.ID) *big.Int {
|
||||
return ks.XORKeySpace.Key([]byte(p)).Distance(qp.key)
|
||||
}
|
||||
|
||||
// TryAdd adds the peer p to the peer set.
|
||||
// If the peer is already present, no action is taken.
|
||||
// Otherwise, the peer is added with state set to PeerHeard.
|
||||
// TryAdd returns true iff the peer was not already present.
|
||||
func (qp *QueryPeerset) TryAdd(p, referredBy peer.ID) bool {
|
||||
if qp.find(p) >= 0 {
|
||||
return false
|
||||
} else {
|
||||
qp.all = append(qp.all,
|
||||
queryPeerState{id: p, distance: qp.distanceToKey(p), state: PeerHeard, referredBy: referredBy})
|
||||
qp.sorted = false
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (qp *QueryPeerset) sort() {
|
||||
if qp.sorted {
|
||||
return
|
||||
}
|
||||
sort.Sort((*sortedQueryPeerset)(qp))
|
||||
qp.sorted = true
|
||||
}
|
||||
|
||||
// SetState sets the state of peer p to s.
|
||||
// If p is not in the peerset, SetState panics.
|
||||
func (qp *QueryPeerset) SetState(p peer.ID, s PeerState) {
|
||||
qp.all[qp.find(p)].state = s
|
||||
}
|
||||
|
||||
// GetState returns the state of peer p.
|
||||
// If p is not in the peerset, GetState panics.
|
||||
func (qp *QueryPeerset) GetState(p peer.ID) PeerState {
|
||||
return qp.all[qp.find(p)].state
|
||||
}
|
||||
|
||||
// GetReferrer returns the peer that referred us to the peer p.
|
||||
// If p is not in the peerset, GetReferrer panics.
|
||||
func (qp *QueryPeerset) GetReferrer(p peer.ID) peer.ID {
|
||||
return qp.all[qp.find(p)].referredBy
|
||||
}
|
||||
|
||||
// GetClosestNInStates returns the n peers closest to the key that are in one of the given states.
// It returns fewer than n peers if fewer peers meet the condition.
|
||||
// The returned peers are sorted in ascending order by their distance to the key.
|
||||
func (qp *QueryPeerset) GetClosestNInStates(n int, states ...PeerState) (result []peer.ID) {
|
||||
qp.sort()
|
||||
m := make(map[PeerState]struct{}, len(states))
|
||||
for i := range states {
|
||||
m[states[i]] = struct{}{}
|
||||
}
|
||||
|
||||
for _, p := range qp.all {
|
||||
if _, ok := m[p.state]; ok {
|
||||
result = append(result, p.id)
|
||||
}
|
||||
}
|
||||
if len(result) >= n {
|
||||
return result[:n]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// GetClosestInStates returns the peers that are in one of the given states.
|
||||
// The returned peers are sorted in ascending order by their distance to the key.
|
||||
func (qp *QueryPeerset) GetClosestInStates(states ...PeerState) (result []peer.ID) {
|
||||
return qp.GetClosestNInStates(len(qp.all), states...)
|
||||
}
|
||||
|
||||
// NumHeard returns the number of peers in state PeerHeard.
|
||||
func (qp *QueryPeerset) NumHeard() int {
|
||||
return len(qp.GetClosestInStates(PeerHeard))
|
||||
}
|
||||
|
||||
// NumWaiting returns the number of peers in state PeerWaiting.
|
||||
func (qp *QueryPeerset) NumWaiting() int {
|
||||
return len(qp.GetClosestInStates(PeerWaiting))
|
||||
}
|
||||
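The lookup code in query.go (next file) drives this state machine. A small illustrative sketch of the lifecycle (not part of the vendored sources; the raw-string peer IDs are placeholders, since the set treats IDs as opaque bytes when computing XOR distance):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	qp := qpeerset.NewQueryPeerset("target-key")

	// Seed two peers; both start in PeerHeard.
	qp.TryAdd(peer.ID("peer-a"), peer.ID("self"))
	qp.TryAdd(peer.ID("peer-b"), peer.ID("self"))

	// Simulate querying peer-a successfully and peer-b failing.
	qp.SetState(peer.ID("peer-a"), qpeerset.PeerQueried)
	qp.SetState(peer.ID("peer-b"), qpeerset.PeerUnreachable)

	// Closest peers that are still usable for the lookup result.
	fmt.Println(qp.GetClosestNInStates(2, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried))
	fmt.Println(qp.NumHeard(), qp.NumWaiting()) // 0 0
}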
556
vendor/github.com/libp2p/go-libp2p-kad-dht/query.go
generated
vendored
Normal file
556
vendor/github.com/libp2p/go-libp2p-kad-dht/query.go
generated
vendored
Normal file
@@ -0,0 +1,556 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
)
|
||||
|
||||
// ErrNoPeersQueried is returned when we failed to connect to any peers.
|
||||
var ErrNoPeersQueried = errors.New("failed to query any peers")
|
||||
|
||||
type queryFn func(context.Context, peer.ID) ([]*peer.AddrInfo, error)
|
||||
type stopFn func(*qpeerset.QueryPeerset) bool
|
||||
|
||||
// query represents a single DHT query.
|
||||
type query struct {
|
||||
// unique identifier for the lookup instance
|
||||
id uuid.UUID
|
||||
|
||||
// target key for the lookup
|
||||
key string
|
||||
|
||||
// the query context.
|
||||
ctx context.Context
|
||||
|
||||
dht *IpfsDHT
|
||||
|
||||
// seedPeers is the set of peers that seed the query
|
||||
seedPeers []peer.ID
|
||||
|
||||
// peerTimes contains the duration of each successful query to a peer
|
||||
peerTimes map[peer.ID]time.Duration
|
||||
|
||||
// queryPeers is the set of peers known by this query and their respective states.
|
||||
queryPeers *qpeerset.QueryPeerset
|
||||
|
||||
// terminated is set when the first worker thread encounters the termination condition.
|
||||
// Its role is to make sure that once termination is determined, it is sticky.
|
||||
terminated bool
|
||||
|
||||
// waitGroup ensures lookup does not end until all query goroutines complete.
|
||||
waitGroup sync.WaitGroup
|
||||
|
||||
// the function that will be used to query a single peer.
|
||||
queryFn queryFn
|
||||
|
||||
// stopFn is used to determine if we should stop the WHOLE disjoint query.
|
||||
stopFn stopFn
|
||||
}
|
||||
|
||||
type lookupWithFollowupResult struct {
|
||||
peers []peer.ID // the top K not unreachable peers at the end of the query
|
||||
state []qpeerset.PeerState // the peer states at the end of the query of the peers slice (not closest)
|
||||
closest []peer.ID // the top K peers at the end of the query
|
||||
|
||||
// indicates that neither the lookup nor the followup has been prematurely terminated by an external condition such
|
||||
// as context cancellation or the stop function being called.
|
||||
completed bool
|
||||
}
|
||||
|
||||
// runLookupWithFollowup executes the lookup on the target using the given query function and stopping when either the
|
||||
// context is cancelled or the stop function returns true. Note: if the stop function is not sticky, i.e. it does not
|
||||
// return true every time after the first time it returns true, it is not guaranteed to cause a stop to occur just
|
||||
// because it momentarily returns true.
|
||||
//
|
||||
// After the lookup is complete the query function is run (unless stopped) against all of the top K peers from the
|
||||
// lookup that have not already been successfully queried.
|
||||
func (dht *IpfsDHT) runLookupWithFollowup(ctx context.Context, target string, queryFn queryFn, stopFn stopFn) (*lookupWithFollowupResult, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.RunLookupWithFollowup", trace.WithAttributes(internal.KeyAsAttribute("Target", target)))
|
||||
defer span.End()
|
||||
|
||||
// run the query
|
||||
lookupRes, qps, err := dht.runQuery(ctx, target, queryFn, stopFn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// query all of the top K peers we've either Heard about or have outstanding queries we're Waiting on.
|
||||
// This ensures that all of the top K results have been queried which adds to resiliency against churn for query
|
||||
// functions that carry state (e.g. FindProviders and GetValue) as well as establish connections that are needed
|
||||
// by stateless query functions (e.g. GetClosestPeers and therefore Provide and PutValue)
|
||||
queryPeers := make([]peer.ID, 0, len(lookupRes.peers))
|
||||
for i, p := range lookupRes.peers {
|
||||
if state := lookupRes.state[i]; state == qpeerset.PeerHeard || state == qpeerset.PeerWaiting {
|
||||
queryPeers = append(queryPeers, p)
|
||||
}
|
||||
}
|
||||
|
||||
if len(queryPeers) == 0 {
|
||||
return lookupRes, nil
|
||||
}
|
||||
|
||||
// return if the lookup has been externally stopped
|
||||
if ctx.Err() != nil || stopFn(qps) {
|
||||
lookupRes.completed = false
|
||||
return lookupRes, nil
|
||||
}
|
||||
|
||||
doneCh := make(chan struct{}, len(queryPeers))
|
||||
followUpCtx, cancelFollowUp := context.WithCancel(ctx)
|
||||
defer cancelFollowUp()
|
||||
for _, p := range queryPeers {
|
||||
qp := p
|
||||
go func() {
|
||||
_, _ = queryFn(followUpCtx, qp)
|
||||
doneCh <- struct{}{}
|
||||
}()
|
||||
}
|
||||
|
||||
// wait for all queries to complete before returning, aborting ongoing queries if we've been externally stopped
|
||||
followupsCompleted := 0
|
||||
processFollowUp:
|
||||
for i := 0; i < len(queryPeers); i++ {
|
||||
select {
|
||||
case <-doneCh:
|
||||
followupsCompleted++
|
||||
if stopFn(qps) {
|
||||
cancelFollowUp()
|
||||
if i < len(queryPeers)-1 {
|
||||
lookupRes.completed = false
|
||||
}
|
||||
break processFollowUp
|
||||
}
|
||||
case <-ctx.Done():
|
||||
lookupRes.completed = false
|
||||
cancelFollowUp()
|
||||
break processFollowUp
|
||||
}
|
||||
}
|
||||
|
||||
if !lookupRes.completed {
|
||||
for i := followupsCompleted; i < len(queryPeers); i++ {
|
||||
<-doneCh
|
||||
}
|
||||
}
|
||||
|
||||
return lookupRes, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) runQuery(ctx context.Context, target string, queryFn queryFn, stopFn stopFn) (*lookupWithFollowupResult, *qpeerset.QueryPeerset, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.RunQuery")
|
||||
defer span.End()
|
||||
|
||||
// pick the K closest peers to the key in our Routing table.
|
||||
targetKadID := kb.ConvertKey(target)
|
||||
seedPeers := dht.routingTable.NearestPeers(targetKadID, dht.bucketSize)
|
||||
if len(seedPeers) == 0 {
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.QueryError,
|
||||
Extra: kb.ErrLookupFailure.Error(),
|
||||
})
|
||||
return nil, nil, kb.ErrLookupFailure
|
||||
}
|
||||
|
||||
q := &query{
|
||||
id: uuid.New(),
|
||||
key: target,
|
||||
ctx: ctx,
|
||||
dht: dht,
|
||||
queryPeers: qpeerset.NewQueryPeerset(target),
|
||||
seedPeers: seedPeers,
|
||||
peerTimes: make(map[peer.ID]time.Duration),
|
||||
terminated: false,
|
||||
queryFn: queryFn,
|
||||
stopFn: stopFn,
|
||||
}
|
||||
|
||||
// run the query
|
||||
q.run()
|
||||
|
||||
if ctx.Err() == nil {
|
||||
q.recordValuablePeers()
|
||||
}
|
||||
|
||||
res := q.constructLookupResult(targetKadID)
|
||||
return res, q.queryPeers, nil
|
||||
}
|
||||
|
||||
func (q *query) recordPeerIsValuable(p peer.ID) {
|
||||
if !q.dht.routingTable.UpdateLastUsefulAt(p, time.Now()) {
|
||||
// not in routing table
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (q *query) recordValuablePeers() {
|
||||
// Valuable peers algorithm:
|
||||
// Label the seed peer that responded to a query in the shortest amount of time as the "most valuable peer" (MVP)
|
||||
// Each seed peer that responded to a query within some range (i.e. 2x) of the MVP's time is a valuable peer
|
||||
// Mark the MVP and all the other valuable peers as valuable
|
||||
mvpDuration := time.Duration(math.MaxInt64)
|
||||
for _, p := range q.seedPeers {
|
||||
if queryTime, ok := q.peerTimes[p]; ok && queryTime < mvpDuration {
|
||||
mvpDuration = queryTime
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range q.seedPeers {
|
||||
if queryTime, ok := q.peerTimes[p]; ok && queryTime < mvpDuration*2 {
|
||||
q.recordPeerIsValuable(p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// constructLookupResult takes the query information and uses it to construct the lookup result
|
||||
func (q *query) constructLookupResult(target kb.ID) *lookupWithFollowupResult {
|
||||
// determine if the query terminated early
|
||||
completed := true
|
||||
|
||||
// Lookup and starvation are both valid ways for a lookup to complete. (Starvation does not imply failure.)
|
||||
// Lookup termination (as defined in isLookupTermination) is not possible in small networks.
|
||||
// Starvation is a successful query termination in small networks.
|
||||
if !(q.isLookupTermination() || q.isStarvationTermination()) {
|
||||
completed = false
|
||||
}
|
||||
|
||||
// extract the top K not unreachable peers
|
||||
var peers []peer.ID
|
||||
peerState := make(map[peer.ID]qpeerset.PeerState)
|
||||
qp := q.queryPeers.GetClosestNInStates(q.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried)
|
||||
for _, p := range qp {
|
||||
state := q.queryPeers.GetState(p)
|
||||
peerState[p] = state
|
||||
peers = append(peers, p)
|
||||
}
|
||||
|
||||
// get the top K overall peers
|
||||
sortedPeers := kb.SortClosestPeers(peers, target)
|
||||
if len(sortedPeers) > q.dht.bucketSize {
|
||||
sortedPeers = sortedPeers[:q.dht.bucketSize]
|
||||
}
|
||||
|
||||
closest := q.queryPeers.GetClosestNInStates(q.dht.bucketSize, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried, qpeerset.PeerUnreachable)
|
||||
|
||||
// return the top K not unreachable peers as well as their states at the end of the query
|
||||
res := &lookupWithFollowupResult{
|
||||
peers: sortedPeers,
|
||||
state: make([]qpeerset.PeerState, len(sortedPeers)),
|
||||
completed: completed,
|
||||
closest: closest,
|
||||
}
|
||||
|
||||
for i, p := range sortedPeers {
|
||||
res.state[i] = peerState[p]
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
type queryUpdate struct {
|
||||
cause peer.ID
|
||||
queried []peer.ID
|
||||
heard []peer.ID
|
||||
unreachable []peer.ID
|
||||
|
||||
queryDuration time.Duration
|
||||
}
|
||||
|
||||
func (q *query) run() {
|
||||
ctx, span := internal.StartSpan(q.ctx, "IpfsDHT.Query.Run")
|
||||
defer span.End()
|
||||
|
||||
pathCtx, cancelPath := context.WithCancel(ctx)
|
||||
defer cancelPath()
|
||||
|
||||
alpha := q.dht.alpha
|
||||
|
||||
ch := make(chan *queryUpdate, alpha)
|
||||
ch <- &queryUpdate{cause: q.dht.self, heard: q.seedPeers}
|
||||
|
||||
// return only once all outstanding queries have completed.
|
||||
defer q.waitGroup.Wait()
|
||||
for {
|
||||
var cause peer.ID
|
||||
select {
|
||||
case update := <-ch:
|
||||
q.updateState(pathCtx, update)
|
||||
cause = update.cause
|
||||
case <-pathCtx.Done():
|
||||
q.terminate(pathCtx, cancelPath, LookupCancelled)
|
||||
}
|
||||
|
||||
// calculate the maximum number of queries we could be spawning.
|
||||
// Note: NumWaiting will be updated in spawnQuery
|
||||
maxNumQueriesToSpawn := alpha - q.queryPeers.NumWaiting()
|
||||
|
||||
// termination is triggered on end-of-lookup conditions or starvation of unused peers
|
||||
// it also returns the peers we should query next for a maximum of `maxNumQueriesToSpawn` peers.
|
||||
ready, reason, qPeers := q.isReadyToTerminate(pathCtx, maxNumQueriesToSpawn)
|
||||
if ready {
|
||||
q.terminate(pathCtx, cancelPath, reason)
|
||||
}
|
||||
|
||||
if q.terminated {
|
||||
return
|
||||
}
|
||||
|
||||
// try spawning the queries, if there are no available peers to query then we won't spawn them
|
||||
for _, p := range qPeers {
|
||||
q.spawnQuery(pathCtx, cause, p, ch)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// spawnQuery starts one query, if an available heard peer is found
|
||||
func (q *query) spawnQuery(ctx context.Context, cause peer.ID, queryPeer peer.ID, ch chan<- *queryUpdate) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.SpawnQuery", trace.WithAttributes(
|
||||
attribute.String("Cause", cause.String()),
|
||||
attribute.String("QueryPeer", queryPeer.String()),
|
||||
))
|
||||
defer span.End()
|
||||
|
||||
PublishLookupEvent(ctx,
|
||||
NewLookupEvent(
|
||||
q.dht.self,
|
||||
q.id,
|
||||
q.key,
|
||||
NewLookupUpdateEvent(
|
||||
cause,
|
||||
q.queryPeers.GetReferrer(queryPeer),
|
||||
nil, // heard
|
||||
[]peer.ID{queryPeer}, // waiting
|
||||
nil, // queried
|
||||
nil, // unreachable
|
||||
),
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
)
|
||||
q.queryPeers.SetState(queryPeer, qpeerset.PeerWaiting)
|
||||
q.waitGroup.Add(1)
|
||||
go q.queryPeer(ctx, ch, queryPeer)
|
||||
}
|
||||
|
||||
func (q *query) isReadyToTerminate(ctx context.Context, nPeersToQuery int) (bool, LookupTerminationReason, []peer.ID) {
|
||||
// give the application logic a chance to terminate
|
||||
if q.stopFn(q.queryPeers) {
|
||||
return true, LookupStopped, nil
|
||||
}
|
||||
if q.isStarvationTermination() {
|
||||
return true, LookupStarvation, nil
|
||||
}
|
||||
if q.isLookupTermination() {
|
||||
return true, LookupCompleted, nil
|
||||
}
|
||||
|
||||
// The peers we query next should be ones that we have only Heard about.
|
||||
var peersToQuery []peer.ID
|
||||
peers := q.queryPeers.GetClosestInStates(qpeerset.PeerHeard)
|
||||
count := 0
|
||||
for _, p := range peers {
|
||||
peersToQuery = append(peersToQuery, p)
|
||||
count++
|
||||
if count == nPeersToQuery {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return false, -1, peersToQuery
|
||||
}
|
||||
|
||||
// From the set of all nodes that are not unreachable,
|
||||
// if the closest beta nodes are all queried, the lookup can terminate.
|
||||
func (q *query) isLookupTermination() bool {
|
||||
peers := q.queryPeers.GetClosestNInStates(q.dht.beta, qpeerset.PeerHeard, qpeerset.PeerWaiting, qpeerset.PeerQueried)
|
||||
for _, p := range peers {
|
||||
if q.queryPeers.GetState(p) != qpeerset.PeerQueried {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *query) isStarvationTermination() bool {
|
||||
return q.queryPeers.NumHeard() == 0 && q.queryPeers.NumWaiting() == 0
|
||||
}
|
||||
|
||||
func (q *query) terminate(ctx context.Context, cancel context.CancelFunc, reason LookupTerminationReason) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.Query.Terminate", trace.WithAttributes(attribute.Stringer("Reason", reason)))
|
||||
defer span.End()
|
||||
|
||||
if q.terminated {
|
||||
return
|
||||
}
|
||||
|
||||
PublishLookupEvent(ctx,
|
||||
NewLookupEvent(
|
||||
q.dht.self,
|
||||
q.id,
|
||||
q.key,
|
||||
nil,
|
||||
nil,
|
||||
NewLookupTerminateEvent(reason),
|
||||
),
|
||||
)
|
||||
cancel() // abort outstanding queries
|
||||
q.terminated = true
|
||||
}
|
||||
|
||||
// queryPeer queries a single peer and reports its findings on the channel.
|
||||
// queryPeer does not access the query state in queryPeers!
|
||||
func (q *query) queryPeer(ctx context.Context, ch chan<- *queryUpdate, p peer.ID) {
|
||||
defer q.waitGroup.Done()
|
||||
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.QueryPeer")
|
||||
defer span.End()
|
||||
|
||||
dialCtx, queryCtx := ctx, ctx
|
||||
|
||||
// dial the peer
|
||||
if err := q.dht.dialPeer(dialCtx, p); err != nil {
|
||||
// remove the peer if there was a dial failure, but not because of a context cancellation
|
||||
if dialCtx.Err() == nil {
|
||||
q.dht.peerStoppedDHT(p)
|
||||
}
|
||||
ch <- &queryUpdate{cause: p, unreachable: []peer.ID{p}}
|
||||
return
|
||||
}
|
||||
|
||||
startQuery := time.Now()
|
||||
// send query RPC to the remote peer
|
||||
newPeers, err := q.queryFn(queryCtx, p)
|
||||
if err != nil {
|
||||
if queryCtx.Err() == nil {
|
||||
q.dht.peerStoppedDHT(p)
|
||||
}
|
||||
ch <- &queryUpdate{cause: p, unreachable: []peer.ID{p}}
|
||||
return
|
||||
}
|
||||
|
||||
queryDuration := time.Since(startQuery)
|
||||
|
||||
// query successful, try to add to RT
|
||||
q.dht.validPeerFound(p)
|
||||
|
||||
// process new peers
|
||||
saw := []peer.ID{}
|
||||
for _, next := range newPeers {
|
||||
if next.ID == q.dht.self { // don't add self.
|
||||
logger.Debugf("PEERS CLOSER -- worker for: %v found self", p)
|
||||
continue
|
||||
}
|
||||
|
||||
// add any other known addresses for the candidate peer.
|
||||
curInfo := q.dht.peerstore.PeerInfo(next.ID)
|
||||
next.Addrs = append(next.Addrs, curInfo.Addrs...)
|
||||
|
||||
// add their addresses to the dialer's peerstore
|
||||
//
|
||||
// add the next peer to the query if it matches the query target, even if it would otherwise fail the query filter
|
||||
// TODO: this behavior is really specific to how FindPeer works and not GetClosestPeers or any other function
|
||||
isTarget := string(next.ID) == q.key
|
||||
if isTarget || q.dht.queryPeerFilter(q.dht, *next) {
|
||||
q.dht.maybeAddAddrs(next.ID, next.Addrs, pstore.TempAddrTTL)
|
||||
saw = append(saw, next.ID)
|
||||
}
|
||||
}
|
||||
|
||||
ch <- &queryUpdate{cause: p, heard: saw, queried: []peer.ID{p}, queryDuration: queryDuration}
|
||||
}
|
||||
|
||||
func (q *query) updateState(ctx context.Context, up *queryUpdate) {
|
||||
if q.terminated {
|
||||
panic("update should not be invoked after the logical lookup termination")
|
||||
}
|
||||
PublishLookupEvent(ctx,
|
||||
NewLookupEvent(
|
||||
q.dht.self,
|
||||
q.id,
|
||||
q.key,
|
||||
nil,
|
||||
NewLookupUpdateEvent(
|
||||
up.cause,
|
||||
up.cause,
|
||||
up.heard, // heard
|
||||
nil, // waiting
|
||||
up.queried, // queried
|
||||
up.unreachable, // unreachable
|
||||
),
|
||||
nil,
|
||||
),
|
||||
)
|
||||
for _, p := range up.heard {
|
||||
if p == q.dht.self { // don't add self.
|
||||
continue
|
||||
}
|
||||
q.queryPeers.TryAdd(p, up.cause)
|
||||
}
|
||||
for _, p := range up.queried {
|
||||
if p == q.dht.self { // don't add self.
|
||||
continue
|
||||
}
|
||||
if st := q.queryPeers.GetState(p); st == qpeerset.PeerWaiting {
|
||||
q.queryPeers.SetState(p, qpeerset.PeerQueried)
|
||||
q.peerTimes[p] = up.queryDuration
|
||||
} else {
|
||||
panic(fmt.Errorf("kademlia protocol error: tried to transition to the queried state from state %v", st))
|
||||
}
|
||||
}
|
||||
for _, p := range up.unreachable {
|
||||
if p == q.dht.self { // don't add self.
|
||||
continue
|
||||
}
|
||||
|
||||
if st := q.queryPeers.GetState(p); st == qpeerset.PeerWaiting {
|
||||
q.queryPeers.SetState(p, qpeerset.PeerUnreachable)
|
||||
} else {
|
||||
panic(fmt.Errorf("kademlia protocol error: tried to transition to the unreachable state from state %v", st))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) dialPeer(ctx context.Context, p peer.ID) error {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.DialPeer", trace.WithAttributes(attribute.String("PeerID", p.String())))
|
||||
defer span.End()
|
||||
|
||||
// short-circuit if we're already connected.
|
||||
if dht.host.Network().Connectedness(p) == network.Connected {
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Debug("not connected. dialing.")
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.DialingPeer,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
pi := peer.AddrInfo{ID: p}
|
||||
if err := dht.host.Connect(ctx, pi); err != nil {
|
||||
logger.Debugf("error connecting: %s", err)
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.QueryError,
|
||||
Extra: err.Error(),
|
||||
ID: p,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
logger.Debugf("connected. dial success.")
|
||||
return nil
|
||||
}
|
||||
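A minimal package-internal sketch of how the machinery above is typically driven: a queryFn issues the RPC to one peer and returns closer peers, and a stopFn decides when the whole lookup should halt. This mirrors the getValues pattern shown later in routing.go; the helper name findCloserSketch is hypothetical, and the snippet assumes the package's existing imports (peer, qpeerset, protoMessenger).

// findCloserSketch is a hypothetical helper showing the queryFn/stopFn
// contract expected by runLookupWithFollowup.
func (dht *IpfsDHT) findCloserSketch(ctx context.Context, target string, stop chan struct{}) ([]peer.ID, error) {
	lookupRes, err := dht.runLookupWithFollowup(ctx, target,
		// queryFn: ask a single peer for peers closer to the target key.
		func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
			return dht.protoMessenger.GetClosestPeers(ctx, p, peer.ID(target))
		},
		// stopFn: terminate the whole lookup once the stop channel is closed.
		func(*qpeerset.QueryPeerset) bool {
			select {
			case <-stop:
				return true
			default:
				return false
			}
		},
	)
	if err != nil {
		return nil, err
	}
	// lookupRes.peers holds the top K not-unreachable peers; lookupRes.state
	// records each peer's final state for callers that need it.
	return lookupRes.peers, nil
}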
138
vendor/github.com/libp2p/go-libp2p-kad-dht/records.go
generated
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
ci "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
type pubkrs struct {
|
||||
pubk ci.PubKey
|
||||
err error
|
||||
}
|
||||
|
||||
// GetPublicKey gets the public key when given a Peer ID. It will extract from
|
||||
// the Peer ID if inlined or ask the node it belongs to or ask the DHT.
|
||||
func (dht *IpfsDHT) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) {
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.GetPublicKey", trace.WithAttributes(attribute.Stringer("PeerID", p)))
|
||||
defer span.End()
|
||||
|
||||
if !dht.enableValues {
|
||||
return nil, routing.ErrNotSupported
|
||||
}
|
||||
|
||||
logger.Debugf("getPublicKey for: %s", p)
|
||||
|
||||
// Check locally. Will also try to extract the public key from the peer
|
||||
// ID itself if possible (if inlined).
|
||||
pk := dht.peerstore.PubKey(p)
|
||||
if pk != nil {
|
||||
return pk, nil
|
||||
}
|
||||
|
||||
// Try getting the public key both directly from the node it identifies
|
||||
// and from the DHT, in parallel
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
resp := make(chan pubkrs, 2)
|
||||
go func() {
|
||||
pubk, err := dht.getPublicKeyFromNode(ctx, p)
|
||||
resp <- pubkrs{pubk, err}
|
||||
}()
|
||||
|
||||
// Note that the number of open connections is capped by the dial
|
||||
// limiter, so there is a chance that getPublicKeyFromDHT(), which
|
||||
// potentially opens a lot of connections, will block
|
||||
// getPublicKeyFromNode() from getting a connection.
|
||||
// Currently this doesn't seem to cause an issue so leaving as is
|
||||
// for now.
|
||||
go func() {
|
||||
pubk, err := dht.getPublicKeyFromDHT(ctx, p)
|
||||
resp <- pubkrs{pubk, err}
|
||||
}()
|
||||
|
||||
// Wait for one of the two go routines to return
|
||||
// a public key (or for both to error out)
|
||||
var err error
|
||||
for i := 0; i < 2; i++ {
|
||||
r := <-resp
|
||||
if r.err == nil {
|
||||
// Found the public key
|
||||
err := dht.peerstore.AddPubKey(p, r.pubk)
|
||||
if err != nil {
|
||||
logger.Errorw("failed to add public key to peerstore", "peer", p)
|
||||
}
|
||||
return r.pubk, nil
|
||||
}
|
||||
err = r.err
|
||||
}
|
||||
|
||||
// Both go routines failed to find a public key
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) getPublicKeyFromDHT(ctx context.Context, p peer.ID) (ci.PubKey, error) {
|
||||
// Only retrieve one value, because the public key is immutable
|
||||
// so there's no need to retrieve multiple versions
|
||||
pkkey := routing.KeyForPublicKey(p)
|
||||
val, err := dht.GetValue(ctx, pkkey, Quorum(1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubk, err := ci.UnmarshalPublicKey(val)
|
||||
if err != nil {
|
||||
logger.Errorf("Could not unmarshal public key retrieved from DHT for %v", p)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Note: No need to check that public key hash matches peer ID
|
||||
// because this is done by GetValues()
|
||||
logger.Debugf("Got public key for %s from DHT", p)
|
||||
return pubk, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) getPublicKeyFromNode(ctx context.Context, p peer.ID) (ci.PubKey, error) {
|
||||
// check locally, just in case...
|
||||
pk := dht.peerstore.PubKey(p)
|
||||
if pk != nil {
|
||||
return pk, nil
|
||||
}
|
||||
|
||||
// Get the key from the node itself
|
||||
pkkey := routing.KeyForPublicKey(p)
|
||||
record, _, err := dht.protoMessenger.GetValue(ctx, p, pkkey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// node doesn't have key :(
|
||||
if record == nil {
|
||||
return nil, fmt.Errorf("node %v not responding with its public key", p)
|
||||
}
|
||||
|
||||
pubk, err := ci.UnmarshalPublicKey(record.GetValue())
|
||||
if err != nil {
|
||||
logger.Errorf("Could not unmarshal public key for %v", p)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make sure the public key matches the peer ID
|
||||
id, err := peer.IDFromPublicKey(pubk)
|
||||
if err != nil {
|
||||
logger.Errorf("Could not extract peer id from public key for %v", p)
|
||||
return nil, err
|
||||
}
|
||||
if id != p {
|
||||
return nil, fmt.Errorf("public key %v does not match peer %v", id, p)
|
||||
}
|
||||
|
||||
logger.Debugf("Got public key from node %v itself", p)
|
||||
return pubk, nil
|
||||
}
|
||||
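A short usage sketch for GetPublicKey, assuming an already-constructed *IpfsDHT; the function name fetchAndCheckKey is illustrative. The extra peer-ID check repeats what getPublicKeyFromNode already does and is shown only to make the verification step explicit.

package example

import (
	"context"
	"fmt"

	dht "github.com/libp2p/go-libp2p-kad-dht"
	ci "github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
)

// fetchAndCheckKey retrieves a peer's public key (from the peerstore, the
// peer itself, or the DHT) and double-checks that it matches the peer ID.
func fetchAndCheckKey(ctx context.Context, d *dht.IpfsDHT, p peer.ID) (ci.PubKey, error) {
	pubk, err := d.GetPublicKey(ctx, p)
	if err != nil {
		return nil, err
	}
	id, err := peer.IDFromPublicKey(pubk)
	if err != nil {
		return nil, err
	}
	if id != p {
		return nil, fmt.Errorf("public key does not match peer %s", p)
	}
	return pubk, nil
}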
694
vendor/github.com/libp2p/go-libp2p-kad-dht/routing.go
generated
vendored
Normal file
@@ -0,0 +1,694 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
u "github.com/ipfs/boxo/util"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
internalConfig "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/netsize"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/qpeerset"
|
||||
kb "github.com/libp2p/go-libp2p-kbucket"
|
||||
record "github.com/libp2p/go-libp2p-record"
|
||||
"github.com/multiformats/go-multihash"
|
||||
)
|
||||
|
||||
// This file implements the Routing interface for the IpfsDHT struct.
|
||||
|
||||
// Basic Put/Get
|
||||
|
||||
// PutValue adds value corresponding to given Key.
|
||||
// This is the top level "Store" operation of the DHT
|
||||
func (dht *IpfsDHT) PutValue(ctx context.Context, key string, value []byte, opts ...routing.Option) (err error) {
|
||||
ctx, end := tracer.PutValue(dhtName, ctx, key, value, opts...)
|
||||
defer func() { end(err) }()
|
||||
|
||||
if !dht.enableValues {
|
||||
return routing.ErrNotSupported
|
||||
}
|
||||
|
||||
logger.Debugw("putting value", "key", internal.LoggableRecordKeyString(key))
|
||||
|
||||
// don't even allow local users to put bad values.
|
||||
if err := dht.Validator.Validate(key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
old, err := dht.getLocal(ctx, key)
|
||||
if err != nil {
|
||||
// Means something is wrong with the datastore.
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if we have an old value that's not the same as the new one.
|
||||
if old != nil && !bytes.Equal(old.GetValue(), value) {
|
||||
// Check to see if the new one is better.
|
||||
i, err := dht.Validator.Select(key, [][]byte{value, old.GetValue()})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if i != 0 {
|
||||
return fmt.Errorf("can't replace a newer value with an older value")
|
||||
}
|
||||
}
|
||||
|
||||
rec := record.MakePutRecord(key, value)
|
||||
rec.TimeReceived = u.FormatRFC3339(time.Now())
|
||||
err = dht.putLocal(ctx, key, rec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peers, err := dht.GetClosestPeers(ctx, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
for _, p := range peers {
|
||||
wg.Add(1)
|
||||
go func(p peer.ID) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
defer wg.Done()
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.Value,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
err := dht.protoMessenger.PutValue(ctx, p, rec)
|
||||
if err != nil {
|
||||
logger.Debugf("failed putting value to peer: %s", err)
|
||||
}
|
||||
}(p)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// recvdVal stores a value and the peer from which we got the value.
|
||||
type recvdVal struct {
|
||||
Val []byte
|
||||
From peer.ID
|
||||
}
|
||||
|
||||
// GetValue searches for the value corresponding to given Key.
|
||||
func (dht *IpfsDHT) GetValue(ctx context.Context, key string, opts ...routing.Option) (result []byte, err error) {
|
||||
ctx, end := tracer.GetValue(dhtName, ctx, key, opts...)
|
||||
defer func() { end(result, err) }()
|
||||
|
||||
if !dht.enableValues {
|
||||
return nil, routing.ErrNotSupported
|
||||
}
|
||||
|
||||
// apply defaultQuorum if relevant
|
||||
var cfg routing.Options
|
||||
if err := cfg.Apply(opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, Quorum(internalConfig.GetQuorum(&cfg)))
|
||||
|
||||
responses, err := dht.SearchValue(ctx, key, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var best []byte
|
||||
|
||||
for r := range responses {
|
||||
best = r
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return best, ctx.Err()
|
||||
}
|
||||
|
||||
if best == nil {
|
||||
return nil, routing.ErrNotFound
|
||||
}
|
||||
logger.Debugf("GetValue %v %x", internal.LoggableRecordKeyString(key), best)
|
||||
return best, nil
|
||||
}
|
||||
|
||||
// SearchValue searches for the value corresponding to given Key and streams the results.
|
||||
func (dht *IpfsDHT) SearchValue(ctx context.Context, key string, opts ...routing.Option) (ch <-chan []byte, err error) {
|
||||
ctx, end := tracer.SearchValue(dhtName, ctx, key, opts...)
|
||||
defer func() { ch, err = end(ch, err) }()
|
||||
|
||||
if !dht.enableValues {
|
||||
return nil, routing.ErrNotSupported
|
||||
}
|
||||
|
||||
var cfg routing.Options
|
||||
if err := cfg.Apply(opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
responsesNeeded := 0
|
||||
if !cfg.Offline {
|
||||
responsesNeeded = internalConfig.GetQuorum(&cfg)
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
valCh, lookupRes := dht.getValues(ctx, key, stopCh)
|
||||
|
||||
out := make(chan []byte)
|
||||
go func() {
|
||||
defer close(out)
|
||||
best, peersWithBest, aborted := dht.searchValueQuorum(ctx, key, valCh, stopCh, out, responsesNeeded)
|
||||
if best == nil || aborted {
|
||||
return
|
||||
}
|
||||
|
||||
updatePeers := make([]peer.ID, 0, dht.bucketSize)
|
||||
select {
|
||||
case l := <-lookupRes:
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, p := range l.peers {
|
||||
if _, ok := peersWithBest[p]; !ok {
|
||||
updatePeers = append(updatePeers, p)
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
dht.updatePeerValues(dht.Context(), key, best, updatePeers)
|
||||
}()
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) searchValueQuorum(ctx context.Context, key string, valCh <-chan recvdVal, stopCh chan struct{},
|
||||
out chan<- []byte, nvals int) ([]byte, map[peer.ID]struct{}, bool) {
|
||||
numResponses := 0
|
||||
return dht.processValues(ctx, key, valCh,
|
||||
func(ctx context.Context, v recvdVal, better bool) bool {
|
||||
numResponses++
|
||||
if better {
|
||||
select {
|
||||
case out <- v.Val:
|
||||
case <-ctx.Done():
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if nvals > 0 && numResponses > nvals {
|
||||
close(stopCh)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) processValues(ctx context.Context, key string, vals <-chan recvdVal,
|
||||
newVal func(ctx context.Context, v recvdVal, better bool) bool) (best []byte, peersWithBest map[peer.ID]struct{}, aborted bool) {
|
||||
loop:
|
||||
for {
|
||||
if aborted {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case v, ok := <-vals:
|
||||
if !ok {
|
||||
break loop
|
||||
}
|
||||
|
||||
// Select best value
|
||||
if best != nil {
|
||||
if bytes.Equal(best, v.Val) {
|
||||
peersWithBest[v.From] = struct{}{}
|
||||
aborted = newVal(ctx, v, false)
|
||||
continue
|
||||
}
|
||||
sel, err := dht.Validator.Select(key, [][]byte{best, v.Val})
|
||||
if err != nil {
|
||||
logger.Warnw("failed to select best value", "key", internal.LoggableRecordKeyString(key), "error", err)
|
||||
continue
|
||||
}
|
||||
if sel != 1 {
|
||||
aborted = newVal(ctx, v, false)
|
||||
continue
|
||||
}
|
||||
}
|
||||
peersWithBest = make(map[peer.ID]struct{})
|
||||
peersWithBest[v.From] = struct{}{}
|
||||
best = v.Val
|
||||
aborted = newVal(ctx, v, true)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) updatePeerValues(ctx context.Context, key string, val []byte, peers []peer.ID) {
|
||||
fixupRec := record.MakePutRecord(key, val)
|
||||
for _, p := range peers {
|
||||
go func(p peer.ID) {
|
||||
// TODO: Is this possible?
|
||||
if p == dht.self {
|
||||
err := dht.putLocal(ctx, key, fixupRec)
|
||||
if err != nil {
|
||||
logger.Error("Error correcting local dht entry:", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
|
||||
defer cancel()
|
||||
err := dht.protoMessenger.PutValue(ctx, p, fixupRec)
|
||||
if err != nil {
|
||||
logger.Debug("Error correcting DHT entry: ", err)
|
||||
}
|
||||
}(p)
|
||||
}
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) getValues(ctx context.Context, key string, stopQuery chan struct{}) (<-chan recvdVal, <-chan *lookupWithFollowupResult) {
|
||||
valCh := make(chan recvdVal, 1)
|
||||
lookupResCh := make(chan *lookupWithFollowupResult, 1)
|
||||
|
||||
logger.Debugw("finding value", "key", internal.LoggableRecordKeyString(key))
|
||||
|
||||
if rec, err := dht.getLocal(ctx, key); rec != nil && err == nil {
|
||||
select {
|
||||
case valCh <- recvdVal{
|
||||
Val: rec.GetValue(),
|
||||
From: dht.self,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(valCh)
|
||||
defer close(lookupResCh)
|
||||
lookupRes, err := dht.runLookupWithFollowup(ctx, key,
|
||||
func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.SendingQuery,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
rec, peers, err := dht.protoMessenger.GetValue(ctx, p, key)
|
||||
if err != nil {
|
||||
logger.Debugf("error getting closer peers: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.PeerResponse,
|
||||
ID: p,
|
||||
Responses: peers,
|
||||
})
|
||||
|
||||
if rec == nil {
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
val := rec.GetValue()
|
||||
if val == nil {
|
||||
logger.Debug("received a nil record value")
|
||||
return peers, nil
|
||||
}
|
||||
if err := dht.Validator.Validate(key, val); err != nil {
|
||||
// make sure record is valid
|
||||
logger.Debugw("received invalid record (discarded)", "error", err)
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
// the record is present and valid, send it out for processing
|
||||
select {
|
||||
case valCh <- recvdVal{
|
||||
Val: val,
|
||||
From: p,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
return peers, nil
|
||||
},
|
||||
func(*qpeerset.QueryPeerset) bool {
|
||||
select {
|
||||
case <-stopQuery:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
lookupResCh <- lookupRes
|
||||
|
||||
if ctx.Err() == nil {
|
||||
dht.refreshRTIfNoShortcut(kb.ConvertKey(key), lookupRes)
|
||||
}
|
||||
}()
|
||||
|
||||
return valCh, lookupResCh
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) refreshRTIfNoShortcut(key kb.ID, lookupRes *lookupWithFollowupResult) {
|
||||
if lookupRes.completed {
|
||||
// refresh the cpl for this key as the query was successful
|
||||
dht.routingTable.ResetCplRefreshedAtForID(key, time.Now())
|
||||
}
|
||||
}
|
||||
|
||||
// Provider abstraction for indirect stores.
|
||||
// Some DHTs store values directly, while an indirect store stores pointers to
|
||||
// locations of the value, similarly to Coral and Mainline DHT.
|
||||
|
||||
// Provide makes this node announce that it can provide a value for the given key
|
||||
func (dht *IpfsDHT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err error) {
|
||||
ctx, end := tracer.Provide(dhtName, ctx, key, brdcst)
|
||||
defer func() { end(err) }()
|
||||
|
||||
if !dht.enableProviders {
|
||||
return routing.ErrNotSupported
|
||||
} else if !key.Defined() {
|
||||
return fmt.Errorf("invalid cid: undefined")
|
||||
}
|
||||
keyMH := key.Hash()
|
||||
logger.Debugw("providing", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH))
|
||||
|
||||
// add self locally
|
||||
dht.providerStore.AddProvider(ctx, keyMH, peer.AddrInfo{ID: dht.self})
|
||||
if !brdcst {
|
||||
return nil
|
||||
}
|
||||
|
||||
if dht.enableOptProv {
|
||||
err := dht.optimisticProvide(ctx, keyMH)
|
||||
if errors.Is(err, netsize.ErrNotEnoughData) {
|
||||
logger.Debugln("not enough data for optimistic provide taking classic approach")
|
||||
return dht.classicProvide(ctx, keyMH)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return dht.classicProvide(ctx, keyMH)
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) classicProvide(ctx context.Context, keyMH multihash.Multihash) error {
|
||||
closerCtx := ctx
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
now := time.Now()
|
||||
timeout := deadline.Sub(now)
|
||||
|
||||
if timeout < 0 {
|
||||
// timed out
|
||||
return context.DeadlineExceeded
|
||||
} else if timeout < 10*time.Second {
|
||||
// Reserve 10% for the final put.
|
||||
deadline = deadline.Add(-timeout / 10)
|
||||
} else {
|
||||
// Otherwise, reserve a second (we'll already be
|
||||
// connected so this should be fast).
|
||||
deadline = deadline.Add(-time.Second)
|
||||
}
|
||||
var cancel context.CancelFunc
|
||||
closerCtx, cancel = context.WithDeadline(ctx, deadline)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
var exceededDeadline bool
|
||||
peers, err := dht.GetClosestPeers(closerCtx, string(keyMH))
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
// If the _inner_ deadline has been exceeded but the _outer_
|
||||
// context is still fine, provide the value to the closest peers
|
||||
// we managed to find, even if they're not the _actual_ closest peers.
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
exceededDeadline = true
|
||||
case nil:
|
||||
default:
|
||||
return err
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
for _, p := range peers {
|
||||
wg.Add(1)
|
||||
go func(p peer.ID) {
|
||||
defer wg.Done()
|
||||
logger.Debugf("putProvider(%s, %s)", internal.LoggableProviderRecordBytes(keyMH), p)
|
||||
err := dht.protoMessenger.PutProviderAddrs(ctx, p, keyMH, peer.AddrInfo{
|
||||
ID: dht.self,
|
||||
Addrs: dht.filterAddrs(dht.host.Addrs()),
|
||||
})
|
||||
if err != nil {
|
||||
logger.Debug(err)
|
||||
}
|
||||
}(p)
|
||||
}
|
||||
wg.Wait()
|
||||
if exceededDeadline {
|
||||
return context.DeadlineExceeded
|
||||
}
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
// FindProviders searches until the context expires.
|
||||
func (dht *IpfsDHT) FindProviders(ctx context.Context, c cid.Cid) ([]peer.AddrInfo, error) {
|
||||
if !dht.enableProviders {
|
||||
return nil, routing.ErrNotSupported
|
||||
} else if !c.Defined() {
|
||||
return nil, fmt.Errorf("invalid cid: undefined")
|
||||
}
|
||||
|
||||
var providers []peer.AddrInfo
|
||||
for p := range dht.FindProvidersAsync(ctx, c, dht.bucketSize) {
|
||||
providers = append(providers, p)
|
||||
}
|
||||
return providers, nil
|
||||
}
|
||||
|
||||
// FindProvidersAsync is the same thing as FindProviders, but returns a channel.
|
||||
// Peers will be returned on the channel as soon as they are found, even before
|
||||
// the search query completes. If count is zero then the query will run until it
|
||||
// completes. Note: not reading from the returned channel may block the query
|
||||
// from progressing.
|
||||
func (dht *IpfsDHT) FindProvidersAsync(ctx context.Context, key cid.Cid, count int) (ch <-chan peer.AddrInfo) {
|
||||
ctx, end := tracer.FindProvidersAsync(dhtName, ctx, key, count)
|
||||
defer func() { ch = end(ch, nil) }()
|
||||
|
||||
if !dht.enableProviders || !key.Defined() {
|
||||
peerOut := make(chan peer.AddrInfo)
|
||||
close(peerOut)
|
||||
return peerOut
|
||||
}
|
||||
|
||||
peerOut := make(chan peer.AddrInfo)
|
||||
|
||||
keyMH := key.Hash()
|
||||
|
||||
logger.Debugw("finding providers", "cid", key, "mh", internal.LoggableProviderRecordBytes(keyMH))
|
||||
go dht.findProvidersAsyncRoutine(ctx, keyMH, count, peerOut)
|
||||
return peerOut
|
||||
}
|
||||
|
||||
func (dht *IpfsDHT) findProvidersAsyncRoutine(ctx context.Context, key multihash.Multihash, count int, peerOut chan peer.AddrInfo) {
|
||||
// use a span here because, unlike tracer.FindProvidersAsync, we know who told us about each provider, and that is interesting to log.
|
||||
ctx, span := internal.StartSpan(ctx, "IpfsDHT.FindProvidersAsyncRoutine")
|
||||
defer span.End()
|
||||
|
||||
defer close(peerOut)
|
||||
|
||||
findAll := count == 0
|
||||
|
||||
ps := make(map[peer.ID]peer.AddrInfo)
|
||||
psLock := &sync.Mutex{}
|
||||
psTryAdd := func(p peer.AddrInfo) bool {
|
||||
psLock.Lock()
|
||||
defer psLock.Unlock()
|
||||
pi, ok := ps[p.ID]
|
||||
if (!ok || ((len(pi.Addrs) == 0) && len(p.Addrs) > 0)) && (len(ps) < count || findAll) {
|
||||
ps[p.ID] = p
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
psSize := func() int {
|
||||
psLock.Lock()
|
||||
defer psLock.Unlock()
|
||||
return len(ps)
|
||||
}
|
||||
|
||||
provs, err := dht.providerStore.GetProviders(ctx, key)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, p := range provs {
|
||||
// NOTE: Assuming that this list of peers is unique
|
||||
if psTryAdd(p) {
|
||||
select {
|
||||
case peerOut <- p:
|
||||
span.AddEvent("found provider", trace.WithAttributes(
|
||||
attribute.Stringer("peer", p.ID),
|
||||
attribute.Stringer("from", dht.self),
|
||||
))
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If we have enough peers locally, don't bother with remote RPC
|
||||
// TODO: is this a DOS vector?
|
||||
if !findAll && len(ps) >= count {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
lookupRes, err := dht.runLookupWithFollowup(ctx, string(key),
|
||||
func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
|
||||
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.SendingQuery,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
provs, closest, err := dht.protoMessenger.GetProviders(ctx, p, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Debugf("%d provider entries", len(provs))
|
||||
|
||||
// Add unique providers from request, up to 'count'
|
||||
for _, prov := range provs {
|
||||
dht.maybeAddAddrs(prov.ID, prov.Addrs, peerstore.TempAddrTTL)
|
||||
logger.Debugf("got provider: %s", prov)
|
||||
if psTryAdd(*prov) {
|
||||
logger.Debugf("using provider: %s", prov)
|
||||
select {
|
||||
case peerOut <- *prov:
|
||||
span.AddEvent("found provider", trace.WithAttributes(
|
||||
attribute.Stringer("peer", prov.ID),
|
||||
attribute.Stringer("from", p),
|
||||
))
|
||||
case <-ctx.Done():
|
||||
logger.Debug("context timed out sending more providers")
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
if !findAll && psSize() >= count {
|
||||
logger.Debugf("got enough providers (%d/%d)", psSize(), count)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Give closer peers back to the query to be queried
|
||||
logger.Debugf("got closer peers: %d %s", len(closest), closest)
|
||||
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.PeerResponse,
|
||||
ID: p,
|
||||
Responses: closest,
|
||||
})
|
||||
|
||||
return closest, nil
|
||||
},
|
||||
func(*qpeerset.QueryPeerset) bool {
|
||||
return !findAll && psSize() >= count
|
||||
},
|
||||
)
|
||||
|
||||
if err == nil && ctx.Err() == nil {
|
||||
dht.refreshRTIfNoShortcut(kb.ConvertKey(string(key)), lookupRes)
|
||||
}
|
||||
}
|
||||
|
||||
// FindPeer searches for a peer with given ID.
|
||||
func (dht *IpfsDHT) FindPeer(ctx context.Context, id peer.ID) (pi peer.AddrInfo, err error) {
|
||||
ctx, end := tracer.FindPeer(dhtName, ctx, id)
|
||||
defer func() { end(pi, err) }()
|
||||
|
||||
if err := id.Validate(); err != nil {
|
||||
return peer.AddrInfo{}, err
|
||||
}
|
||||
|
||||
logger.Debugw("finding peer", "peer", id)
|
||||
|
||||
// Check if we're already connected to them
|
||||
if pi := dht.FindLocal(ctx, id); pi.ID != "" {
|
||||
return pi, nil
|
||||
}
|
||||
|
||||
lookupRes, err := dht.runLookupWithFollowup(ctx, string(id),
|
||||
func(ctx context.Context, p peer.ID) ([]*peer.AddrInfo, error) {
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.SendingQuery,
|
||||
ID: p,
|
||||
})
|
||||
|
||||
peers, err := dht.protoMessenger.GetClosestPeers(ctx, p, id)
|
||||
if err != nil {
|
||||
logger.Debugf("error getting closer peers: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// For DHT query command
|
||||
routing.PublishQueryEvent(ctx, &routing.QueryEvent{
|
||||
Type: routing.PeerResponse,
|
||||
ID: p,
|
||||
Responses: peers,
|
||||
})
|
||||
|
||||
return peers, err
|
||||
},
|
||||
func(*qpeerset.QueryPeerset) bool {
|
||||
return dht.host.Network().Connectedness(id) == network.Connected
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return peer.AddrInfo{}, err
|
||||
}
|
||||
|
||||
dialedPeerDuringQuery := false
|
||||
for i, p := range lookupRes.peers {
|
||||
if p == id {
|
||||
// Note: we consider PeerUnreachable to be a valid state because the peer may not support the DHT protocol
|
||||
// and therefore the peer would fail the query. The fact that a peer that is returned can be a non-DHT
|
||||
// server peer and is not identified as such is a bug.
|
||||
dialedPeerDuringQuery = (lookupRes.state[i] == qpeerset.PeerQueried || lookupRes.state[i] == qpeerset.PeerUnreachable || lookupRes.state[i] == qpeerset.PeerWaiting)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Return peer information if we tried to dial the peer during the query or we are (or recently were) connected
|
||||
// to the peer.
|
||||
connectedness := dht.host.Network().Connectedness(id)
|
||||
if dialedPeerDuringQuery || connectedness == network.Connected || connectedness == network.CanConnect {
|
||||
return dht.peerstore.PeerInfo(id), nil
|
||||
}
|
||||
|
||||
return peer.AddrInfo{}, routing.ErrNotFound
|
||||
}
|
||||
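A usage sketch for the Routing methods defined above, assuming an already-constructed *IpfsDHT; the function and variable names are illustrative, and the key is expected to pass the DHT's Validator.

package example

import (
	"context"

	"github.com/ipfs/go-cid"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/peer"
)

// storeAndFetch stores a record, reads it back, and streams providers for a
// CID using the Routing interface implemented by IpfsDHT.
func storeAndFetch(ctx context.Context, d *dht.IpfsDHT, key string, value []byte, c cid.Cid) ([]byte, []peer.AddrInfo, error) {
	if err := d.PutValue(ctx, key, value); err != nil {
		return nil, nil, err
	}

	got, err := d.GetValue(ctx, key)
	if err != nil {
		return nil, nil, err
	}

	// FindProvidersAsync delivers results on a channel as soon as they are
	// found; a count of 0 would run the query to completion.
	var provs []peer.AddrInfo
	for p := range d.FindProvidersAsync(ctx, c, 20) {
		provs = append(provs, p)
	}
	return got, provs, nil
}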
21
vendor/github.com/libp2p/go-libp2p-kad-dht/routing_options.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
internalConfig "github.com/libp2p/go-libp2p-kad-dht/internal/config"
|
||||
"github.com/libp2p/go-libp2p/core/routing"
|
||||
)
|
||||
|
||||
// Quorum is a DHT option that tells the DHT how many peers it needs to get
|
||||
// values from before returning the best one. Zero means the DHT query
|
||||
// should complete instead of returning early.
|
||||
//
|
||||
// Default: 0
|
||||
func Quorum(n int) routing.Option {
|
||||
return func(opts *routing.Options) error {
|
||||
if opts.Other == nil {
|
||||
opts.Other = make(map[interface{}]interface{}, 1)
|
||||
}
|
||||
opts.Other[internalConfig.QuorumOptionKey{}] = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
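A sketch of the Quorum option in use with SearchValue, assuming an already-constructed *IpfsDHT; bestAfterQuorum is an illustrative name. With a non-zero quorum the underlying query is stopped once roughly that many responses have been seen.

package example

import (
	"context"

	dht "github.com/libp2p/go-libp2p-kad-dht"
)

// bestAfterQuorum streams progressively better values for key; the last value
// received before the channel closes is the best one seen.
func bestAfterQuorum(ctx context.Context, d *dht.IpfsDHT, key string) ([]byte, error) {
	vals, err := d.SearchValue(ctx, key, dht.Quorum(4))
	if err != nil {
		return nil, err
	}
	var best []byte
	for v := range vals {
		best = v // each value sent is at least as good as the previous one
	}
	return best, nil
}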
103
vendor/github.com/libp2p/go-libp2p-kad-dht/rt_diversity_filter.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
package dht
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/libp2p/go-libp2p-kbucket/peerdiversity"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var _ peerdiversity.PeerIPGroupFilter = (*rtPeerIPGroupFilter)(nil)
|
||||
|
||||
type rtPeerIPGroupFilter struct {
|
||||
mu sync.RWMutex
|
||||
h host.Host
|
||||
|
||||
maxPerCpl int
|
||||
maxForTable int
|
||||
|
||||
cplIpGroupCount map[int]map[peerdiversity.PeerIPGroupKey]int
|
||||
tableIpGroupCount map[peerdiversity.PeerIPGroupKey]int
|
||||
}
|
||||
|
||||
// NewRTPeerDiversityFilter constructs the `PeerIPGroupFilter` that will be used to configure
|
||||
// the diversity filter for the Routing Table.
|
||||
// Please see the docs for `peerdiversity.PeerIPGroupFilter` AND `peerdiversity.Filter` for more details.
|
||||
func NewRTPeerDiversityFilter(h host.Host, maxPerCpl, maxForTable int) *rtPeerIPGroupFilter {
|
||||
return &rtPeerIPGroupFilter{
|
||||
h: h,
|
||||
|
||||
maxPerCpl: maxPerCpl,
|
||||
maxForTable: maxForTable,
|
||||
|
||||
cplIpGroupCount: make(map[int]map[peerdiversity.PeerIPGroupKey]int),
|
||||
tableIpGroupCount: make(map[peerdiversity.PeerIPGroupKey]int),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (r *rtPeerIPGroupFilter) Allow(g peerdiversity.PeerGroupInfo) bool {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
key := g.IPGroupKey
|
||||
cpl := g.Cpl
|
||||
|
||||
if r.tableIpGroupCount[key] >= r.maxForTable {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
c, ok := r.cplIpGroupCount[cpl]
|
||||
allow := !ok || c[key] < r.maxPerCpl
|
||||
return allow
|
||||
}
|
||||
|
||||
func (r *rtPeerIPGroupFilter) Increment(g peerdiversity.PeerGroupInfo) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
key := g.IPGroupKey
|
||||
cpl := g.Cpl
|
||||
|
||||
r.tableIpGroupCount[key] = r.tableIpGroupCount[key] + 1
|
||||
if _, ok := r.cplIpGroupCount[cpl]; !ok {
|
||||
r.cplIpGroupCount[cpl] = make(map[peerdiversity.PeerIPGroupKey]int)
|
||||
}
|
||||
|
||||
r.cplIpGroupCount[cpl][key] = r.cplIpGroupCount[cpl][key] + 1
|
||||
}
|
||||
|
||||
func (r *rtPeerIPGroupFilter) Decrement(g peerdiversity.PeerGroupInfo) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
key := g.IPGroupKey
|
||||
cpl := g.Cpl
|
||||
|
||||
r.tableIpGroupCount[key] = r.tableIpGroupCount[key] - 1
|
||||
if r.tableIpGroupCount[key] == 0 {
|
||||
delete(r.tableIpGroupCount, key)
|
||||
}
|
||||
|
||||
r.cplIpGroupCount[cpl][key] = r.cplIpGroupCount[cpl][key] - 1
|
||||
if r.cplIpGroupCount[cpl][key] == 0 {
|
||||
delete(r.cplIpGroupCount[cpl], key)
|
||||
}
|
||||
if len(r.cplIpGroupCount[cpl]) == 0 {
|
||||
delete(r.cplIpGroupCount, cpl)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rtPeerIPGroupFilter) PeerAddresses(p peer.ID) []ma.Multiaddr {
|
||||
cs := r.h.Network().ConnsToPeer(p)
|
||||
addr := make([]ma.Multiaddr, 0, len(cs))
|
||||
for _, c := range cs {
|
||||
addr = append(addr, c.RemoteMultiaddr())
|
||||
}
|
||||
return addr
|
||||
}
|
||||
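A sketch of wiring the diversity filter into a DHT instance. The constructor NewRTPeerDiversityFilter and dht.New are taken from the package; the option name RoutingTablePeerDiversityFilter is an assumption and may differ between versions, so treat this as illustrative only.

package example

import (
	"context"

	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/host"
)

// newDiverseDHT builds a DHT whose routing table limits how many peers from
// the same IP group may be admitted (here: 2 per cpl, 10 for the whole table).
func newDiverseDHT(ctx context.Context, h host.Host) (*dht.IpfsDHT, error) {
	filter := dht.NewRTPeerDiversityFilter(h, 2, 10)
	// Option name assumed; check the package's dht options for the exact API.
	return dht.New(ctx, h, dht.RoutingTablePeerDiversityFilter(filter))
}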
372
vendor/github.com/libp2p/go-libp2p-kad-dht/rtrefresh/rt_refresh_manager.go
generated
vendored
Normal file
@@ -0,0 +1,372 @@
|
||||
package rtrefresh
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
logging "github.com/ipfs/go-log"
|
||||
"github.com/libp2p/go-libp2p-kad-dht/internal"
|
||||
kbucket "github.com/libp2p/go-libp2p-kbucket"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/multiformats/go-base32"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
var logger = logging.Logger("dht/RtRefreshManager")
|
||||
|
||||
const (
|
||||
peerPingTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
type triggerRefreshReq struct {
|
||||
respCh chan error
|
||||
forceCplRefresh bool
|
||||
}
|
||||
|
||||
type RtRefreshManager struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
refcount sync.WaitGroup
|
||||
|
||||
// peerId of this DHT peer i.e. self peerId.
|
||||
h host.Host
|
||||
dhtPeerId peer.ID
|
||||
rt *kbucket.RoutingTable
|
||||
|
||||
enableAutoRefresh bool // should run periodic refreshes ?
|
||||
refreshKeyGenFnc func(cpl uint) (string, error) // generate the key for the query to refresh this cpl
|
||||
refreshQueryFnc func(ctx context.Context, key string) error // query to run for a refresh.
|
||||
refreshPingFnc func(ctx context.Context, p peer.ID) error // request to check liveness of remote peer
|
||||
refreshQueryTimeout time.Duration // timeout for one refresh query
|
||||
|
||||
// interval between two periodic refreshes.
|
||||
// also, a cpl won't be refreshed if the time since it was last refreshed
|
||||
// is below the interval, unless a "forced" refresh is done.
|
||||
refreshInterval time.Duration
|
||||
successfulOutboundQueryGracePeriod time.Duration
|
||||
|
||||
triggerRefresh chan *triggerRefreshReq // channel to write refresh requests to.
|
||||
|
||||
refreshDoneCh chan struct{} // write to this channel after every refresh
|
||||
}
|
||||
|
||||
func NewRtRefreshManager(h host.Host, rt *kbucket.RoutingTable, autoRefresh bool,
|
||||
refreshKeyGenFnc func(cpl uint) (string, error),
|
||||
refreshQueryFnc func(ctx context.Context, key string) error,
|
||||
refreshPingFnc func(ctx context.Context, p peer.ID) error,
|
||||
refreshQueryTimeout time.Duration,
|
||||
refreshInterval time.Duration,
|
||||
successfulOutboundQueryGracePeriod time.Duration,
|
||||
refreshDoneCh chan struct{}) (*RtRefreshManager, error) {
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return &RtRefreshManager{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
h: h,
|
||||
dhtPeerId: h.ID(),
|
||||
rt: rt,
|
||||
|
||||
enableAutoRefresh: autoRefresh,
|
||||
refreshKeyGenFnc: refreshKeyGenFnc,
|
||||
refreshQueryFnc: refreshQueryFnc,
|
||||
refreshPingFnc: refreshPingFnc,
|
||||
|
||||
refreshQueryTimeout: refreshQueryTimeout,
|
||||
refreshInterval: refreshInterval,
|
||||
successfulOutboundQueryGracePeriod: successfulOutboundQueryGracePeriod,
|
||||
|
||||
triggerRefresh: make(chan *triggerRefreshReq),
|
||||
refreshDoneCh: refreshDoneCh,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) Start() {
|
||||
r.refcount.Add(1)
|
||||
go r.loop()
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) Close() error {
|
||||
r.cancel()
|
||||
r.refcount.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshRoutingTable requests the refresh manager to refresh the Routing Table.
|
||||
// If the force parameter is set to true, all buckets will be refreshed irrespective of when they were last refreshed.
|
||||
//
|
||||
// The returned channel will block until the refresh finishes, then yield the
|
||||
// error and close. The channel is buffered and safe to ignore.
|
||||
func (r *RtRefreshManager) Refresh(force bool) <-chan error {
|
||||
resp := make(chan error, 1)
|
||||
r.refcount.Add(1)
|
||||
go func() {
|
||||
defer r.refcount.Done()
|
||||
select {
|
||||
case r.triggerRefresh <- &triggerRefreshReq{respCh: resp, forceCplRefresh: force}:
|
||||
case <-r.ctx.Done():
|
||||
resp <- r.ctx.Err()
|
||||
close(resp)
|
||||
}
|
||||
}()
|
||||
|
||||
return resp
|
||||
}
|
||||
|
||||
// RefreshNoWait requests the refresh manager to refresh the Routing Table.
|
||||
// However, it moves on without blocking if its request can't get through.
|
||||
func (r *RtRefreshManager) RefreshNoWait() {
|
||||
select {
|
||||
case r.triggerRefresh <- &triggerRefreshReq{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// pingAndEvictPeers pings Routing Table peers that haven't been heard of/from
|
||||
// in the interval they should have been, and evicts them if they don't reply.
|
||||
func (r *RtRefreshManager) pingAndEvictPeers(ctx context.Context) {
|
||||
ctx, span := internal.StartSpan(ctx, "RefreshManager.PingAndEvictPeers")
|
||||
defer span.End()
|
||||
|
||||
var peersChecked int
|
||||
var alive int64
|
||||
var wg sync.WaitGroup
|
||||
peers := r.rt.GetPeerInfos()
|
||||
for _, ps := range peers {
|
||||
if time.Since(ps.LastSuccessfulOutboundQueryAt) <= r.successfulOutboundQueryGracePeriod {
|
||||
continue
|
||||
}
|
||||
|
||||
peersChecked++
|
||||
wg.Add(1)
|
||||
go func(ps kbucket.PeerInfo) {
|
||||
defer wg.Done()
|
||||
|
||||
livelinessCtx, cancel := context.WithTimeout(ctx, peerPingTimeout)
|
||||
defer cancel()
|
||||
peerIdStr := ps.Id.String()
|
||||
livelinessCtx, span := internal.StartSpan(livelinessCtx, "RefreshManager.PingAndEvictPeers.worker", trace.WithAttributes(attribute.String("peer", peerIdStr)))
|
||||
defer span.End()
|
||||
|
||||
if err := r.h.Connect(livelinessCtx, peer.AddrInfo{ID: ps.Id}); err != nil {
|
||||
logger.Debugw("evicting peer after failed connection", "peer", peerIdStr, "error", err)
|
||||
span.RecordError(err)
|
||||
r.rt.RemovePeer(ps.Id)
|
||||
return
|
||||
}
|
||||
|
||||
if err := r.refreshPingFnc(livelinessCtx, ps.Id); err != nil {
|
||||
logger.Debugw("evicting peer after failed ping", "peer", peerIdStr, "error", err)
|
||||
span.RecordError(err)
|
||||
r.rt.RemovePeer(ps.Id)
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddInt64(&alive, 1)
|
||||
}(ps)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
span.SetAttributes(attribute.Int("NumPeersChecked", peersChecked), attribute.Int("NumPeersSkipped", len(peers)-peersChecked), attribute.Int64("NumPeersAlive", alive))
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) loop() {
|
||||
defer r.refcount.Done()
|
||||
|
||||
var refreshTickrCh <-chan time.Time
|
||||
if r.enableAutoRefresh {
|
||||
err := r.doRefresh(r.ctx, true)
|
||||
if err != nil {
|
||||
logger.Warn("failed when refreshing routing table", err)
|
||||
}
|
||||
t := time.NewTicker(r.refreshInterval)
|
||||
defer t.Stop()
|
||||
refreshTickrCh = t.C
|
||||
}
|
||||
|
||||
for {
|
||||
var waiting []chan<- error
|
||||
var forced bool
|
||||
select {
|
||||
case <-refreshTickrCh:
|
||||
case triggerRefreshReq := <-r.triggerRefresh:
|
||||
if triggerRefreshReq.respCh != nil {
|
||||
waiting = append(waiting, triggerRefreshReq.respCh)
|
||||
}
|
||||
forced = forced || triggerRefreshReq.forceCplRefresh
|
||||
case <-r.ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
// Batch multiple refresh requests if they're all waiting at the same time.
|
||||
OuterLoop:
|
||||
for {
|
||||
select {
|
||||
case triggerRefreshReq := <-r.triggerRefresh:
|
||||
if triggerRefreshReq.respCh != nil {
|
||||
waiting = append(waiting, triggerRefreshReq.respCh)
|
||||
}
|
||||
forced = forced || triggerRefreshReq.forceCplRefresh
|
||||
default:
|
||||
break OuterLoop
|
||||
}
|
||||
}
|
||||
|
||||
ctx, span := internal.StartSpan(r.ctx, "RefreshManager.Refresh")
|
||||
|
||||
r.pingAndEvictPeers(ctx)
|
||||
|
||||
// Query for self and refresh the required buckets
|
||||
err := r.doRefresh(ctx, forced)
|
||||
for _, w := range waiting {
|
||||
w <- err
|
||||
close(w)
|
||||
}
|
||||
if err != nil {
|
||||
logger.Warnw("failed when refreshing routing table", "error", err)
|
||||
}
|
||||
|
||||
span.End()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) doRefresh(ctx context.Context, forceRefresh bool) error {
|
||||
ctx, span := internal.StartSpan(ctx, "RefreshManager.doRefresh")
|
||||
defer span.End()
|
||||
|
||||
var merr error
|
||||
|
||||
if err := r.queryForSelf(ctx); err != nil {
|
||||
merr = multierror.Append(merr, err)
|
||||
}
|
||||
|
||||
refreshCpls := r.rt.GetTrackedCplsForRefresh()
|
||||
|
||||
rfnc := func(cpl uint) (err error) {
|
||||
if forceRefresh {
|
||||
err = r.refreshCpl(ctx, cpl)
|
||||
} else {
|
||||
err = r.refreshCplIfEligible(ctx, cpl, refreshCpls[cpl])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for c := range refreshCpls {
|
||||
cpl := uint(c)
|
||||
if err := rfnc(cpl); err != nil {
|
||||
merr = multierror.Append(merr, err)
|
||||
} else {
|
||||
// If we see a gap at a Cpl in the Routing table, we ONLY refresh up until the maximum cpl we
|
||||
// have in the Routing Table OR (2 * (Cpl+ 1) with the gap), whichever is smaller.
|
||||
// This is to prevent refreshes for Cpls that have no peers in the network but happen to be before a very high max Cpl
|
||||
// for which we do have peers in the network.
|
||||
// The number of 2 * (Cpl + 1) can be proved and a proof would have been written here if the programmer
|
||||
// had paid more attention in the Math classes at university.
|
||||
// So, please be patient and a doc explaining it will be published soon.
|
||||
if r.rt.NPeersForCpl(cpl) == 0 {
|
||||
lastCpl := min(2*(c+1), len(refreshCpls)-1)
|
||||
for i := c + 1; i < lastCpl+1; i++ {
|
||||
if err := rfnc(uint(i)); err != nil {
|
||||
merr = multierror.Append(merr, err)
|
||||
}
|
||||
}
|
||||
return merr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case r.refreshDoneCh <- struct{}{}:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
return merr
|
||||
}
|
||||
|
||||
func min(a int, b int) int {
|
||||
if a <= b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) refreshCplIfEligible(ctx context.Context, cpl uint, lastRefreshedAt time.Time) error {
|
||||
if time.Since(lastRefreshedAt) <= r.refreshInterval {
|
||||
logger.Debugf("not running refresh for cpl %d as time since last refresh not above interval", cpl)
|
||||
return nil
|
||||
}
|
||||
|
||||
return r.refreshCpl(ctx, cpl)
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) refreshCpl(ctx context.Context, cpl uint) error {
|
||||
ctx, span := internal.StartSpan(ctx, "RefreshManager.refreshCpl", trace.WithAttributes(attribute.Int("cpl", int(cpl))))
|
||||
defer span.End()
|
||||
|
||||
// gen a key for the query to refresh the cpl
|
||||
key, err := r.refreshKeyGenFnc(cpl)
|
||||
if err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
return fmt.Errorf("failed to generated query key for cpl=%d, err=%s", cpl, err)
|
||||
}
|
||||
|
||||
logger.Infof("starting refreshing cpl %d with key %s (routing table size was %d)",
|
||||
cpl, loggableRawKeyString(key), r.rt.Size())
|
||||
|
||||
if err := r.runRefreshDHTQuery(ctx, key); err != nil {
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
return fmt.Errorf("failed to refresh cpl=%d, err=%s", cpl, err)
|
||||
}
|
||||
|
||||
sz := r.rt.Size()
|
||||
logger.Infof("finished refreshing cpl %d, routing table size is now %d", cpl, sz)
|
||||
span.SetAttributes(attribute.Int("NewSize", sz))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RtRefreshManager) queryForSelf(ctx context.Context) error {
	ctx, span := internal.StartSpan(ctx, "RefreshManager.queryForSelf")
	defer span.End()

	if err := r.runRefreshDHTQuery(ctx, string(r.dhtPeerId)); err != nil {
		span.SetStatus(codes.Error, err.Error())
		return fmt.Errorf("failed to query for self, err=%s", err)
	}
	return nil
}
func (r *RtRefreshManager) runRefreshDHTQuery(ctx context.Context, key string) error {
	queryCtx, cancel := context.WithTimeout(ctx, r.refreshQueryTimeout)
	defer cancel()

	err := r.refreshQueryFnc(queryCtx, key)

	if err == nil || (err == context.DeadlineExceeded && queryCtx.Err() == context.DeadlineExceeded) {
		return nil
	}

	return err
}
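runRefreshDHTQuery treats hitting its own per-query deadline as success: the refresh did as much work as its time budget allowed. Below is a minimal, standalone sketch of that pattern under an assumed caller-supplied query function (queryFn and runWithQueryTimeout are stand-ins, not go-libp2p-kad-dht APIs; the sketch uses errors.Is where the vendored code compares directly).

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runWithQueryTimeout mirrors runRefreshDHTQuery: an error is swallowed only if
// it is the expiry of the deadline we imposed ourselves.
func runWithQueryTimeout(ctx context.Context, timeout time.Duration, key string,
	queryFn func(context.Context, string) error) error {
	queryCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	err := queryFn(queryCtx, key)
	if err == nil || (errors.Is(err, context.DeadlineExceeded) && queryCtx.Err() == context.DeadlineExceeded) {
		return nil
	}
	return err
}

func main() {
	slowQuery := func(ctx context.Context, _ string) error {
		<-ctx.Done() // simulate a query that runs until the deadline
		return ctx.Err()
	}
	err := runWithQueryTimeout(context.Background(), 10*time.Millisecond, "some-key", slowQuery)
	fmt.Println(err) // <nil>: the self-imposed deadline is not treated as failure
}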
type loggableRawKeyString string

func (lk loggableRawKeyString) String() string {
	k := string(lk)

	if len(k) == 0 {
		return k
	}

	encStr := base32.RawStdEncoding.EncodeToString([]byte(k))

	return encStr
}
141
vendor/github.com/libp2p/go-libp2p-kad-dht/subscriber_notifee.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
package dht

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
)
func (dht *IpfsDHT) startNetworkSubscriber() error {
	bufSize := eventbus.BufSize(256)

	evts := []interface{}{
		// register for event bus notifications of when peers successfully complete identification in order to update
		// the routing table
		new(event.EvtPeerIdentificationCompleted),

		// register for event bus protocol ID changes in order to update the routing table
		new(event.EvtPeerProtocolsUpdated),

		// register for event bus notifications for when our local address/addresses change so we can
		// advertise those to the network
		new(event.EvtLocalAddressesUpdated),

		// we want to know when we are disconnecting from other peers.
		new(event.EvtPeerConnectednessChanged),
	}

	// register for event bus local reachability changes in order to trigger switching between client and server modes;
	// only register for this event if the DHT is operating in ModeAuto or ModeAutoServer
	if dht.auto == ModeAuto || dht.auto == ModeAutoServer {
		evts = append(evts, new(event.EvtLocalReachabilityChanged))
	}

	subs, err := dht.host.EventBus().Subscribe(evts, bufSize)
	if err != nil {
		return fmt.Errorf("dht could not subscribe to eventbus events: %w", err)
	}

	dht.wg.Add(1)
	go func() {
		defer dht.wg.Done()
		defer subs.Close()

		for {
			select {
			case e, more := <-subs.Out():
				if !more {
					return
				}

				switch evt := e.(type) {
				case event.EvtLocalAddressesUpdated:
					// when our address changes, we should proactively tell our closest peers about it so
					// we become discoverable quickly. The Identify protocol will push a signed peer record
					// with our new address to all peers we are connected to. However, we might not necessarily be
					// connected to our closest peers, so in the true spirit of Zen, searching for ourselves in the
					// network really is the best way to forge connections with those that matter.
					if dht.autoRefresh || dht.testAddressUpdateProcessing {
						dht.rtRefreshManager.RefreshNoWait()
					}
				case event.EvtPeerProtocolsUpdated:
					handlePeerChangeEvent(dht, evt.Peer)
				case event.EvtPeerIdentificationCompleted:
					handlePeerChangeEvent(dht, evt.Peer)
				case event.EvtPeerConnectednessChanged:
					if evt.Connectedness != network.Connected {
						dht.msgSender.OnDisconnect(dht.ctx, evt.Peer)
					}
				case event.EvtLocalReachabilityChanged:
					if dht.auto == ModeAuto || dht.auto == ModeAutoServer {
						handleLocalReachabilityChangedEvent(dht, evt)
					} else {
						// something has gone really wrong if we get an event we did not subscribe to
						logger.Errorf("received LocalReachabilityChanged event that was not subscribed to")
					}
				default:
					// something has gone really wrong if we get an event for another type
					logger.Errorf("got wrong type from subscription: %T", e)
				}
			case <-dht.ctx.Done():
				return
			}
		}
	}()

	return nil
}
func handlePeerChangeEvent(dht *IpfsDHT, p peer.ID) {
	valid, err := dht.validRTPeer(p)
	if err != nil {
		logger.Errorf("could not check peerstore for protocol support: err: %s", err)
		return
	} else if valid {
		dht.peerFound(p)
	} else {
		dht.peerStoppedDHT(p)
	}
}
func handleLocalReachabilityChangedEvent(dht *IpfsDHT, e event.EvtLocalReachabilityChanged) {
	var target mode

	switch e.Reachability {
	case network.ReachabilityPrivate:
		target = modeClient
	case network.ReachabilityUnknown:
		if dht.auto == ModeAutoServer {
			target = modeServer
		} else {
			target = modeClient
		}
	case network.ReachabilityPublic:
		target = modeServer
	}

	logger.Infof("processed event %T; performing dht mode switch", e)

	err := dht.setMode(target)
	// NOTE: the mode will be printed out as a decimal.
	if err == nil {
		logger.Infow("switched DHT mode successfully", "mode", target)
	} else {
		logger.Errorw("switching DHT mode failed", "mode", target, "error", err)
	}
}
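The reachability-to-mode mapping above can be read as a small pure function. Here is a standalone sketch of that mapping, using invented exported stand-ins (dhtMode, client, server, targetMode) rather than the package's unexported mode values; it is illustrative only, not part of the vendored file.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
)

// Invented stand-ins for the package's unexported mode values.
type dhtMode string

const (
	client dhtMode = "client"
	server dhtMode = "server"
)

// targetMode mirrors the switch in handleLocalReachabilityChangedEvent: private
// hosts become clients, public hosts become servers, and unknown reachability
// depends on whether the DHT was configured to force server mode.
func targetMode(reach network.Reachability, forceServerOnUnknown bool) dhtMode {
	switch reach {
	case network.ReachabilityPrivate:
		return client
	case network.ReachabilityPublic:
		return server
	default: // network.ReachabilityUnknown
		if forceServerOnUnknown {
			return server
		}
		return client
	}
}

func main() {
	fmt.Println(targetMode(network.ReachabilityUnknown, true))  // server
	fmt.Println(targetMode(network.ReachabilityPrivate, false)) // client
}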
// validRTPeer returns true if the peer supports the DHT protocol, and false otherwise. Supporting the DHT protocol
// means supporting the primary protocols; we do not want to add peers that only speak obsolete secondary protocols
// to our routing table.
func (dht *IpfsDHT) validRTPeer(p peer.ID) (bool, error) {
	b, err := dht.peerstore.FirstSupportedProtocol(p, dht.protocols...)
	if len(b) == 0 || err != nil {
		return false, err
	}

	return dht.routingTablePeerFilter == nil || dht.routingTablePeerFilter(dht, p), nil
}
3
vendor/github.com/libp2p/go-libp2p-kad-dht/version.json
generated
vendored
Normal file
@@ -0,0 +1,3 @@
{
  "version": "v0.25.2"
}