WIP: Save agent roles integration work before CHORUS rebrand
- Agent roles and coordination features - Chat API integration testing - New configuration and workspace management 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
450
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go
generated
vendored
Normal file
450
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go
generated
vendored
Normal file
@@ -0,0 +1,450 @@
|
||||
package autonat
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
var log = logging.Logger("autonat")
|
||||
|
||||
const maxConfidence = 3
|
||||
|
||||
// AmbientAutoNAT is the implementation of ambient NAT autodiscovery
|
||||
type AmbientAutoNAT struct {
|
||||
host host.Host
|
||||
|
||||
*config
|
||||
|
||||
ctx context.Context
|
||||
ctxCancel context.CancelFunc // is closed when Close is called
|
||||
backgroundRunning chan struct{} // is closed when the background go routine exits
|
||||
|
||||
inboundConn chan network.Conn
|
||||
dialResponses chan error
|
||||
// status is an autoNATResult reflecting current status.
|
||||
status atomic.Pointer[network.Reachability]
|
||||
// Reflects the confidence on of the NATStatus being private, as a single
|
||||
// dialback may fail for reasons unrelated to NAT.
|
||||
// If it is <3, then multiple autoNAT peers may be contacted for dialback
|
||||
// If only a single autoNAT peer is known, then the confidence increases
|
||||
// for each failure until it reaches 3.
|
||||
confidence int
|
||||
lastInbound time.Time
|
||||
lastProbeTry time.Time
|
||||
lastProbe time.Time
|
||||
recentProbes map[peer.ID]time.Time
|
||||
|
||||
service *autoNATService
|
||||
|
||||
emitReachabilityChanged event.Emitter
|
||||
subscriber event.Subscription
|
||||
}
|
||||
|
||||
// StaticAutoNAT is a simple AutoNAT implementation when a single NAT status is desired.
|
||||
type StaticAutoNAT struct {
|
||||
host host.Host
|
||||
reachability network.Reachability
|
||||
service *autoNATService
|
||||
}
|
||||
|
||||
// New creates a new NAT autodiscovery system attached to a host
|
||||
func New(h host.Host, options ...Option) (AutoNAT, error) {
|
||||
var err error
|
||||
conf := new(config)
|
||||
conf.host = h
|
||||
conf.dialPolicy.host = h
|
||||
|
||||
if err = defaults(conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if conf.addressFunc == nil {
|
||||
conf.addressFunc = h.Addrs
|
||||
}
|
||||
|
||||
for _, o := range options {
|
||||
if err = o(conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
emitReachabilityChanged, _ := h.EventBus().Emitter(new(event.EvtLocalReachabilityChanged), eventbus.Stateful)
|
||||
|
||||
var service *autoNATService
|
||||
if (!conf.forceReachability || conf.reachability == network.ReachabilityPublic) && conf.dialer != nil {
|
||||
service, err = newAutoNATService(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
service.Enable()
|
||||
}
|
||||
|
||||
if conf.forceReachability {
|
||||
emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: conf.reachability})
|
||||
|
||||
return &StaticAutoNAT{
|
||||
host: h,
|
||||
reachability: conf.reachability,
|
||||
service: service,
|
||||
}, nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
as := &AmbientAutoNAT{
|
||||
ctx: ctx,
|
||||
ctxCancel: cancel,
|
||||
backgroundRunning: make(chan struct{}),
|
||||
host: h,
|
||||
config: conf,
|
||||
inboundConn: make(chan network.Conn, 5),
|
||||
dialResponses: make(chan error, 1),
|
||||
|
||||
emitReachabilityChanged: emitReachabilityChanged,
|
||||
service: service,
|
||||
recentProbes: make(map[peer.ID]time.Time),
|
||||
}
|
||||
reachability := network.ReachabilityUnknown
|
||||
as.status.Store(&reachability)
|
||||
|
||||
subscriber, err := as.host.EventBus().Subscribe(
|
||||
[]any{new(event.EvtLocalAddressesUpdated), new(event.EvtPeerIdentificationCompleted)},
|
||||
eventbus.Name("autonat"),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
as.subscriber = subscriber
|
||||
|
||||
h.Network().Notify(as)
|
||||
go as.background()
|
||||
|
||||
return as, nil
|
||||
}
|
||||
|
||||
// Status returns the AutoNAT observed reachability status.
|
||||
func (as *AmbientAutoNAT) Status() network.Reachability {
|
||||
s := as.status.Load()
|
||||
return *s
|
||||
}
|
||||
|
||||
func (as *AmbientAutoNAT) emitStatus() {
|
||||
status := *as.status.Load()
|
||||
as.emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: status})
|
||||
if as.metricsTracer != nil {
|
||||
as.metricsTracer.ReachabilityStatus(status)
|
||||
}
|
||||
}
|
||||
|
||||
func ipInList(candidate ma.Multiaddr, list []ma.Multiaddr) bool {
|
||||
candidateIP, _ := manet.ToIP(candidate)
|
||||
for _, i := range list {
|
||||
if ip, err := manet.ToIP(i); err == nil && ip.Equal(candidateIP) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (as *AmbientAutoNAT) background() {
|
||||
defer close(as.backgroundRunning)
|
||||
// wait a bit for the node to come online and establish some connections
|
||||
// before starting autodetection
|
||||
delay := as.config.bootDelay
|
||||
|
||||
subChan := as.subscriber.Out()
|
||||
defer as.subscriber.Close()
|
||||
defer as.emitReachabilityChanged.Close()
|
||||
|
||||
timer := time.NewTimer(delay)
|
||||
defer timer.Stop()
|
||||
timerRunning := true
|
||||
retryProbe := false
|
||||
for {
|
||||
select {
|
||||
// new inbound connection.
|
||||
case conn := <-as.inboundConn:
|
||||
localAddrs := as.host.Addrs()
|
||||
if manet.IsPublicAddr(conn.RemoteMultiaddr()) &&
|
||||
!ipInList(conn.RemoteMultiaddr(), localAddrs) {
|
||||
as.lastInbound = time.Now()
|
||||
}
|
||||
|
||||
case e := <-subChan:
|
||||
switch e := e.(type) {
|
||||
case event.EvtLocalAddressesUpdated:
|
||||
// On local address update, reduce confidence from maximum so that we schedule
|
||||
// the next probe sooner
|
||||
if as.confidence == maxConfidence {
|
||||
as.confidence--
|
||||
}
|
||||
case event.EvtPeerIdentificationCompleted:
|
||||
if s, err := as.host.Peerstore().SupportsProtocols(e.Peer, AutoNATProto); err == nil && len(s) > 0 {
|
||||
currentStatus := *as.status.Load()
|
||||
if currentStatus == network.ReachabilityUnknown {
|
||||
as.tryProbe(e.Peer)
|
||||
}
|
||||
}
|
||||
default:
|
||||
log.Errorf("unknown event type: %T", e)
|
||||
}
|
||||
|
||||
// probe finished.
|
||||
case err, ok := <-as.dialResponses:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if IsDialRefused(err) {
|
||||
retryProbe = true
|
||||
} else {
|
||||
as.handleDialResponse(err)
|
||||
}
|
||||
case <-timer.C:
|
||||
peer := as.getPeerToProbe()
|
||||
as.tryProbe(peer)
|
||||
timerRunning = false
|
||||
retryProbe = false
|
||||
case <-as.ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
// Drain the timer channel if it hasn't fired in preparation for Resetting it.
|
||||
if timerRunning && !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
timer.Reset(as.scheduleProbe(retryProbe))
|
||||
timerRunning = true
|
||||
}
|
||||
}
|
||||
|
||||
func (as *AmbientAutoNAT) cleanupRecentProbes() {
|
||||
fixedNow := time.Now()
|
||||
for k, v := range as.recentProbes {
|
||||
if fixedNow.Sub(v) > as.throttlePeerPeriod {
|
||||
delete(as.recentProbes, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scheduleProbe calculates when the next probe should be scheduled for.
|
||||
func (as *AmbientAutoNAT) scheduleProbe(retryProbe bool) time.Duration {
|
||||
// Our baseline is a probe every 'AutoNATRefreshInterval'
|
||||
// This is modulated by:
|
||||
// * if we are in an unknown state, have low confidence, or we want to retry because a probe was refused that
|
||||
// should drop to 'AutoNATRetryInterval'
|
||||
// * recent inbound connections (implying continued connectivity) should decrease the retry when public
|
||||
// * recent inbound connections when not public mean we should try more actively to see if we're public.
|
||||
fixedNow := time.Now()
|
||||
currentStatus := *as.status.Load()
|
||||
|
||||
nextProbe := fixedNow
|
||||
// Don't look for peers in the peer store more than once per second.
|
||||
if !as.lastProbeTry.IsZero() {
|
||||
backoff := as.lastProbeTry.Add(time.Second)
|
||||
if backoff.After(nextProbe) {
|
||||
nextProbe = backoff
|
||||
}
|
||||
}
|
||||
if !as.lastProbe.IsZero() {
|
||||
untilNext := as.config.refreshInterval
|
||||
if retryProbe {
|
||||
untilNext = as.config.retryInterval
|
||||
} else if currentStatus == network.ReachabilityUnknown {
|
||||
untilNext = as.config.retryInterval
|
||||
} else if as.confidence < maxConfidence {
|
||||
untilNext = as.config.retryInterval
|
||||
} else if currentStatus == network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) {
|
||||
untilNext *= 2
|
||||
} else if currentStatus != network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) {
|
||||
untilNext /= 5
|
||||
}
|
||||
|
||||
if as.lastProbe.Add(untilNext).After(nextProbe) {
|
||||
nextProbe = as.lastProbe.Add(untilNext)
|
||||
}
|
||||
}
|
||||
if as.metricsTracer != nil {
|
||||
as.metricsTracer.NextProbeTime(nextProbe)
|
||||
}
|
||||
return nextProbe.Sub(fixedNow)
|
||||
}
|
||||
|
||||
// handleDialResponse updates the current status based on dial response.
|
||||
func (as *AmbientAutoNAT) handleDialResponse(dialErr error) {
|
||||
var observation network.Reachability
|
||||
switch {
|
||||
case dialErr == nil:
|
||||
observation = network.ReachabilityPublic
|
||||
case IsDialError(dialErr):
|
||||
observation = network.ReachabilityPrivate
|
||||
default:
|
||||
observation = network.ReachabilityUnknown
|
||||
}
|
||||
|
||||
as.recordObservation(observation)
|
||||
}
|
||||
|
||||
// recordObservation updates NAT status and confidence
|
||||
func (as *AmbientAutoNAT) recordObservation(observation network.Reachability) {
|
||||
|
||||
currentStatus := *as.status.Load()
|
||||
|
||||
if observation == network.ReachabilityPublic {
|
||||
changed := false
|
||||
if currentStatus != network.ReachabilityPublic {
|
||||
// Aggressively switch to public from other states ignoring confidence
|
||||
log.Debugf("NAT status is public")
|
||||
|
||||
// we are flipping our NATStatus, so confidence drops to 0
|
||||
as.confidence = 0
|
||||
if as.service != nil {
|
||||
as.service.Enable()
|
||||
}
|
||||
changed = true
|
||||
} else if as.confidence < maxConfidence {
|
||||
as.confidence++
|
||||
}
|
||||
as.status.Store(&observation)
|
||||
if changed {
|
||||
as.emitStatus()
|
||||
}
|
||||
} else if observation == network.ReachabilityPrivate {
|
||||
if currentStatus != network.ReachabilityPrivate {
|
||||
if as.confidence > 0 {
|
||||
as.confidence--
|
||||
} else {
|
||||
log.Debugf("NAT status is private")
|
||||
|
||||
// we are flipping our NATStatus, so confidence drops to 0
|
||||
as.confidence = 0
|
||||
as.status.Store(&observation)
|
||||
if as.service != nil {
|
||||
as.service.Disable()
|
||||
}
|
||||
as.emitStatus()
|
||||
}
|
||||
} else if as.confidence < maxConfidence {
|
||||
as.confidence++
|
||||
as.status.Store(&observation)
|
||||
}
|
||||
} else if as.confidence > 0 {
|
||||
// don't just flip to unknown, reduce confidence first
|
||||
as.confidence--
|
||||
} else {
|
||||
log.Debugf("NAT status is unknown")
|
||||
as.status.Store(&observation)
|
||||
if currentStatus != network.ReachabilityUnknown {
|
||||
if as.service != nil {
|
||||
as.service.Enable()
|
||||
}
|
||||
as.emitStatus()
|
||||
}
|
||||
}
|
||||
if as.metricsTracer != nil {
|
||||
as.metricsTracer.ReachabilityStatusConfidence(as.confidence)
|
||||
}
|
||||
}
|
||||
|
||||
func (as *AmbientAutoNAT) tryProbe(p peer.ID) bool {
|
||||
as.lastProbeTry = time.Now()
|
||||
if p.Validate() != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if lastTime, ok := as.recentProbes[p]; ok {
|
||||
if time.Since(lastTime) < as.throttlePeerPeriod {
|
||||
return false
|
||||
}
|
||||
}
|
||||
as.cleanupRecentProbes()
|
||||
|
||||
info := as.host.Peerstore().PeerInfo(p)
|
||||
|
||||
if !as.config.dialPolicy.skipPeer(info.Addrs) {
|
||||
as.recentProbes[p] = time.Now()
|
||||
as.lastProbe = time.Now()
|
||||
go as.probe(&info)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (as *AmbientAutoNAT) probe(pi *peer.AddrInfo) {
|
||||
cli := NewAutoNATClient(as.host, as.config.addressFunc, as.metricsTracer)
|
||||
ctx, cancel := context.WithTimeout(as.ctx, as.config.requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
err := cli.DialBack(ctx, pi.ID)
|
||||
log.Debugf("Dialback through peer %s completed: err: %s", pi.ID, err)
|
||||
|
||||
select {
|
||||
case as.dialResponses <- err:
|
||||
case <-as.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (as *AmbientAutoNAT) getPeerToProbe() peer.ID {
|
||||
peers := as.host.Network().Peers()
|
||||
if len(peers) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
candidates := make([]peer.ID, 0, len(peers))
|
||||
|
||||
for _, p := range peers {
|
||||
info := as.host.Peerstore().PeerInfo(p)
|
||||
// Exclude peers which don't support the autonat protocol.
|
||||
if proto, err := as.host.Peerstore().SupportsProtocols(p, AutoNATProto); len(proto) == 0 || err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Exclude peers in backoff.
|
||||
if lastTime, ok := as.recentProbes[p]; ok {
|
||||
if time.Since(lastTime) < as.throttlePeerPeriod {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if as.config.dialPolicy.skipPeer(info.Addrs) {
|
||||
continue
|
||||
}
|
||||
candidates = append(candidates, p)
|
||||
}
|
||||
|
||||
if len(candidates) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return candidates[rand.Intn(len(candidates))]
|
||||
}
|
||||
|
||||
func (as *AmbientAutoNAT) Close() error {
|
||||
as.ctxCancel()
|
||||
if as.service != nil {
|
||||
as.service.Disable()
|
||||
}
|
||||
<-as.backgroundRunning
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status returns the AutoNAT observed reachability status.
|
||||
func (s *StaticAutoNAT) Status() network.Reachability {
|
||||
return s.reachability
|
||||
}
|
||||
|
||||
func (s *StaticAutoNAT) Close() error {
|
||||
if s.service != nil {
|
||||
s.service.Disable()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
122
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go
generated
vendored
Normal file
122
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
package autonat
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
|
||||
|
||||
"github.com/libp2p/go-msgio/pbio"
|
||||
)
|
||||
|
||||
// NewAutoNATClient creates a fresh instance of an AutoNATClient
|
||||
// If addrFunc is nil, h.Addrs will be used
|
||||
func NewAutoNATClient(h host.Host, addrFunc AddrFunc, mt MetricsTracer) Client {
|
||||
if addrFunc == nil {
|
||||
addrFunc = h.Addrs
|
||||
}
|
||||
return &client{h: h, addrFunc: addrFunc, mt: mt}
|
||||
}
|
||||
|
||||
type client struct {
|
||||
h host.Host
|
||||
addrFunc AddrFunc
|
||||
mt MetricsTracer
|
||||
}
|
||||
|
||||
// DialBack asks peer p to dial us back on all addresses returned by the addrFunc.
|
||||
// It blocks until we've received a response from the peer.
|
||||
//
|
||||
// Note: A returned error Message_E_DIAL_ERROR does not imply that the server
|
||||
// actually performed a dial attempt. Servers that run a version < v0.20.0 also
|
||||
// return Message_E_DIAL_ERROR if the dial was skipped due to the dialPolicy.
|
||||
func (c *client) DialBack(ctx context.Context, p peer.ID) error {
|
||||
s, err := c.h.NewStream(ctx, p, AutoNATProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.Scope().SetService(ServiceName); err != nil {
|
||||
log.Debugf("error attaching stream to autonat service: %s", err)
|
||||
s.Reset()
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
|
||||
log.Debugf("error reserving memory for autonat stream: %s", err)
|
||||
s.Reset()
|
||||
return err
|
||||
}
|
||||
defer s.Scope().ReleaseMemory(maxMsgSize)
|
||||
|
||||
s.SetDeadline(time.Now().Add(streamTimeout))
|
||||
// Might as well just reset the stream. Once we get to this point, we
|
||||
// don't care about being nice.
|
||||
defer s.Close()
|
||||
|
||||
r := pbio.NewDelimitedReader(s, maxMsgSize)
|
||||
w := pbio.NewDelimitedWriter(s)
|
||||
|
||||
req := newDialMessage(peer.AddrInfo{ID: c.h.ID(), Addrs: c.addrFunc()})
|
||||
if err := w.WriteMsg(req); err != nil {
|
||||
s.Reset()
|
||||
return err
|
||||
}
|
||||
|
||||
var res pb.Message
|
||||
if err := r.ReadMsg(&res); err != nil {
|
||||
s.Reset()
|
||||
return err
|
||||
}
|
||||
if res.GetType() != pb.Message_DIAL_RESPONSE {
|
||||
s.Reset()
|
||||
return fmt.Errorf("unexpected response: %s", res.GetType().String())
|
||||
}
|
||||
|
||||
status := res.GetDialResponse().GetStatus()
|
||||
if c.mt != nil {
|
||||
c.mt.ReceivedDialResponse(status)
|
||||
}
|
||||
switch status {
|
||||
case pb.Message_OK:
|
||||
return nil
|
||||
default:
|
||||
return Error{Status: status, Text: res.GetDialResponse().GetStatusText()}
|
||||
}
|
||||
}
|
||||
|
||||
// Error wraps errors signalled by AutoNAT services
|
||||
type Error struct {
|
||||
Status pb.Message_ResponseStatus
|
||||
Text string
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("AutoNAT error: %s (%s)", e.Text, e.Status.String())
|
||||
}
|
||||
|
||||
// IsDialError returns true if the error was due to a dial back failure
|
||||
func (e Error) IsDialError() bool {
|
||||
return e.Status == pb.Message_E_DIAL_ERROR
|
||||
}
|
||||
|
||||
// IsDialRefused returns true if the error was due to a refusal to dial back
|
||||
func (e Error) IsDialRefused() bool {
|
||||
return e.Status == pb.Message_E_DIAL_REFUSED
|
||||
}
|
||||
|
||||
// IsDialError returns true if the AutoNAT peer signalled an error dialing back
|
||||
func IsDialError(e error) bool {
|
||||
ae, ok := e.(Error)
|
||||
return ok && ae.IsDialError()
|
||||
}
|
||||
|
||||
// IsDialRefused returns true if the AutoNAT peer signalled refusal to dial back
|
||||
func IsDialRefused(e error) bool {
|
||||
ae, ok := e.(Error)
|
||||
return ok && ae.IsDialRefused()
|
||||
}
|
||||
95
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/dialpolicy.go
generated
vendored
Normal file
95
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/dialpolicy.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
package autonat
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
type dialPolicy struct {
|
||||
allowSelfDials bool
|
||||
host host.Host
|
||||
}
|
||||
|
||||
// skipDial indicates that a multiaddress isn't worth attempted dialing.
|
||||
// The same logic is used when the autonat client is considering if
|
||||
// a remote peer is worth using as a server, and when the server is
|
||||
// considering if a requested client is worth dialing back.
|
||||
func (d *dialPolicy) skipDial(addr ma.Multiaddr) bool {
|
||||
// skip relay addresses
|
||||
_, err := addr.ValueForProtocol(ma.P_CIRCUIT)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if d.allowSelfDials {
|
||||
return false
|
||||
}
|
||||
|
||||
// skip private network (unroutable) addresses
|
||||
if !manet.IsPublicAddr(addr) {
|
||||
return true
|
||||
}
|
||||
candidateIP, err := manet.ToIP(addr)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// Skip dialing addresses we believe are the local node's
|
||||
for _, localAddr := range d.host.Addrs() {
|
||||
localIP, err := manet.ToIP(localAddr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if localIP.Equal(candidateIP) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// skipPeer indicates that the collection of multiaddresses representing a peer
|
||||
// isn't worth attempted dialing. If one of the addresses matches an address
|
||||
// we believe is ours, we exclude the peer, even if there are other valid
|
||||
// public addresses in the list.
|
||||
func (d *dialPolicy) skipPeer(addrs []ma.Multiaddr) bool {
|
||||
localAddrs := d.host.Addrs()
|
||||
localHosts := make([]net.IP, 0)
|
||||
for _, lAddr := range localAddrs {
|
||||
if _, err := lAddr.ValueForProtocol(ma.P_CIRCUIT); err != nil && manet.IsPublicAddr(lAddr) {
|
||||
lIP, err := manet.ToIP(lAddr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
localHosts = append(localHosts, lIP)
|
||||
}
|
||||
}
|
||||
|
||||
// if a public IP of the peer is one of ours: skip the peer.
|
||||
goodPublic := false
|
||||
for _, addr := range addrs {
|
||||
if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil && manet.IsPublicAddr(addr) {
|
||||
aIP, err := manet.ToIP(addr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, lIP := range localHosts {
|
||||
if lIP.Equal(aIP) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
goodPublic = true
|
||||
}
|
||||
}
|
||||
|
||||
if d.allowSelfDials {
|
||||
return false
|
||||
}
|
||||
|
||||
return !goodPublic
|
||||
}
|
||||
31
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go
generated
vendored
Normal file
31
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
package autonat
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
// AutoNAT is the interface for NAT autodiscovery
|
||||
type AutoNAT interface {
|
||||
// Status returns the current NAT status
|
||||
Status() network.Reachability
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// Client is a stateless client interface to AutoNAT peers
|
||||
type Client interface {
|
||||
// DialBack requests from a peer providing AutoNAT services to test dial back
|
||||
// and report the address on a successful connection.
|
||||
DialBack(ctx context.Context, p peer.ID) error
|
||||
}
|
||||
|
||||
// AddrFunc is a function returning the candidate addresses for the local host.
|
||||
type AddrFunc func() []ma.Multiaddr
|
||||
|
||||
// Option is an Autonat option for configuration
|
||||
type Option func(*config) error
|
||||
162
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go
generated
vendored
Normal file
162
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
package autonat
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
|
||||
"github.com/libp2p/go-libp2p/p2p/metricshelper"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const metricNamespace = "libp2p_autonat"
|
||||
|
||||
var (
|
||||
reachabilityStatus = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "reachability_status",
|
||||
Help: "Current node reachability",
|
||||
},
|
||||
)
|
||||
reachabilityStatusConfidence = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "reachability_status_confidence",
|
||||
Help: "Node reachability status confidence",
|
||||
},
|
||||
)
|
||||
receivedDialResponseTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "received_dial_response_total",
|
||||
Help: "Count of dial responses for client",
|
||||
},
|
||||
[]string{"response_status"},
|
||||
)
|
||||
outgoingDialResponseTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "outgoing_dial_response_total",
|
||||
Help: "Count of dial responses for server",
|
||||
},
|
||||
[]string{"response_status"},
|
||||
)
|
||||
outgoingDialRefusedTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "outgoing_dial_refused_total",
|
||||
Help: "Count of dial requests refused by server",
|
||||
},
|
||||
[]string{"refusal_reason"},
|
||||
)
|
||||
nextProbeTimestamp = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "next_probe_timestamp",
|
||||
Help: "Time of next probe",
|
||||
},
|
||||
)
|
||||
collectors = []prometheus.Collector{
|
||||
reachabilityStatus,
|
||||
reachabilityStatusConfidence,
|
||||
receivedDialResponseTotal,
|
||||
outgoingDialResponseTotal,
|
||||
outgoingDialRefusedTotal,
|
||||
nextProbeTimestamp,
|
||||
}
|
||||
)
|
||||
|
||||
type MetricsTracer interface {
|
||||
ReachabilityStatus(status network.Reachability)
|
||||
ReachabilityStatusConfidence(confidence int)
|
||||
ReceivedDialResponse(status pb.Message_ResponseStatus)
|
||||
OutgoingDialResponse(status pb.Message_ResponseStatus)
|
||||
OutgoingDialRefused(reason string)
|
||||
NextProbeTime(t time.Time)
|
||||
}
|
||||
|
||||
func getResponseStatus(status pb.Message_ResponseStatus) string {
|
||||
var s string
|
||||
switch status {
|
||||
case pb.Message_OK:
|
||||
s = "ok"
|
||||
case pb.Message_E_DIAL_ERROR:
|
||||
s = "dial error"
|
||||
case pb.Message_E_DIAL_REFUSED:
|
||||
s = "dial refused"
|
||||
case pb.Message_E_BAD_REQUEST:
|
||||
s = "bad request"
|
||||
case pb.Message_E_INTERNAL_ERROR:
|
||||
s = "internal error"
|
||||
default:
|
||||
s = "unknown"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
const (
|
||||
rate_limited = "rate limited"
|
||||
dial_blocked = "dial blocked"
|
||||
no_valid_address = "no valid address"
|
||||
)
|
||||
|
||||
type metricsTracer struct{}
|
||||
|
||||
var _ MetricsTracer = &metricsTracer{}
|
||||
|
||||
type metricsTracerSetting struct {
|
||||
reg prometheus.Registerer
|
||||
}
|
||||
|
||||
type MetricsTracerOption func(*metricsTracerSetting)
|
||||
|
||||
func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
|
||||
return func(s *metricsTracerSetting) {
|
||||
if reg != nil {
|
||||
s.reg = reg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
|
||||
setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
|
||||
for _, opt := range opts {
|
||||
opt(setting)
|
||||
}
|
||||
metricshelper.RegisterCollectors(setting.reg, collectors...)
|
||||
return &metricsTracer{}
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) ReachabilityStatus(status network.Reachability) {
|
||||
reachabilityStatus.Set(float64(status))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) ReachabilityStatusConfidence(confidence int) {
|
||||
reachabilityStatusConfidence.Set(float64(confidence))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) ReceivedDialResponse(status pb.Message_ResponseStatus) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
*tags = append(*tags, getResponseStatus(status))
|
||||
receivedDialResponseTotal.WithLabelValues(*tags...).Inc()
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) OutgoingDialResponse(status pb.Message_ResponseStatus) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
*tags = append(*tags, getResponseStatus(status))
|
||||
outgoingDialResponseTotal.WithLabelValues(*tags...).Inc()
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) OutgoingDialRefused(reason string) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
*tags = append(*tags, reason)
|
||||
outgoingDialRefusedTotal.WithLabelValues(*tags...).Inc()
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) NextProbeTime(t time.Time) {
|
||||
nextProbeTimestamp.Set(float64(t.Unix()))
|
||||
}
|
||||
30
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/notify.go
generated
vendored
Normal file
30
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/notify.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
package autonat
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
var _ network.Notifiee = (*AmbientAutoNAT)(nil)
|
||||
|
||||
// Listen is part of the network.Notifiee interface
|
||||
func (as *AmbientAutoNAT) Listen(net network.Network, a ma.Multiaddr) {}
|
||||
|
||||
// ListenClose is part of the network.Notifiee interface
|
||||
func (as *AmbientAutoNAT) ListenClose(net network.Network, a ma.Multiaddr) {}
|
||||
|
||||
// Connected is part of the network.Notifiee interface
|
||||
func (as *AmbientAutoNAT) Connected(net network.Network, c network.Conn) {
|
||||
if c.Stat().Direction == network.DirInbound &&
|
||||
manet.IsPublicAddr(c.RemoteMultiaddr()) {
|
||||
select {
|
||||
case as.inboundConn <- c:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Disconnected is part of the network.Notifiee interface
|
||||
func (as *AmbientAutoNAT) Disconnected(net network.Network, c network.Conn) {}
|
||||
153
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go
generated
vendored
Normal file
153
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go
generated
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
package autonat
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
// config holds configurable options for the autonat subsystem.
|
||||
type config struct {
|
||||
host host.Host
|
||||
|
||||
addressFunc AddrFunc
|
||||
dialPolicy dialPolicy
|
||||
dialer network.Network
|
||||
forceReachability bool
|
||||
reachability network.Reachability
|
||||
metricsTracer MetricsTracer
|
||||
|
||||
// client
|
||||
bootDelay time.Duration
|
||||
retryInterval time.Duration
|
||||
refreshInterval time.Duration
|
||||
requestTimeout time.Duration
|
||||
throttlePeerPeriod time.Duration
|
||||
|
||||
// server
|
||||
dialTimeout time.Duration
|
||||
maxPeerAddresses int
|
||||
throttleGlobalMax int
|
||||
throttlePeerMax int
|
||||
throttleResetPeriod time.Duration
|
||||
throttleResetJitter time.Duration
|
||||
}
|
||||
|
||||
var defaults = func(c *config) error {
|
||||
c.bootDelay = 15 * time.Second
|
||||
c.retryInterval = 90 * time.Second
|
||||
c.refreshInterval = 15 * time.Minute
|
||||
c.requestTimeout = 30 * time.Second
|
||||
c.throttlePeerPeriod = 90 * time.Second
|
||||
|
||||
c.dialTimeout = 15 * time.Second
|
||||
c.maxPeerAddresses = 16
|
||||
c.throttleGlobalMax = 30
|
||||
c.throttlePeerMax = 3
|
||||
c.throttleResetPeriod = 1 * time.Minute
|
||||
c.throttleResetJitter = 15 * time.Second
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnableService specifies that AutoNAT should be allowed to run a NAT service to help
|
||||
// other peers determine their own NAT status. The provided Network should not be the
|
||||
// default network/dialer of the host passed to `New`, as the NAT system will need to
|
||||
// make parallel connections, and as such will modify both the associated peerstore
|
||||
// and terminate connections of this dialer. The dialer provided
|
||||
// should be compatible (TCP/UDP) however with the transports of the libp2p network.
|
||||
func EnableService(dialer network.Network) Option {
|
||||
return func(c *config) error {
|
||||
if dialer == c.host.Network() || dialer.Peerstore() == c.host.Peerstore() {
|
||||
return errors.New("dialer should not be that of the host")
|
||||
}
|
||||
c.dialer = dialer
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithReachability overrides autonat to simply report an over-ridden reachability
|
||||
// status.
|
||||
func WithReachability(reachability network.Reachability) Option {
|
||||
return func(c *config) error {
|
||||
c.forceReachability = true
|
||||
c.reachability = reachability
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// UsingAddresses allows overriding which Addresses the AutoNAT client believes
|
||||
// are "its own". Useful for testing, or for more exotic port-forwarding
|
||||
// scenarios where the host may be listening on different ports than it wants
|
||||
// to externally advertise or verify connectability on.
|
||||
func UsingAddresses(addrFunc AddrFunc) Option {
|
||||
return func(c *config) error {
|
||||
if addrFunc == nil {
|
||||
return errors.New("invalid address function supplied")
|
||||
}
|
||||
c.addressFunc = addrFunc
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSchedule configures how aggressively probes will be made to verify the
|
||||
// address of the host. retryInterval indicates how often probes should be made
|
||||
// when the host lacks confidence about its address, while refreshInterval
|
||||
// is the schedule of periodic probes when the host believes it knows its
|
||||
// steady-state reachability.
|
||||
func WithSchedule(retryInterval, refreshInterval time.Duration) Option {
|
||||
return func(c *config) error {
|
||||
c.retryInterval = retryInterval
|
||||
c.refreshInterval = refreshInterval
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithoutStartupDelay removes the initial delay the NAT subsystem typically
|
||||
// uses as a buffer for ensuring that connectivity and guesses as to the hosts
|
||||
// local interfaces have settled down during startup.
|
||||
func WithoutStartupDelay() Option {
|
||||
return func(c *config) error {
|
||||
c.bootDelay = 1
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithoutThrottling indicates that this autonat service should not place
|
||||
// restrictions on how many peers it is willing to help when acting as
|
||||
// a server.
|
||||
func WithoutThrottling() Option {
|
||||
return func(c *config) error {
|
||||
c.throttleGlobalMax = 0
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithThrottling specifies how many peers (`amount`) it is willing to help
|
||||
// ever `interval` amount of time when acting as a server.
|
||||
func WithThrottling(amount int, interval time.Duration) Option {
|
||||
return func(c *config) error {
|
||||
c.throttleGlobalMax = amount
|
||||
c.throttleResetPeriod = interval
|
||||
c.throttleResetJitter = interval / 4
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPeerThrottling specifies a limit for the maximum number of IP checks
|
||||
// this node will provide to an individual peer in each `interval`.
|
||||
func WithPeerThrottling(amount int) Option {
|
||||
return func(c *config) error {
|
||||
c.throttlePeerMax = amount
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMetricsTracer uses mt to track autonat metrics
|
||||
func WithMetricsTracer(mt MetricsTracer) Option {
|
||||
return func(c *config) error {
|
||||
c.metricsTracer = mt
|
||||
return nil
|
||||
}
|
||||
}
|
||||
524
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go
generated
vendored
Normal file
524
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go
generated
vendored
Normal file
@@ -0,0 +1,524 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v3.21.12
|
||||
// source: pb/autonat.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Message_MessageType int32
|
||||
|
||||
const (
|
||||
Message_DIAL Message_MessageType = 0
|
||||
Message_DIAL_RESPONSE Message_MessageType = 1
|
||||
)
|
||||
|
||||
// Enum value maps for Message_MessageType.
|
||||
var (
|
||||
Message_MessageType_name = map[int32]string{
|
||||
0: "DIAL",
|
||||
1: "DIAL_RESPONSE",
|
||||
}
|
||||
Message_MessageType_value = map[string]int32{
|
||||
"DIAL": 0,
|
||||
"DIAL_RESPONSE": 1,
|
||||
}
|
||||
)
|
||||
|
||||
func (x Message_MessageType) Enum() *Message_MessageType {
|
||||
p := new(Message_MessageType)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x Message_MessageType) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (Message_MessageType) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_pb_autonat_proto_enumTypes[0].Descriptor()
|
||||
}
|
||||
|
||||
func (Message_MessageType) Type() protoreflect.EnumType {
|
||||
return &file_pb_autonat_proto_enumTypes[0]
|
||||
}
|
||||
|
||||
func (x Message_MessageType) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func (x *Message_MessageType) UnmarshalJSON(b []byte) error {
|
||||
num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = Message_MessageType(num)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: Use Message_MessageType.Descriptor instead.
|
||||
func (Message_MessageType) EnumDescriptor() ([]byte, []int) {
|
||||
return file_pb_autonat_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
type Message_ResponseStatus int32
|
||||
|
||||
const (
|
||||
Message_OK Message_ResponseStatus = 0
|
||||
Message_E_DIAL_ERROR Message_ResponseStatus = 100
|
||||
Message_E_DIAL_REFUSED Message_ResponseStatus = 101
|
||||
Message_E_BAD_REQUEST Message_ResponseStatus = 200
|
||||
Message_E_INTERNAL_ERROR Message_ResponseStatus = 300
|
||||
)
|
||||
|
||||
// Enum value maps for Message_ResponseStatus.
|
||||
var (
|
||||
Message_ResponseStatus_name = map[int32]string{
|
||||
0: "OK",
|
||||
100: "E_DIAL_ERROR",
|
||||
101: "E_DIAL_REFUSED",
|
||||
200: "E_BAD_REQUEST",
|
||||
300: "E_INTERNAL_ERROR",
|
||||
}
|
||||
Message_ResponseStatus_value = map[string]int32{
|
||||
"OK": 0,
|
||||
"E_DIAL_ERROR": 100,
|
||||
"E_DIAL_REFUSED": 101,
|
||||
"E_BAD_REQUEST": 200,
|
||||
"E_INTERNAL_ERROR": 300,
|
||||
}
|
||||
)
|
||||
|
||||
func (x Message_ResponseStatus) Enum() *Message_ResponseStatus {
|
||||
p := new(Message_ResponseStatus)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x Message_ResponseStatus) String() string {
|
||||
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
|
||||
}
|
||||
|
||||
func (Message_ResponseStatus) Descriptor() protoreflect.EnumDescriptor {
|
||||
return file_pb_autonat_proto_enumTypes[1].Descriptor()
|
||||
}
|
||||
|
||||
func (Message_ResponseStatus) Type() protoreflect.EnumType {
|
||||
return &file_pb_autonat_proto_enumTypes[1]
|
||||
}
|
||||
|
||||
func (x Message_ResponseStatus) Number() protoreflect.EnumNumber {
|
||||
return protoreflect.EnumNumber(x)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func (x *Message_ResponseStatus) UnmarshalJSON(b []byte) error {
|
||||
num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = Message_ResponseStatus(num)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: Use Message_ResponseStatus.Descriptor instead.
|
||||
func (Message_ResponseStatus) EnumDescriptor() ([]byte, []int) {
|
||||
return file_pb_autonat_proto_rawDescGZIP(), []int{0, 1}
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Type *Message_MessageType `protobuf:"varint,1,opt,name=type,enum=autonat.pb.Message_MessageType" json:"type,omitempty"`
|
||||
Dial *Message_Dial `protobuf:"bytes,2,opt,name=dial" json:"dial,omitempty"`
|
||||
DialResponse *Message_DialResponse `protobuf:"bytes,3,opt,name=dialResponse" json:"dialResponse,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Message) Reset() {
|
||||
*x = Message{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pb_autonat_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Message) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Message) ProtoMessage() {}
|
||||
|
||||
func (x *Message) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pb_autonat_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
|
||||
func (*Message) Descriptor() ([]byte, []int) {
|
||||
return file_pb_autonat_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Message) GetType() Message_MessageType {
|
||||
if x != nil && x.Type != nil {
|
||||
return *x.Type
|
||||
}
|
||||
return Message_DIAL
|
||||
}
|
||||
|
||||
func (x *Message) GetDial() *Message_Dial {
|
||||
if x != nil {
|
||||
return x.Dial
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Message) GetDialResponse() *Message_DialResponse {
|
||||
if x != nil {
|
||||
return x.DialResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Message_PeerInfo struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
|
||||
Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Message_PeerInfo) Reset() {
|
||||
*x = Message_PeerInfo{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pb_autonat_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Message_PeerInfo) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Message_PeerInfo) ProtoMessage() {}
|
||||
|
||||
func (x *Message_PeerInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pb_autonat_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Message_PeerInfo.ProtoReflect.Descriptor instead.
|
||||
func (*Message_PeerInfo) Descriptor() ([]byte, []int) {
|
||||
return file_pb_autonat_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *Message_PeerInfo) GetId() []byte {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Message_PeerInfo) GetAddrs() [][]byte {
|
||||
if x != nil {
|
||||
return x.Addrs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Message_Dial struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Peer *Message_PeerInfo `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Message_Dial) Reset() {
|
||||
*x = Message_Dial{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pb_autonat_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Message_Dial) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Message_Dial) ProtoMessage() {}
|
||||
|
||||
func (x *Message_Dial) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pb_autonat_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Message_Dial.ProtoReflect.Descriptor instead.
|
||||
func (*Message_Dial) Descriptor() ([]byte, []int) {
|
||||
return file_pb_autonat_proto_rawDescGZIP(), []int{0, 1}
|
||||
}
|
||||
|
||||
func (x *Message_Dial) GetPeer() *Message_PeerInfo {
|
||||
if x != nil {
|
||||
return x.Peer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Message_DialResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Status *Message_ResponseStatus `protobuf:"varint,1,opt,name=status,enum=autonat.pb.Message_ResponseStatus" json:"status,omitempty"`
|
||||
StatusText *string `protobuf:"bytes,2,opt,name=statusText" json:"statusText,omitempty"`
|
||||
Addr []byte `protobuf:"bytes,3,opt,name=addr" json:"addr,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Message_DialResponse) Reset() {
|
||||
*x = Message_DialResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pb_autonat_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Message_DialResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Message_DialResponse) ProtoMessage() {}
|
||||
|
||||
func (x *Message_DialResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pb_autonat_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Message_DialResponse.ProtoReflect.Descriptor instead.
|
||||
func (*Message_DialResponse) Descriptor() ([]byte, []int) {
|
||||
return file_pb_autonat_proto_rawDescGZIP(), []int{0, 2}
|
||||
}
|
||||
|
||||
func (x *Message_DialResponse) GetStatus() Message_ResponseStatus {
|
||||
if x != nil && x.Status != nil {
|
||||
return *x.Status
|
||||
}
|
||||
return Message_OK
|
||||
}
|
||||
|
||||
func (x *Message_DialResponse) GetStatusText() string {
|
||||
if x != nil && x.StatusText != nil {
|
||||
return *x.StatusText
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Message_DialResponse) GetAddr() []byte {
|
||||
if x != nil {
|
||||
return x.Addr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_pb_autonat_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_pb_autonat_proto_rawDesc = []byte{
|
||||
0x0a, 0x10, 0x70, 0x62, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x12, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x22, 0xb5,
|
||||
0x04, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x04, 0x74, 0x79,
|
||||
0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e,
|
||||
0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x4d, 0x65,
|
||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
|
||||
0x2c, 0x0a, 0x04, 0x64, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e,
|
||||
0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x04, 0x64, 0x69, 0x61, 0x6c, 0x12, 0x44, 0x0a,
|
||||
0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62,
|
||||
0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x1a, 0x30, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12,
|
||||
0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12,
|
||||
0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05,
|
||||
0x61, 0x64, 0x64, 0x72, 0x73, 0x1a, 0x38, 0x0a, 0x04, 0x44, 0x69, 0x61, 0x6c, 0x12, 0x30, 0x0a,
|
||||
0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x75,
|
||||
0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x1a,
|
||||
0x7e, 0x0a, 0x0c, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
|
||||
0x3a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
|
||||
0x22, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73,
|
||||
0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61,
|
||||
0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73,
|
||||
0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x65, 0x78, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61,
|
||||
0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x22,
|
||||
0x2a, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08,
|
||||
0x0a, 0x04, 0x44, 0x49, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x49, 0x41, 0x4c,
|
||||
0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x01, 0x22, 0x69, 0x0a, 0x0e, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a,
|
||||
0x02, 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x5f, 0x44, 0x49, 0x41, 0x4c, 0x5f,
|
||||
0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x64, 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x5f, 0x44, 0x49, 0x41,
|
||||
0x4c, 0x5f, 0x52, 0x45, 0x46, 0x55, 0x53, 0x45, 0x44, 0x10, 0x65, 0x12, 0x12, 0x0a, 0x0d, 0x45,
|
||||
0x5f, 0x42, 0x41, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xc8, 0x01, 0x12,
|
||||
0x15, 0x0a, 0x10, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52,
|
||||
0x52, 0x4f, 0x52, 0x10, 0xac, 0x02,
|
||||
}
|
||||
|
||||
var (
|
||||
file_pb_autonat_proto_rawDescOnce sync.Once
|
||||
file_pb_autonat_proto_rawDescData = file_pb_autonat_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_pb_autonat_proto_rawDescGZIP() []byte {
|
||||
file_pb_autonat_proto_rawDescOnce.Do(func() {
|
||||
file_pb_autonat_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_autonat_proto_rawDescData)
|
||||
})
|
||||
return file_pb_autonat_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_pb_autonat_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
|
||||
var file_pb_autonat_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_pb_autonat_proto_goTypes = []interface{}{
|
||||
(Message_MessageType)(0), // 0: autonat.pb.Message.MessageType
|
||||
(Message_ResponseStatus)(0), // 1: autonat.pb.Message.ResponseStatus
|
||||
(*Message)(nil), // 2: autonat.pb.Message
|
||||
(*Message_PeerInfo)(nil), // 3: autonat.pb.Message.PeerInfo
|
||||
(*Message_Dial)(nil), // 4: autonat.pb.Message.Dial
|
||||
(*Message_DialResponse)(nil), // 5: autonat.pb.Message.DialResponse
|
||||
}
|
||||
var file_pb_autonat_proto_depIdxs = []int32{
|
||||
0, // 0: autonat.pb.Message.type:type_name -> autonat.pb.Message.MessageType
|
||||
4, // 1: autonat.pb.Message.dial:type_name -> autonat.pb.Message.Dial
|
||||
5, // 2: autonat.pb.Message.dialResponse:type_name -> autonat.pb.Message.DialResponse
|
||||
3, // 3: autonat.pb.Message.Dial.peer:type_name -> autonat.pb.Message.PeerInfo
|
||||
1, // 4: autonat.pb.Message.DialResponse.status:type_name -> autonat.pb.Message.ResponseStatus
|
||||
5, // [5:5] is the sub-list for method output_type
|
||||
5, // [5:5] is the sub-list for method input_type
|
||||
5, // [5:5] is the sub-list for extension type_name
|
||||
5, // [5:5] is the sub-list for extension extendee
|
||||
0, // [0:5] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_pb_autonat_proto_init() }
|
||||
func file_pb_autonat_proto_init() {
|
||||
if File_pb_autonat_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_pb_autonat_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Message); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_pb_autonat_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Message_PeerInfo); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_pb_autonat_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Message_Dial); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_pb_autonat_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Message_DialResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_pb_autonat_proto_rawDesc,
|
||||
NumEnums: 2,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_pb_autonat_proto_goTypes,
|
||||
DependencyIndexes: file_pb_autonat_proto_depIdxs,
|
||||
EnumInfos: file_pb_autonat_proto_enumTypes,
|
||||
MessageInfos: file_pb_autonat_proto_msgTypes,
|
||||
}.Build()
|
||||
File_pb_autonat_proto = out.File
|
||||
file_pb_autonat_proto_rawDesc = nil
|
||||
file_pb_autonat_proto_goTypes = nil
|
||||
file_pb_autonat_proto_depIdxs = nil
|
||||
}
|
||||
37
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.proto
generated
vendored
Normal file
37
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.proto
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
syntax = "proto2";
|
||||
|
||||
package autonat.pb;
|
||||
|
||||
message Message {
|
||||
enum MessageType {
|
||||
DIAL = 0;
|
||||
DIAL_RESPONSE = 1;
|
||||
}
|
||||
|
||||
enum ResponseStatus {
|
||||
OK = 0;
|
||||
E_DIAL_ERROR = 100;
|
||||
E_DIAL_REFUSED = 101;
|
||||
E_BAD_REQUEST = 200;
|
||||
E_INTERNAL_ERROR = 300;
|
||||
}
|
||||
|
||||
message PeerInfo {
|
||||
optional bytes id = 1;
|
||||
repeated bytes addrs = 2;
|
||||
}
|
||||
|
||||
message Dial {
|
||||
optional PeerInfo peer = 1;
|
||||
}
|
||||
|
||||
message DialResponse {
|
||||
optional ResponseStatus status = 1;
|
||||
optional string statusText = 2;
|
||||
optional bytes addr = 3;
|
||||
}
|
||||
|
||||
optional MessageType type = 1;
|
||||
optional Dial dial = 2;
|
||||
optional DialResponse dialResponse = 3;
|
||||
}
|
||||
41
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go
generated
vendored
Normal file
41
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
package autonat

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/host/autonat/pb"

	ma "github.com/multiformats/go-multiaddr"
)

//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/autonat.proto=./pb pb/autonat.proto

// AutoNATProto identifies the autonat service protocol
const AutoNATProto = "/libp2p/autonat/1.0.0"

func newDialMessage(pi peer.AddrInfo) *pb.Message {
	msg := new(pb.Message)
	msg.Type = pb.Message_DIAL.Enum()
	msg.Dial = new(pb.Message_Dial)
	msg.Dial.Peer = new(pb.Message_PeerInfo)
	msg.Dial.Peer.Id = []byte(pi.ID)
	msg.Dial.Peer.Addrs = make([][]byte, len(pi.Addrs))
	for i, addr := range pi.Addrs {
		msg.Dial.Peer.Addrs[i] = addr.Bytes()
	}

	return msg
}

func newDialResponseOK(addr ma.Multiaddr) *pb.Message_DialResponse {
	dr := new(pb.Message_DialResponse)
	dr.Status = pb.Message_OK.Enum()
	dr.Addr = addr.Bytes()
	return dr
}

func newDialResponseError(status pb.Message_ResponseStatus, text string) *pb.Message_DialResponse {
	dr := new(pb.Message_DialResponse)
	dr.Status = status.Enum()
	dr.StatusText = &text
	return dr
}
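A hedged sketch of the client side of this exchange, inside the same package (the name requestDialBack is made up for illustration; maxMsgSize and the pbio framing come from svc.go below, and the imports match the ones svc.go already uses):

	func requestDialBack(ctx context.Context, h host.Host, p peer.ID, self peer.AddrInfo) (*pb.Message_DialResponse, error) {
		// Open a stream speaking the AutoNAT protocol to the chosen server.
		s, err := h.NewStream(ctx, p, AutoNATProto)
		if err != nil {
			return nil, err
		}
		defer s.Close()

		// Frame messages the same way the service does: varint-delimited protobufs.
		w := pbio.NewDelimitedWriter(s)
		r := pbio.NewDelimitedReader(s, maxMsgSize)

		// Ask the server to dial us back on our advertised addresses.
		if err := w.WriteMsg(newDialMessage(self)); err != nil {
			s.Reset()
			return nil, err
		}

		var res pb.Message
		if err := r.ReadMsg(&res); err != nil {
			s.Reset()
			return nil, err
		}
		return res.GetDialResponse(), nil
	}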
295
vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go
generated
vendored
Normal file
@@ -0,0 +1,295 @@
|
||||
package autonat
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
|
||||
|
||||
"github.com/libp2p/go-msgio/pbio"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var streamTimeout = 60 * time.Second
|
||||
|
||||
const (
|
||||
ServiceName = "libp2p.autonat"
|
||||
|
||||
maxMsgSize = 4096
|
||||
)
|
||||
|
||||
// autoNATService provides NAT autodetection services to other peers
|
||||
type autoNATService struct {
|
||||
instanceLock sync.Mutex
|
||||
instance context.CancelFunc
|
||||
backgroundRunning chan struct{} // closed when background exits
|
||||
|
||||
config *config
|
||||
|
||||
// rate limiter
|
||||
mx sync.Mutex
|
||||
reqs map[peer.ID]int
|
||||
globalReqs int
|
||||
}
|
||||
|
||||
// newAutoNATService creates a new autoNATService instance attached to a host
|
||||
func newAutoNATService(c *config) (*autoNATService, error) {
|
||||
if c.dialer == nil {
|
||||
return nil, errors.New("cannot create NAT service without a network")
|
||||
}
|
||||
return &autoNATService{
|
||||
config: c,
|
||||
reqs: make(map[peer.ID]int),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (as *autoNATService) handleStream(s network.Stream) {
|
||||
if err := s.Scope().SetService(ServiceName); err != nil {
|
||||
log.Debugf("error attaching stream to autonat service: %s", err)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
|
||||
log.Debugf("error reserving memory for autonat stream: %s", err)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
defer s.Scope().ReleaseMemory(maxMsgSize)
|
||||
|
||||
s.SetDeadline(time.Now().Add(streamTimeout))
|
||||
defer s.Close()
|
||||
|
||||
pid := s.Conn().RemotePeer()
|
||||
log.Debugf("New stream from %s", pid)
|
||||
|
||||
r := pbio.NewDelimitedReader(s, maxMsgSize)
|
||||
w := pbio.NewDelimitedWriter(s)
|
||||
|
||||
var req pb.Message
|
||||
var res pb.Message
|
||||
|
||||
err := r.ReadMsg(&req)
|
||||
if err != nil {
|
||||
log.Debugf("Error reading message from %s: %s", pid, err.Error())
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
t := req.GetType()
|
||||
if t != pb.Message_DIAL {
|
||||
log.Debugf("Unexpected message from %s: %s (%d)", pid, t.String(), t)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
dr := as.handleDial(pid, s.Conn().RemoteMultiaddr(), req.GetDial().GetPeer())
|
||||
res.Type = pb.Message_DIAL_RESPONSE.Enum()
|
||||
res.DialResponse = dr
|
||||
|
||||
err = w.WriteMsg(&res)
|
||||
if err != nil {
|
||||
log.Debugf("Error writing response to %s: %s", pid, err.Error())
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
if as.config.metricsTracer != nil {
|
||||
as.config.metricsTracer.OutgoingDialResponse(res.GetDialResponse().GetStatus())
|
||||
}
|
||||
}
|
||||
|
||||
func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Message_PeerInfo) *pb.Message_DialResponse {
|
||||
if mpi == nil {
|
||||
return newDialResponseError(pb.Message_E_BAD_REQUEST, "missing peer info")
|
||||
}
|
||||
|
||||
mpid := mpi.GetId()
|
||||
if mpid != nil {
|
||||
mp, err := peer.IDFromBytes(mpid)
|
||||
if err != nil {
|
||||
return newDialResponseError(pb.Message_E_BAD_REQUEST, "bad peer id")
|
||||
}
|
||||
|
||||
if mp != p {
|
||||
return newDialResponseError(pb.Message_E_BAD_REQUEST, "peer id mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
addrs := make([]ma.Multiaddr, 0, as.config.maxPeerAddresses)
|
||||
seen := make(map[string]struct{})
|
||||
|
||||
// Don't even try to dial peers with blocked remote addresses. In order to dial a peer, we
|
||||
// need to know their public IP address, and it needs to be different from our public IP
|
||||
// address.
|
||||
if as.config.dialPolicy.skipDial(obsaddr) {
|
||||
if as.config.metricsTracer != nil {
|
||||
as.config.metricsTracer.OutgoingDialRefused(dial_blocked)
|
||||
}
|
||||
// Note: versions < v0.20.0 return Message_E_DIAL_ERROR here, thus we can not rely on this error code.
|
||||
return newDialResponseError(pb.Message_E_DIAL_REFUSED, "refusing to dial peer with blocked observed address")
|
||||
}
|
||||
|
||||
// Determine the peer's IP address.
|
||||
hostIP, _ := ma.SplitFirst(obsaddr)
|
||||
switch hostIP.Protocol().Code {
|
||||
case ma.P_IP4, ma.P_IP6:
|
||||
default:
|
||||
// This shouldn't be possible as we should skip all addresses that don't include
|
||||
// public IP addresses.
|
||||
return newDialResponseError(pb.Message_E_INTERNAL_ERROR, "expected an IP address")
|
||||
}
|
||||
|
||||
// add observed addr to the list of addresses to dial
|
||||
addrs = append(addrs, obsaddr)
|
||||
seen[obsaddr.String()] = struct{}{}
|
||||
|
||||
for _, maddr := range mpi.GetAddrs() {
|
||||
addr, err := ma.NewMultiaddrBytes(maddr)
|
||||
if err != nil {
|
||||
log.Debugf("Error parsing multiaddr: %s", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// For security reasons, we _only_ dial the observed IP address.
|
||||
// Replace other IP addresses with the observed one so we can still try the
|
||||
// requested ports/transports.
|
||||
if ip, rest := ma.SplitFirst(addr); !ip.Equal(hostIP) {
|
||||
// Make sure it's an IP address
|
||||
switch ip.Protocol().Code {
|
||||
case ma.P_IP4, ma.P_IP6:
|
||||
default:
|
||||
continue
|
||||
}
|
||||
addr = hostIP
|
||||
if rest != nil {
|
||||
addr = addr.Encapsulate(rest)
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure we're willing to dial the rest of the address (e.g., not a circuit
|
||||
// address).
|
||||
if as.config.dialPolicy.skipDial(addr) {
|
||||
continue
|
||||
}
|
||||
|
||||
str := addr.String()
|
||||
_, ok := seen[str]
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
|
||||
addrs = append(addrs, addr)
|
||||
seen[str] = struct{}{}
|
||||
|
||||
if len(addrs) >= as.config.maxPeerAddresses {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(addrs) == 0 {
|
||||
if as.config.metricsTracer != nil {
|
||||
as.config.metricsTracer.OutgoingDialRefused(no_valid_address)
|
||||
}
|
||||
// Note: versions < v0.20.0 return Message_E_DIAL_ERROR here, thus we can not rely on this error code.
|
||||
return newDialResponseError(pb.Message_E_DIAL_REFUSED, "no dialable addresses")
|
||||
}
|
||||
|
||||
return as.doDial(peer.AddrInfo{ID: p, Addrs: addrs})
|
||||
}
|
||||
|
||||
func (as *autoNATService) doDial(pi peer.AddrInfo) *pb.Message_DialResponse {
|
||||
// rate limit check
|
||||
as.mx.Lock()
|
||||
count := as.reqs[pi.ID]
|
||||
if count >= as.config.throttlePeerMax || (as.config.throttleGlobalMax > 0 &&
|
||||
as.globalReqs >= as.config.throttleGlobalMax) {
|
||||
as.mx.Unlock()
|
||||
if as.config.metricsTracer != nil {
|
||||
as.config.metricsTracer.OutgoingDialRefused(rate_limited)
|
||||
}
|
||||
return newDialResponseError(pb.Message_E_DIAL_REFUSED, "too many dials")
|
||||
}
|
||||
as.reqs[pi.ID] = count + 1
|
||||
as.globalReqs++
|
||||
as.mx.Unlock()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), as.config.dialTimeout)
|
||||
defer cancel()
|
||||
|
||||
as.config.dialer.Peerstore().ClearAddrs(pi.ID)
|
||||
|
||||
as.config.dialer.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.TempAddrTTL)
|
||||
|
||||
defer func() {
|
||||
as.config.dialer.Peerstore().ClearAddrs(pi.ID)
|
||||
as.config.dialer.Peerstore().RemovePeer(pi.ID)
|
||||
}()
|
||||
|
||||
conn, err := as.config.dialer.DialPeer(ctx, pi.ID)
|
||||
if err != nil {
|
||||
log.Debugf("error dialing %s: %s", pi.ID, err.Error())
|
||||
// wait for the context to time out to avoid leaking timing information
|
||||
// this renders the service ineffective as a port scanner
|
||||
<-ctx.Done()
|
||||
return newDialResponseError(pb.Message_E_DIAL_ERROR, "dial failed")
|
||||
}
|
||||
|
||||
ra := conn.RemoteMultiaddr()
|
||||
as.config.dialer.ClosePeer(pi.ID)
|
||||
return newDialResponseOK(ra)
|
||||
}
|
||||
|
||||
// Enable the autoNAT service if it is not running.
|
||||
func (as *autoNATService) Enable() {
|
||||
as.instanceLock.Lock()
|
||||
defer as.instanceLock.Unlock()
|
||||
if as.instance != nil {
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
as.instance = cancel
|
||||
as.backgroundRunning = make(chan struct{})
|
||||
as.config.host.SetStreamHandler(AutoNATProto, as.handleStream)
|
||||
|
||||
go as.background(ctx)
|
||||
}
|
||||
|
||||
// Disable the autoNAT service if it is running.
|
||||
func (as *autoNATService) Disable() {
|
||||
as.instanceLock.Lock()
|
||||
defer as.instanceLock.Unlock()
|
||||
if as.instance != nil {
|
||||
as.config.host.RemoveStreamHandler(AutoNATProto)
|
||||
as.instance()
|
||||
as.instance = nil
|
||||
<-as.backgroundRunning
|
||||
}
|
||||
}
|
||||
|
||||
func (as *autoNATService) background(ctx context.Context) {
|
||||
defer close(as.backgroundRunning)
|
||||
|
||||
timer := time.NewTimer(as.config.throttleResetPeriod)
|
||||
defer timer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
as.mx.Lock()
|
||||
as.reqs = make(map[peer.ID]int)
|
||||
as.globalReqs = 0
|
||||
as.mx.Unlock()
|
||||
jitter := rand.Float32() * float32(as.config.throttleResetJitter)
|
||||
timer.Reset(as.config.throttleResetPeriod + time.Duration(int64(jitter)))
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
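The security-relevant step in handleDial above is the IP rewrite: only the observed public IP is ever dialed, while the advertised port/transport is kept. An illustrative fragment with made-up addresses, using the same go-multiaddr calls as the code above:

	// Observed address of the requesting peer, as seen on the inbound connection.
	obs := ma.StringCast("/ip4/203.0.113.7/tcp/4001")
	// An address the peer advertised with a different (here: private) IP.
	adv := ma.StringCast("/ip4/10.0.0.5/tcp/4002")

	hostIP, _ := ma.SplitFirst(obs) // /ip4/203.0.113.7
	_, rest := ma.SplitFirst(adv)   // /tcp/4002

	// Same substitution handleDial performs: keep the requested port/transport,
	// but force the observed IP.
	var rewritten ma.Multiaddr = hostIP
	if rest != nil {
		rewritten = rewritten.Encapsulate(rest)
	}
	// rewritten is now /ip4/203.0.113.7/tcp/4002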
165
vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/addrsplosion.go
generated
vendored
Normal file
@@ -0,0 +1,165 @@
|
||||
package autorelay
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
// This function cleans up a relay's address set to remove private addresses and curtail
|
||||
// addrsplosion.
|
||||
func cleanupAddressSet(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
var public, private []ma.Multiaddr
|
||||
|
||||
for _, a := range addrs {
|
||||
if isRelayAddr(a) {
|
||||
continue
|
||||
}
|
||||
|
||||
if manet.IsPublicAddr(a) || isDNSAddr(a) {
|
||||
public = append(public, a)
|
||||
continue
|
||||
}
|
||||
|
||||
// discard unroutable addrs
|
||||
if manet.IsPrivateAddr(a) {
|
||||
private = append(private, a)
|
||||
}
|
||||
}
|
||||
|
||||
if !hasAddrsplosion(public) {
|
||||
return public
|
||||
}
|
||||
|
||||
return sanitizeAddrsplodedSet(public, private)
|
||||
}
|
||||
|
||||
func isRelayAddr(a ma.Multiaddr) bool {
|
||||
isRelay := false
|
||||
|
||||
ma.ForEach(a, func(c ma.Component) bool {
|
||||
switch c.Protocol().Code {
|
||||
case ma.P_CIRCUIT:
|
||||
isRelay = true
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
})
|
||||
|
||||
return isRelay
|
||||
}
|
||||
|
||||
func isDNSAddr(a ma.Multiaddr) bool {
|
||||
if first, _ := ma.SplitFirst(a); first != nil {
|
||||
switch first.Protocol().Code {
|
||||
case ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// we have addrsplosion if for some protocol we advertise multiple ports on
|
||||
// the same base address.
|
||||
func hasAddrsplosion(addrs []ma.Multiaddr) bool {
|
||||
aset := make(map[string]int)
|
||||
|
||||
for _, a := range addrs {
|
||||
key, port := addrKeyAndPort(a)
|
||||
xport, ok := aset[key]
|
||||
if ok && port != xport {
|
||||
return true
|
||||
}
|
||||
aset[key] = port
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func addrKeyAndPort(a ma.Multiaddr) (string, int) {
|
||||
var (
|
||||
key string
|
||||
port int
|
||||
)
|
||||
|
||||
ma.ForEach(a, func(c ma.Component) bool {
|
||||
switch c.Protocol().Code {
|
||||
case ma.P_TCP, ma.P_UDP:
|
||||
port = int(binary.BigEndian.Uint16(c.RawValue()))
|
||||
key += "/" + c.Protocol().Name
|
||||
default:
|
||||
val := c.Value()
|
||||
if val == "" {
|
||||
val = c.Protocol().Name
|
||||
}
|
||||
key += "/" + val
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
return key, port
|
||||
}
|
||||
|
||||
// clean up addrsplosion
|
||||
// the following heuristic is used:
|
||||
// - for each base address/protocol combination, if there are multiple ports advertised then
|
||||
// only accept the default port if present.
|
||||
// - If the default port is not present, we check for non-standard ports by tracking
|
||||
// private port bindings if present.
|
||||
// - If there is no default or private port binding, then we can't infer the correct
|
||||
// port and give up and return all addrs (for that base address)
|
||||
func sanitizeAddrsplodedSet(public, private []ma.Multiaddr) []ma.Multiaddr {
|
||||
type portAndAddr struct {
|
||||
addr ma.Multiaddr
|
||||
port int
|
||||
}
|
||||
|
||||
privports := make(map[int]struct{})
|
||||
pubaddrs := make(map[string][]portAndAddr)
|
||||
|
||||
for _, a := range private {
|
||||
_, port := addrKeyAndPort(a)
|
||||
privports[port] = struct{}{}
|
||||
}
|
||||
|
||||
for _, a := range public {
|
||||
key, port := addrKeyAndPort(a)
|
||||
pubaddrs[key] = append(pubaddrs[key], portAndAddr{addr: a, port: port})
|
||||
}
|
||||
|
||||
var result []ma.Multiaddr
|
||||
for _, pas := range pubaddrs {
|
||||
if len(pas) == 1 {
|
||||
// it's not addrsploded
|
||||
result = append(result, pas[0].addr)
|
||||
continue
|
||||
}
|
||||
|
||||
haveAddr := false
|
||||
for _, pa := range pas {
|
||||
if _, ok := privports[pa.port]; ok {
|
||||
// it matches a privately bound port, use it
|
||||
result = append(result, pa.addr)
|
||||
haveAddr = true
|
||||
continue
|
||||
}
|
||||
|
||||
if pa.port == 4001 || pa.port == 4002 {
|
||||
// it's a default port, use it
|
||||
result = append(result, pa.addr)
|
||||
haveAddr = true
|
||||
}
|
||||
}
|
||||
|
||||
if !haveAddr {
|
||||
// we weren't able to select a port; bite the bullet and use them all
|
||||
for _, pa := range pas {
|
||||
result = append(result, pa.addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
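A worked, test-style sketch of the heuristic above (the addresses are made up; the Example name is illustrative and it calls the unexported cleanupAddressSet, so it only compiles inside this package, in a _test.go file that imports fmt and ma):

	func ExampleCleanupAddressSet() {
		addrs := []ma.Multiaddr{
			ma.StringCast("/ip4/10.0.0.5/tcp/4011"),     // private binding, records port 4011
			ma.StringCast("/ip4/203.0.113.7/tcp/4011"),  // public, matches the private port: kept
			ma.StringCast("/ip4/203.0.113.7/tcp/4001"),  // public, default port: kept
			ma.StringCast("/ip4/203.0.113.7/tcp/33001"), // public, unexplained extra port: dropped
		}
		for _, a := range cleanupAddressSet(addrs) {
			fmt.Println(a)
		}
		// Output:
		// /ip4/203.0.113.7/tcp/4011
		// /ip4/203.0.113.7/tcp/4001
	}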
125
vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
package autorelay
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
basic "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var log = logging.Logger("autorelay")
|
||||
|
||||
type AutoRelay struct {
|
||||
refCount sync.WaitGroup
|
||||
ctx context.Context
|
||||
ctxCancel context.CancelFunc
|
||||
|
||||
conf *config
|
||||
|
||||
mx sync.Mutex
|
||||
status network.Reachability
|
||||
|
||||
relayFinder *relayFinder
|
||||
|
||||
host host.Host
|
||||
addrsF basic.AddrsFactory
|
||||
|
||||
metricsTracer MetricsTracer
|
||||
}
|
||||
|
||||
func NewAutoRelay(bhost *basic.BasicHost, opts ...Option) (*AutoRelay, error) {
|
||||
r := &AutoRelay{
|
||||
host: bhost,
|
||||
addrsF: bhost.AddrsFactory,
|
||||
status: network.ReachabilityUnknown,
|
||||
}
|
||||
conf := defaultConfig
|
||||
for _, opt := range opts {
|
||||
if err := opt(&conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
r.ctx, r.ctxCancel = context.WithCancel(context.Background())
|
||||
r.conf = &conf
|
||||
r.relayFinder = newRelayFinder(bhost, conf.peerSource, &conf)
|
||||
r.metricsTracer = &wrappedMetricsTracer{conf.metricsTracer}
|
||||
bhost.AddrsFactory = r.hostAddrs
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *AutoRelay) Start() {
|
||||
r.refCount.Add(1)
|
||||
go func() {
|
||||
defer r.refCount.Done()
|
||||
r.background()
|
||||
}()
|
||||
}
|
||||
|
||||
func (r *AutoRelay) background() {
|
||||
subReachability, err := r.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("autorelay (background)"))
|
||||
if err != nil {
|
||||
log.Debug("failed to subscribe to the EvtLocalReachabilityChanged")
|
||||
return
|
||||
}
|
||||
defer subReachability.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-r.ctx.Done():
|
||||
return
|
||||
case ev, ok := <-subReachability.Out():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
// TODO: push changed addresses
|
||||
evt := ev.(event.EvtLocalReachabilityChanged)
|
||||
switch evt.Reachability {
|
||||
case network.ReachabilityPrivate, network.ReachabilityUnknown:
|
||||
err := r.relayFinder.Start()
|
||||
if errors.Is(err, errAlreadyRunning) {
|
||||
log.Debug("tried to start already running relay finder")
|
||||
} else if err != nil {
|
||||
log.Errorw("failed to start relay finder", "error", err)
|
||||
} else {
|
||||
r.metricsTracer.RelayFinderStatus(true)
|
||||
}
|
||||
case network.ReachabilityPublic:
|
||||
r.relayFinder.Stop()
|
||||
r.metricsTracer.RelayFinderStatus(false)
|
||||
}
|
||||
r.mx.Lock()
|
||||
r.status = evt.Reachability
|
||||
r.mx.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *AutoRelay) hostAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
return r.relayAddrs(r.addrsF(addrs))
|
||||
}
|
||||
|
||||
func (r *AutoRelay) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
if r.status != network.ReachabilityPrivate {
|
||||
return addrs
|
||||
}
|
||||
return r.relayFinder.relayAddrs(addrs)
|
||||
}
|
||||
|
||||
func (r *AutoRelay) Close() error {
|
||||
r.ctxCancel()
|
||||
err := r.relayFinder.Stop()
|
||||
r.refCount.Wait()
|
||||
return err
|
||||
}
|
||||
23
vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
package autorelay

import (
	"github.com/libp2p/go-libp2p/core/host"
)

type AutoRelayHost struct {
	host.Host
	ar *AutoRelay
}

func (h *AutoRelayHost) Close() error {
	_ = h.ar.Close()
	return h.Host.Close()
}

func (h *AutoRelayHost) Start() {
	h.ar.Start()
}

func NewAutoRelayHost(h host.Host, ar *AutoRelay) *AutoRelayHost {
	return &AutoRelayHost{Host: h, ar: ar}
}
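A minimal wiring sketch for the two files above, assuming an existing *basic.BasicHost named bhost and a hypothetical relayInfos []peer.AddrInfo; error handling is trimmed:

	// Create the AutoRelay service with a static relay set (see options.go below).
	ar, err := autorelay.NewAutoRelay(bhost, autorelay.WithStaticRelays(relayInfos))
	if err != nil {
		panic(err)
	}

	// Wrap the host so Close() also tears down AutoRelay, then start the background loop.
	h := autorelay.NewAutoRelayHost(bhost, ar)
	h.Start()
	defer h.Close()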
373
vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,373 @@
|
||||
package autorelay
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/libp2p/go-libp2p/p2p/metricshelper"
|
||||
"github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
|
||||
pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const metricNamespace = "libp2p_autorelay"
|
||||
|
||||
var (
|
||||
status = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "status",
|
||||
Help: "relay finder active",
|
||||
})
|
||||
reservationsOpenedTotal = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "reservations_opened_total",
|
||||
Help: "Reservations Opened",
|
||||
},
|
||||
)
|
||||
reservationsClosedTotal = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "reservations_closed_total",
|
||||
Help: "Reservations Closed",
|
||||
},
|
||||
)
|
||||
reservationRequestsOutcomeTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "reservation_requests_outcome_total",
|
||||
Help: "Reservation Request Outcome",
|
||||
},
|
||||
[]string{"request_type", "outcome"},
|
||||
)
|
||||
|
||||
relayAddressesUpdatedTotal = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "relay_addresses_updated_total",
|
||||
Help: "Relay Addresses Updated Count",
|
||||
},
|
||||
)
|
||||
relayAddressesCount = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "relay_addresses_count",
|
||||
Help: "Relay Addresses Count",
|
||||
},
|
||||
)
|
||||
|
||||
candidatesCircuitV2SupportTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "candidates_circuit_v2_support_total",
|
||||
Help: "Candidates supporting circuit v2",
|
||||
},
|
||||
[]string{"support"},
|
||||
)
|
||||
candidatesTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "candidates_total",
|
||||
Help: "Candidates Total",
|
||||
},
|
||||
[]string{"type"},
|
||||
)
|
||||
candLoopState = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "candidate_loop_state",
|
||||
Help: "Candidate Loop State",
|
||||
},
|
||||
)
|
||||
|
||||
scheduledWorkTime = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "scheduled_work_time",
|
||||
Help: "Scheduled Work Times",
|
||||
},
|
||||
[]string{"work_type"},
|
||||
)
|
||||
|
||||
desiredReservations = prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "desired_reservations",
|
||||
Help: "Desired Reservations",
|
||||
},
|
||||
)
|
||||
|
||||
collectors = []prometheus.Collector{
|
||||
status,
|
||||
reservationsOpenedTotal,
|
||||
reservationsClosedTotal,
|
||||
reservationRequestsOutcomeTotal,
|
||||
relayAddressesUpdatedTotal,
|
||||
relayAddressesCount,
|
||||
candidatesCircuitV2SupportTotal,
|
||||
candidatesTotal,
|
||||
candLoopState,
|
||||
scheduledWorkTime,
|
||||
desiredReservations,
|
||||
}
|
||||
)
|
||||
|
||||
type candidateLoopState int
|
||||
|
||||
const (
|
||||
peerSourceRateLimited candidateLoopState = iota
|
||||
waitingOnPeerChan
|
||||
waitingForTrigger
|
||||
stopped
|
||||
)
|
||||
|
||||
// MetricsTracer is the interface for tracking metrics for autorelay
|
||||
type MetricsTracer interface {
|
||||
RelayFinderStatus(isActive bool)
|
||||
|
||||
ReservationEnded(cnt int)
|
||||
ReservationOpened(cnt int)
|
||||
ReservationRequestFinished(isRefresh bool, err error)
|
||||
|
||||
RelayAddressCount(int)
|
||||
RelayAddressUpdated()
|
||||
|
||||
CandidateChecked(supportsCircuitV2 bool)
|
||||
CandidateAdded(cnt int)
|
||||
CandidateRemoved(cnt int)
|
||||
CandidateLoopState(state candidateLoopState)
|
||||
|
||||
ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes)
|
||||
|
||||
DesiredReservations(int)
|
||||
}
|
||||
|
||||
type metricsTracer struct{}
|
||||
|
||||
var _ MetricsTracer = &metricsTracer{}
|
||||
|
||||
type metricsTracerSetting struct {
|
||||
reg prometheus.Registerer
|
||||
}
|
||||
|
||||
type MetricsTracerOption func(*metricsTracerSetting)
|
||||
|
||||
func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
|
||||
return func(s *metricsTracerSetting) {
|
||||
if reg != nil {
|
||||
s.reg = reg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
|
||||
setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
|
||||
for _, opt := range opts {
|
||||
opt(setting)
|
||||
}
|
||||
metricshelper.RegisterCollectors(setting.reg, collectors...)
|
||||
|
||||
// Initialise these counters to 0 otherwise the first reservation requests aren't handled
|
||||
// correctly when using the promql increase function
|
||||
reservationRequestsOutcomeTotal.WithLabelValues("refresh", "success")
|
||||
reservationRequestsOutcomeTotal.WithLabelValues("new", "success")
|
||||
candidatesCircuitV2SupportTotal.WithLabelValues("yes")
|
||||
candidatesCircuitV2SupportTotal.WithLabelValues("no")
|
||||
return &metricsTracer{}
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) RelayFinderStatus(isActive bool) {
|
||||
if isActive {
|
||||
status.Set(1)
|
||||
} else {
|
||||
status.Set(0)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) ReservationEnded(cnt int) {
|
||||
reservationsClosedTotal.Add(float64(cnt))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) ReservationOpened(cnt int) {
|
||||
reservationsOpenedTotal.Add(float64(cnt))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) ReservationRequestFinished(isRefresh bool, err error) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
|
||||
if isRefresh {
|
||||
*tags = append(*tags, "refresh")
|
||||
} else {
|
||||
*tags = append(*tags, "new")
|
||||
}
|
||||
*tags = append(*tags, getReservationRequestStatus(err))
|
||||
reservationRequestsOutcomeTotal.WithLabelValues(*tags...).Inc()
|
||||
|
||||
if !isRefresh && err == nil {
|
||||
reservationsOpenedTotal.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) RelayAddressUpdated() {
|
||||
relayAddressesUpdatedTotal.Inc()
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) RelayAddressCount(cnt int) {
|
||||
relayAddressesCount.Set(float64(cnt))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) CandidateChecked(supportsCircuitV2 bool) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
if supportsCircuitV2 {
|
||||
*tags = append(*tags, "yes")
|
||||
} else {
|
||||
*tags = append(*tags, "no")
|
||||
}
|
||||
candidatesCircuitV2SupportTotal.WithLabelValues(*tags...).Inc()
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) CandidateAdded(cnt int) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
*tags = append(*tags, "added")
|
||||
candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) CandidateRemoved(cnt int) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
*tags = append(*tags, "removed")
|
||||
candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) CandidateLoopState(state candidateLoopState) {
|
||||
candLoopState.Set(float64(state))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
|
||||
*tags = append(*tags, "allowed peer source call")
|
||||
scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextAllowedCallToPeerSource.Unix()))
|
||||
*tags = (*tags)[:0]
|
||||
|
||||
*tags = append(*tags, "reservation refresh")
|
||||
scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextRefresh.Unix()))
|
||||
*tags = (*tags)[:0]
|
||||
|
||||
*tags = append(*tags, "clear backoff")
|
||||
scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextBackoff.Unix()))
|
||||
*tags = (*tags)[:0]
|
||||
|
||||
*tags = append(*tags, "old candidate check")
|
||||
scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextOldCandidateCheck.Unix()))
|
||||
}
|
||||
|
||||
func (mt *metricsTracer) DesiredReservations(cnt int) {
|
||||
desiredReservations.Set(float64(cnt))
|
||||
}
|
||||
|
||||
func getReservationRequestStatus(err error) string {
|
||||
if err == nil {
|
||||
return "success"
|
||||
}
|
||||
|
||||
status := "err other"
|
||||
var re client.ReservationError
|
||||
if errors.As(err, &re) {
|
||||
switch re.Status {
|
||||
case pbv2.Status_CONNECTION_FAILED:
|
||||
return "connection failed"
|
||||
case pbv2.Status_MALFORMED_MESSAGE:
|
||||
return "malformed message"
|
||||
case pbv2.Status_RESERVATION_REFUSED:
|
||||
return "reservation refused"
|
||||
case pbv2.Status_PERMISSION_DENIED:
|
||||
return "permission denied"
|
||||
case pbv2.Status_RESOURCE_LIMIT_EXCEEDED:
|
||||
return "resource limit exceeded"
|
||||
}
|
||||
}
|
||||
return status
|
||||
}
|
||||
|
||||
// wrappedMetricsTracer wraps MetricsTracer and ignores all calls when mt is nil
|
||||
type wrappedMetricsTracer struct {
|
||||
mt MetricsTracer
|
||||
}
|
||||
|
||||
var _ MetricsTracer = &wrappedMetricsTracer{}
|
||||
|
||||
func (mt *wrappedMetricsTracer) RelayFinderStatus(isActive bool) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.RelayFinderStatus(isActive)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) ReservationEnded(cnt int) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.ReservationEnded(cnt)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) ReservationOpened(cnt int) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.ReservationOpened(cnt)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) ReservationRequestFinished(isRefresh bool, err error) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.ReservationRequestFinished(isRefresh, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) RelayAddressUpdated() {
|
||||
if mt.mt != nil {
|
||||
mt.mt.RelayAddressUpdated()
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) RelayAddressCount(cnt int) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.RelayAddressCount(cnt)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) CandidateChecked(supportsCircuitV2 bool) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.CandidateChecked(supportsCircuitV2)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) CandidateAdded(cnt int) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.CandidateAdded(cnt)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) CandidateRemoved(cnt int) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.CandidateRemoved(cnt)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.ScheduledWorkUpdated(scheduledWork)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) DesiredReservations(cnt int) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.DesiredReservations(cnt)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *wrappedMetricsTracer) CandidateLoopState(state candidateLoopState) {
|
||||
if mt.mt != nil {
|
||||
mt.mt.CandidateLoopState(state)
|
||||
}
|
||||
}
|
||||
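A hedged sketch of exporting these metrics through a custom Prometheus registry; WithMetricsTracer is defined in options.go further down, and the variable names are illustrative:

	reg := prometheus.NewRegistry()

	// Register the autorelay collectors with our own registry instead of the default one.
	mt := autorelay.NewMetricsTracer(autorelay.WithRegisterer(reg))

	// Hand the tracer to AutoRelay via its options (see options.go).
	opts := []autorelay.Option{autorelay.WithMetricsTracer(mt)}
	_ = opts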
233
vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go
generated
vendored
Normal file
@@ -0,0 +1,233 @@
|
||||
package autorelay
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// AutoRelay will call this function when it needs new candidates, because it is
// not connected to the desired number of relays or it got disconnected from one
// of the relays. Implementations must send *at most* numPeers, and close the
// channel when they don't intend to provide any more peers. AutoRelay will not
// call the callback again until the channel is closed. Implementations should
// send new peers, but may send peers they sent before. AutoRelay implements a
// per-peer backoff (see WithBackoff). See WithMinInterval for setting the
// minimum interval between calls to the callback. The context.Context passed
// may be canceled when AutoRelay feels satisfied; it will be canceled when the
// node is shutting down. If the context is canceled you MUST close the output
// channel at some point.
type PeerSource func(ctx context.Context, num int) <-chan peer.AddrInfo
|
||||
|
||||
type config struct {
|
||||
clock ClockWithInstantTimer
|
||||
peerSource PeerSource
|
||||
// minimum interval used to call the peerSource callback
|
||||
minInterval time.Duration
|
||||
// see WithMinCandidates
|
||||
minCandidates int
|
||||
// see WithMaxCandidates
|
||||
maxCandidates int
|
||||
// Delay until we obtain reservations with relays, if we have less than minCandidates candidates.
|
||||
// See WithBootDelay.
|
||||
bootDelay time.Duration
|
||||
// backoff is the time we wait after failing to obtain a reservation with a candidate
|
||||
backoff time.Duration
|
||||
// Number of relays we strive to obtain a reservation with.
|
||||
desiredRelays int
|
||||
// see WithMaxCandidateAge
|
||||
maxCandidateAge time.Duration
|
||||
setMinCandidates bool
|
||||
// see WithMetricsTracer
|
||||
metricsTracer MetricsTracer
|
||||
}
|
||||
|
||||
var defaultConfig = config{
|
||||
clock: RealClock{},
|
||||
minCandidates: 4,
|
||||
maxCandidates: 20,
|
||||
bootDelay: 3 * time.Minute,
|
||||
backoff: time.Hour,
|
||||
desiredRelays: 2,
|
||||
maxCandidateAge: 30 * time.Minute,
|
||||
minInterval: 30 * time.Second,
|
||||
}
|
||||
|
||||
var (
|
||||
errAlreadyHavePeerSource = errors.New("can only use a single WithPeerSource or WithStaticRelays")
|
||||
)
|
||||
|
||||
type Option func(*config) error
|
||||
|
||||
func WithStaticRelays(static []peer.AddrInfo) Option {
|
||||
return func(c *config) error {
|
||||
if c.peerSource != nil {
|
||||
return errAlreadyHavePeerSource
|
||||
}
|
||||
|
||||
WithPeerSource(func(ctx context.Context, numPeers int) <-chan peer.AddrInfo {
|
||||
if len(static) < numPeers {
|
||||
numPeers = len(static)
|
||||
}
|
||||
c := make(chan peer.AddrInfo, numPeers)
|
||||
defer close(c)
|
||||
|
||||
for i := 0; i < numPeers; i++ {
|
||||
c <- static[i]
|
||||
}
|
||||
return c
|
||||
})(c)
|
||||
WithMinCandidates(len(static))(c)
|
||||
WithMaxCandidates(len(static))(c)
|
||||
WithNumRelays(len(static))(c)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPeerSource defines a callback for AutoRelay to query for more relay candidates.
|
||||
func WithPeerSource(f PeerSource) Option {
|
||||
return func(c *config) error {
|
||||
if c.peerSource != nil {
|
||||
return errAlreadyHavePeerSource
|
||||
}
|
||||
c.peerSource = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNumRelays sets the number of relays we strive to obtain reservations with.
|
||||
func WithNumRelays(n int) Option {
|
||||
return func(c *config) error {
|
||||
c.desiredRelays = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxCandidates sets the number of relay candidates that we buffer.
|
||||
func WithMaxCandidates(n int) Option {
|
||||
return func(c *config) error {
|
||||
c.maxCandidates = n
|
||||
if c.minCandidates > n {
|
||||
c.minCandidates = n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMinCandidates sets the minimum number of relay candidates we collect before trying to get a reservation
|
||||
// with any of them (unless we've been running for longer than the boot delay).
|
||||
// This is to make sure that we don't just randomly connect to the first candidate that we discover.
|
||||
func WithMinCandidates(n int) Option {
|
||||
return func(c *config) error {
|
||||
if n > c.maxCandidates {
|
||||
n = c.maxCandidates
|
||||
}
|
||||
c.minCandidates = n
|
||||
c.setMinCandidates = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBootDelay sets the boot delay for finding relays.
|
||||
// We won't attempt any reservation if we have fewer than the minimum number of candidates.
|
||||
// This prevents us from connecting to the "first best" relay, and allows us to select the relay more carefully.
|
||||
// However, in case we haven't found enough relays after the boot delay, we use what we have.
|
||||
func WithBootDelay(d time.Duration) Option {
|
||||
return func(c *config) error {
|
||||
c.bootDelay = d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBackoff sets the time we wait after failing to obtain a reservation with a candidate.
|
||||
func WithBackoff(d time.Duration) Option {
|
||||
return func(c *config) error {
|
||||
c.backoff = d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxCandidateAge sets the maximum age of a candidate.
|
||||
// When we are connected to the desired number of relays, we don't ask the peer source for new candidates.
|
||||
// This can cause AutoRelay's candidate list to become outdated: if all the candidates
// have become stale, we won't be able to quickly establish a new relay connection
// when an existing connection breaks.
|
||||
func WithMaxCandidateAge(d time.Duration) Option {
|
||||
return func(c *config) error {
|
||||
c.maxCandidateAge = d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// InstantTimer is a timer that triggers at some instant rather than some duration
|
||||
type InstantTimer interface {
|
||||
Reset(d time.Time) bool
|
||||
Stop() bool
|
||||
Ch() <-chan time.Time
|
||||
}
|
||||
|
||||
// ClockWithInstantTimer is a clock that can create timers that trigger at some
|
||||
// instant rather than some duration
|
||||
type ClockWithInstantTimer interface {
|
||||
Now() time.Time
|
||||
Since(t time.Time) time.Duration
|
||||
InstantTimer(when time.Time) InstantTimer
|
||||
}
|
||||
|
||||
type RealTimer struct{ t *time.Timer }
|
||||
|
||||
var _ InstantTimer = (*RealTimer)(nil)
|
||||
|
||||
func (t RealTimer) Ch() <-chan time.Time {
|
||||
return t.t.C
|
||||
}
|
||||
|
||||
func (t RealTimer) Reset(d time.Time) bool {
|
||||
return t.t.Reset(time.Until(d))
|
||||
}
|
||||
|
||||
func (t RealTimer) Stop() bool {
|
||||
return t.t.Stop()
|
||||
}
|
||||
|
||||
type RealClock struct{}
|
||||
|
||||
var _ ClockWithInstantTimer = RealClock{}
|
||||
|
||||
func (RealClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
func (RealClock) Since(t time.Time) time.Duration {
|
||||
return time.Since(t)
|
||||
}
|
||||
func (RealClock) InstantTimer(when time.Time) InstantTimer {
|
||||
t := time.NewTimer(time.Until(when))
|
||||
return &RealTimer{t}
|
||||
}
|
||||
|
||||
func WithClock(cl ClockWithInstantTimer) Option {
|
||||
return func(c *config) error {
|
||||
c.clock = cl
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMinInterval sets the minimum interval between calls to the peerSource callback;
// the callback is not called more often than this, even if AutoRelay needs new candidates.
|
||||
func WithMinInterval(interval time.Duration) Option {
|
||||
return func(c *config) error {
|
||||
c.minInterval = interval
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMetricsTracer configures autorelay to use mt to track metrics
|
||||
func WithMetricsTracer(mt MetricsTracer) Option {
|
||||
return func(c *config) error {
|
||||
c.metricsTracer = mt
|
||||
return nil
|
||||
}
|
||||
}
|
||||
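A sketch of a PeerSource implementation that follows the contract documented at the top of this file: it sends at most num peers, respects the context, and closes the channel when done. The lookupRelayCandidates helper is hypothetical:

	func relayPeerSource(lookupRelayCandidates func() []peer.AddrInfo) autorelay.PeerSource {
		return func(ctx context.Context, num int) <-chan peer.AddrInfo {
			out := make(chan peer.AddrInfo, num)
			go func() {
				// Closing the channel tells AutoRelay it may call us again later.
				defer close(out)
				for _, pi := range lookupRelayCandidates() {
					if num == 0 {
						return
					}
					select {
					case out <- pi:
						num--
					case <-ctx.Done():
						return
					}
				}
			}()
			return out
		}
	}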
17
vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
package autorelay

import (
	ma "github.com/multiformats/go-multiaddr"
)

// Filter filters out all relay addresses.
func Filter(addrs []ma.Multiaddr) []ma.Multiaddr {
	raddrs := make([]ma.Multiaddr, 0, len(addrs))
	for _, addr := range addrs {
		if isRelayAddr(addr) {
			continue
		}
		raddrs = append(raddrs, addr)
	}
	return raddrs
}
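Filter can serve as a simple address factory for hosts that should not advertise relayed addresses; a short sketch assuming the basic.AddrsFactory type used in autorelay.go above:

	var noRelayAddrs basic.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr {
		// Drop every /p2p-circuit address before advertising.
		return autorelay.Filter(addrs)
	}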
810
vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go
generated
vendored
Normal file
@@ -0,0 +1,810 @@
|
||||
package autorelay
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
basic "github.com/libp2p/go-libp2p/p2p/host/basic"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
|
||||
circuitv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
|
||||
circuitv2_proto "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
const protoIDv2 = circuitv2_proto.ProtoIDv2Hop
|
||||
|
||||
// Terminology:
|
||||
// Candidate: Once we connect to a node and it supports relay protocol,
|
||||
// we call it a candidate, and consider using it as a relay.
|
||||
// Relay: Out of the list of candidates, we select a relay to connect to.
|
||||
// Currently, we just randomly select a candidate, but we can employ more sophisticated
|
||||
// selection strategies here (e.g. by factoring in the RTT).
|
||||
|
||||
const (
|
||||
rsvpRefreshInterval = time.Minute
|
||||
rsvpExpirationSlack = 2 * time.Minute
|
||||
|
||||
autorelayTag = "autorelay"
|
||||
)
|
||||
|
||||
type candidate struct {
|
||||
added time.Time
|
||||
supportsRelayV2 bool
|
||||
ai peer.AddrInfo
|
||||
}
|
||||
|
||||
// relayFinder discovers relay candidates and maintains reservations with selected relays for a host behind a NAT.
|
||||
type relayFinder struct {
|
||||
bootTime time.Time
|
||||
host *basic.BasicHost
|
||||
|
||||
conf *config
|
||||
|
||||
refCount sync.WaitGroup
|
||||
|
||||
ctxCancel context.CancelFunc
|
||||
ctxCancelMx sync.Mutex
|
||||
|
||||
peerSource PeerSource
|
||||
|
||||
candidateFound chan struct{} // receives every time we find a new relay candidate
|
||||
candidateMx sync.Mutex
|
||||
candidates map[peer.ID]*candidate
|
||||
backoff map[peer.ID]time.Time
|
||||
maybeConnectToRelayTrigger chan struct{} // cap: 1
|
||||
// Any time _something_ happens that might cause us to need new candidates.
|
||||
// This could be
|
||||
// * the disconnection of a relay
|
||||
// * the failed attempt to obtain a reservation with a current candidate
|
||||
// * a candidate is deleted due to its age
|
||||
maybeRequestNewCandidates chan struct{} // cap: 1.
|
||||
|
||||
relayUpdated chan struct{}
|
||||
|
||||
relayMx sync.Mutex
|
||||
relays map[peer.ID]*circuitv2.Reservation
|
||||
|
||||
cachedAddrs []ma.Multiaddr
|
||||
cachedAddrsExpiry time.Time
|
||||
|
||||
// A channel that triggers a run of `runScheduledWork`.
|
||||
triggerRunScheduledWork chan struct{}
|
||||
metricsTracer MetricsTracer
|
||||
}
|
||||
|
||||
var errAlreadyRunning = errors.New("relayFinder already running")
|
||||
|
||||
func newRelayFinder(host *basic.BasicHost, peerSource PeerSource, conf *config) *relayFinder {
|
||||
if peerSource == nil {
|
||||
panic("Can not create a new relayFinder. Need a Peer Source fn or a list of static relays. Refer to the documentation around `libp2p.EnableAutoRelay`")
|
||||
}
|
||||
|
||||
return &relayFinder{
|
||||
bootTime: conf.clock.Now(),
|
||||
host: host,
|
||||
conf: conf,
|
||||
peerSource: peerSource,
|
||||
candidates: make(map[peer.ID]*candidate),
|
||||
backoff: make(map[peer.ID]time.Time),
|
||||
candidateFound: make(chan struct{}, 1),
|
||||
maybeConnectToRelayTrigger: make(chan struct{}, 1),
|
||||
maybeRequestNewCandidates: make(chan struct{}, 1),
|
||||
triggerRunScheduledWork: make(chan struct{}, 1),
|
||||
relays: make(map[peer.ID]*circuitv2.Reservation),
|
||||
relayUpdated: make(chan struct{}, 1),
|
||||
metricsTracer: &wrappedMetricsTracer{conf.metricsTracer},
|
||||
}
|
||||
}
|
||||
|
||||
type scheduledWorkTimes struct {
|
||||
leastFrequentInterval time.Duration
|
||||
nextRefresh time.Time
|
||||
nextBackoff time.Time
|
||||
nextOldCandidateCheck time.Time
|
||||
nextAllowedCallToPeerSource time.Time
|
||||
}
|
||||
|
||||
func (rf *relayFinder) background(ctx context.Context) {
|
||||
peerSourceRateLimiter := make(chan struct{}, 1)
|
||||
rf.refCount.Add(1)
|
||||
go func() {
|
||||
defer rf.refCount.Done()
|
||||
rf.findNodes(ctx, peerSourceRateLimiter)
|
||||
}()
|
||||
|
||||
rf.refCount.Add(1)
|
||||
go func() {
|
||||
defer rf.refCount.Done()
|
||||
rf.handleNewCandidates(ctx)
|
||||
}()
|
||||
|
||||
subConnectedness, err := rf.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged), eventbus.Name("autorelay (relay finder)"))
|
||||
if err != nil {
|
||||
log.Error("failed to subscribe to the EvtPeerConnectednessChanged")
|
||||
return
|
||||
}
|
||||
defer subConnectedness.Close()
|
||||
|
||||
now := rf.conf.clock.Now()
|
||||
bootDelayTimer := rf.conf.clock.InstantTimer(now.Add(rf.conf.bootDelay))
|
||||
defer bootDelayTimer.Stop()
|
||||
|
||||
// This is the least frequent event. It's our fallback timer if we don't have any other work to do.
|
||||
leastFrequentInterval := rf.conf.minInterval
|
||||
// Check if leastFrequentInterval is 0 to avoid busy looping
|
||||
if rf.conf.backoff > leastFrequentInterval || leastFrequentInterval == 0 {
|
||||
leastFrequentInterval = rf.conf.backoff
|
||||
}
|
||||
if rf.conf.maxCandidateAge > leastFrequentInterval || leastFrequentInterval == 0 {
|
||||
leastFrequentInterval = rf.conf.maxCandidateAge
|
||||
}
|
||||
if rsvpRefreshInterval > leastFrequentInterval || leastFrequentInterval == 0 {
|
||||
leastFrequentInterval = rsvpRefreshInterval
|
||||
}
|
||||
|
||||
scheduledWork := &scheduledWorkTimes{
|
||||
leastFrequentInterval: leastFrequentInterval,
|
||||
nextRefresh: now.Add(rsvpRefreshInterval),
|
||||
nextBackoff: now.Add(rf.conf.backoff),
|
||||
nextOldCandidateCheck: now.Add(rf.conf.maxCandidateAge),
|
||||
nextAllowedCallToPeerSource: now.Add(-time.Second), // allow immediately
|
||||
}
|
||||
|
||||
workTimer := rf.conf.clock.InstantTimer(rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter))
|
||||
defer workTimer.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case ev, ok := <-subConnectedness.Out():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
evt := ev.(event.EvtPeerConnectednessChanged)
|
||||
if evt.Connectedness != network.NotConnected {
|
||||
continue
|
||||
}
|
||||
push := false
|
||||
|
||||
rf.relayMx.Lock()
|
||||
if rf.usingRelay(evt.Peer) { // we were disconnected from a relay
|
||||
log.Debugw("disconnected from relay", "id", evt.Peer)
|
||||
delete(rf.relays, evt.Peer)
|
||||
rf.notifyMaybeConnectToRelay()
|
||||
rf.notifyMaybeNeedNewCandidates()
|
||||
push = true
|
||||
}
|
||||
rf.relayMx.Unlock()
|
||||
|
||||
if push {
|
||||
rf.clearCachedAddrsAndSignalAddressChange()
|
||||
rf.metricsTracer.ReservationEnded(1)
|
||||
}
|
||||
case <-rf.candidateFound:
|
||||
rf.notifyMaybeConnectToRelay()
|
||||
case <-bootDelayTimer.Ch():
|
||||
rf.notifyMaybeConnectToRelay()
|
||||
case <-rf.relayUpdated:
|
||||
rf.clearCachedAddrsAndSignalAddressChange()
|
||||
case now := <-workTimer.Ch():
|
||||
// Note: `now` is not guaranteed to be the current time. It's the time
|
||||
// that the timer was fired. This is okay because we'll schedule
|
||||
// future work at a specific time.
|
||||
nextTime := rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter)
|
||||
workTimer.Reset(nextTime)
|
||||
case <-rf.triggerRunScheduledWork:
|
||||
// Ignore the next time because we aren't scheduling any future work here
|
||||
_ = rf.runScheduledWork(ctx, rf.conf.clock.Now(), scheduledWork, peerSourceRateLimiter)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rf *relayFinder) clearCachedAddrsAndSignalAddressChange() {
|
||||
rf.relayMx.Lock()
|
||||
rf.cachedAddrs = nil
|
||||
rf.relayMx.Unlock()
|
||||
rf.host.SignalAddressChange()
|
||||
|
||||
rf.metricsTracer.RelayAddressUpdated()
|
||||
}
|
||||
|
||||
func (rf *relayFinder) runScheduledWork(ctx context.Context, now time.Time, scheduledWork *scheduledWorkTimes, peerSourceRateLimiter chan<- struct{}) time.Time {
|
||||
nextTime := now.Add(scheduledWork.leastFrequentInterval)
|
||||
|
||||
if now.After(scheduledWork.nextRefresh) {
|
||||
scheduledWork.nextRefresh = now.Add(rsvpRefreshInterval)
|
||||
if rf.refreshReservations(ctx, now) {
|
||||
rf.clearCachedAddrsAndSignalAddressChange()
|
||||
}
|
||||
}
|
||||
|
||||
if now.After(scheduledWork.nextBackoff) {
|
||||
scheduledWork.nextBackoff = rf.clearBackoff(now)
|
||||
}
|
||||
|
||||
if now.After(scheduledWork.nextOldCandidateCheck) {
|
||||
scheduledWork.nextOldCandidateCheck = rf.clearOldCandidates(now)
|
||||
}
|
||||
|
||||
if now.After(scheduledWork.nextAllowedCallToPeerSource) {
|
||||
select {
|
||||
case peerSourceRateLimiter <- struct{}{}:
|
||||
scheduledWork.nextAllowedCallToPeerSource = now.Add(rf.conf.minInterval)
|
||||
if scheduledWork.nextAllowedCallToPeerSource.Before(nextTime) {
|
||||
nextTime = scheduledWork.nextAllowedCallToPeerSource
|
||||
}
|
||||
default:
|
||||
}
|
||||
} else {
|
||||
// We still need to schedule this work if it's sooner than nextTime
|
||||
if scheduledWork.nextAllowedCallToPeerSource.Before(nextTime) {
|
||||
nextTime = scheduledWork.nextAllowedCallToPeerSource
|
||||
}
|
||||
}
|
||||
|
||||
// Find the next time we need to run scheduled work.
|
||||
if scheduledWork.nextRefresh.Before(nextTime) {
|
||||
nextTime = scheduledWork.nextRefresh
|
||||
}
|
||||
if scheduledWork.nextBackoff.Before(nextTime) {
|
||||
nextTime = scheduledWork.nextBackoff
|
||||
}
|
||||
if scheduledWork.nextOldCandidateCheck.Before(nextTime) {
|
||||
nextTime = scheduledWork.nextOldCandidateCheck
|
||||
}
|
||||
if nextTime == now {
|
||||
// Only happens in CI with a mock clock
|
||||
nextTime = nextTime.Add(1) // avoids an infinite loop
|
||||
}
|
||||
|
||||
rf.metricsTracer.ScheduledWorkUpdated(scheduledWork)
|
||||
|
||||
return nextTime
|
||||
}
|
||||
|
||||
// clearOldCandidates clears old candidates from the map. Returns the next time
|
||||
// to run this function.
|
||||
func (rf *relayFinder) clearOldCandidates(now time.Time) time.Time {
|
||||
// If we don't have any candidates, we should run this again in rf.conf.maxCandidateAge.
|
||||
nextTime := now.Add(rf.conf.maxCandidateAge)
|
||||
|
||||
var deleted bool
|
||||
rf.candidateMx.Lock()
|
||||
defer rf.candidateMx.Unlock()
|
||||
for id, cand := range rf.candidates {
|
||||
expiry := cand.added.Add(rf.conf.maxCandidateAge)
|
||||
if expiry.After(now) {
|
||||
if expiry.Before(nextTime) {
|
||||
nextTime = expiry
|
||||
}
|
||||
} else {
|
||||
log.Debugw("deleting candidate due to age", "id", id)
|
||||
deleted = true
|
||||
rf.removeCandidate(id)
|
||||
}
|
||||
}
|
||||
if deleted {
|
||||
rf.notifyMaybeNeedNewCandidates()
|
||||
}
|
||||
|
||||
return nextTime
|
||||
}
|
||||
|
||||
// clearBackoff clears old backoff entries from the map. Returns the next time
|
||||
// to run this function.
|
||||
func (rf *relayFinder) clearBackoff(now time.Time) time.Time {
|
||||
nextTime := now.Add(rf.conf.backoff)
|
||||
|
||||
rf.candidateMx.Lock()
|
||||
defer rf.candidateMx.Unlock()
|
||||
for id, t := range rf.backoff {
|
||||
expiry := t.Add(rf.conf.backoff)
|
||||
if expiry.After(now) {
|
||||
if expiry.Before(nextTime) {
|
||||
nextTime = expiry
|
||||
}
|
||||
} else {
|
||||
log.Debugw("removing backoff for node", "id", id)
|
||||
delete(rf.backoff, id)
|
||||
}
|
||||
}
|
||||
|
||||
return nextTime
|
||||
}
|
||||
|
||||
// findNodes accepts nodes from the channel and tests if they support relaying.
|
||||
// It is run on both public and private nodes.
|
||||
// It garbage collects old entries, so that the candidate set doesn't overflow.
|
||||
// This makes sure that as soon as we need to find relay candidates, we have them available.
|
||||
// peerSourceRateLimiter is used to limit how often we call the peer source.
|
||||
func (rf *relayFinder) findNodes(ctx context.Context, peerSourceRateLimiter <-chan struct{}) {
|
||||
var peerChan <-chan peer.AddrInfo
|
||||
var wg sync.WaitGroup
|
||||
for {
|
||||
rf.candidateMx.Lock()
|
||||
numCandidates := len(rf.candidates)
|
||||
rf.candidateMx.Unlock()
|
||||
|
||||
if peerChan == nil && numCandidates < rf.conf.minCandidates {
|
||||
rf.metricsTracer.CandidateLoopState(peerSourceRateLimited)
|
||||
|
||||
select {
|
||||
case <-peerSourceRateLimiter:
|
||||
peerChan = rf.peerSource(ctx, rf.conf.maxCandidates)
|
||||
select {
|
||||
case rf.triggerRunScheduledWork <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if peerChan == nil {
|
||||
rf.metricsTracer.CandidateLoopState(waitingForTrigger)
|
||||
} else {
|
||||
rf.metricsTracer.CandidateLoopState(waitingOnPeerChan)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-rf.maybeRequestNewCandidates:
|
||||
continue
|
||||
case pi, ok := <-peerChan:
|
||||
if !ok {
|
||||
wg.Wait()
|
||||
peerChan = nil
|
||||
continue
|
||||
}
|
||||
log.Debugw("found node", "id", pi.ID)
|
||||
rf.candidateMx.Lock()
|
||||
numCandidates := len(rf.candidates)
|
||||
backoffStart, isOnBackoff := rf.backoff[pi.ID]
|
||||
rf.candidateMx.Unlock()
|
||||
if isOnBackoff {
|
||||
log.Debugw("skipping node that we recently failed to obtain a reservation with", "id", pi.ID, "last attempt", rf.conf.clock.Since(backoffStart))
|
||||
continue
|
||||
}
|
||||
if numCandidates >= rf.conf.maxCandidates {
|
||||
log.Debugw("skipping node. Already have enough candidates", "id", pi.ID, "num", numCandidates, "max", rf.conf.maxCandidates)
|
||||
continue
|
||||
}
|
||||
rf.refCount.Add(1)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer rf.refCount.Done()
|
||||
defer wg.Done()
|
||||
if added := rf.handleNewNode(ctx, pi); added {
|
||||
rf.notifyNewCandidate()
|
||||
}
|
||||
}()
|
||||
case <-ctx.Done():
|
||||
rf.metricsTracer.CandidateLoopState(stopped)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rf *relayFinder) notifyMaybeConnectToRelay() {
|
||||
select {
|
||||
case rf.maybeConnectToRelayTrigger <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (rf *relayFinder) notifyMaybeNeedNewCandidates() {
|
||||
select {
|
||||
case rf.maybeRequestNewCandidates <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (rf *relayFinder) notifyNewCandidate() {
|
||||
select {
|
||||
case rf.candidateFound <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// handleNewNode tests if a peer supports circuit v2.
|
||||
// This method is only run on private nodes.
|
||||
// If a peer does, it is added to the candidates map.
|
||||
// Note that just supporting the protocol doesn't guarantee that we can also obtain a reservation.
|
||||
func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (added bool) {
|
||||
rf.relayMx.Lock()
|
||||
relayInUse := rf.usingRelay(pi.ID)
|
||||
rf.relayMx.Unlock()
|
||||
if relayInUse {
|
||||
return false
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||
defer cancel()
|
||||
supportsV2, err := rf.tryNode(ctx, pi)
|
||||
if err != nil {
|
||||
log.Debugf("node %s not accepted as a candidate: %s", pi.ID, err)
|
||||
if err == errProtocolNotSupported {
|
||||
rf.metricsTracer.CandidateChecked(false)
|
||||
}
|
||||
return false
|
||||
}
|
||||
rf.metricsTracer.CandidateChecked(true)
|
||||
|
||||
rf.candidateMx.Lock()
|
||||
if len(rf.candidates) > rf.conf.maxCandidates {
|
||||
rf.candidateMx.Unlock()
|
||||
return false
|
||||
}
|
||||
log.Debugw("node supports relay protocol", "peer", pi.ID, "supports circuit v2", supportsV2)
|
||||
rf.addCandidate(&candidate{
|
||||
added: rf.conf.clock.Now(),
|
||||
ai: pi,
|
||||
supportsRelayV2: supportsV2,
|
||||
})
|
||||
rf.candidateMx.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
var errProtocolNotSupported = errors.New("doesn't speak circuit v2")
|
||||
|
||||
// tryNode checks if a peer actually supports circuit v2.
|
||||
// It does not modify any internal state.
|
||||
func (rf *relayFinder) tryNode(ctx context.Context, pi peer.AddrInfo) (supportsRelayV2 bool, err error) {
|
||||
if err := rf.host.Connect(ctx, pi); err != nil {
|
||||
return false, fmt.Errorf("error connecting to relay %s: %w", pi.ID, err)
|
||||
}
|
||||
|
||||
conns := rf.host.Network().ConnsToPeer(pi.ID)
|
||||
for _, conn := range conns {
|
||||
if isRelayAddr(conn.RemoteMultiaddr()) {
|
||||
return false, errors.New("not a public node")
|
||||
}
|
||||
}
|
||||
|
||||
// wait for identify to complete in at least one conn so that we can check the supported protocols
|
||||
ready := make(chan struct{}, 1)
|
||||
for _, conn := range conns {
|
||||
go func(conn network.Conn) {
|
||||
select {
|
||||
case <-rf.host.IDService().IdentifyWait(conn):
|
||||
select {
|
||||
case ready <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}(conn)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ready:
|
||||
case <-ctx.Done():
|
||||
return false, ctx.Err()
|
||||
}
|
||||
|
||||
protos, err := rf.host.Peerstore().SupportsProtocols(pi.ID, protoIDv2)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error checking relay protocol support for peer %s: %w", pi.ID, err)
|
||||
}
|
||||
if len(protos) == 0 {
|
||||
return false, errProtocolNotSupported
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// When a new node that could be a relay is found, we receive a notification on the maybeConnectToRelayTrigger chan.
|
||||
// This function makes sure that we only run one instance of maybeConnectToRelay at once, and buffers
|
||||
// exactly one more trigger event to run maybeConnectToRelay.
|
||||
func (rf *relayFinder) handleNewCandidates(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-rf.maybeConnectToRelayTrigger:
|
||||
rf.maybeConnectToRelay(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) {
|
||||
rf.relayMx.Lock()
|
||||
numRelays := len(rf.relays)
|
||||
rf.relayMx.Unlock()
|
||||
// We're already connected to our desired number of relays. Nothing to do here.
|
||||
if numRelays == rf.conf.desiredRelays {
|
||||
return
|
||||
}
|
||||
|
||||
rf.candidateMx.Lock()
|
||||
if len(rf.relays) == 0 && len(rf.candidates) < rf.conf.minCandidates && rf.conf.clock.Since(rf.bootTime) < rf.conf.bootDelay {
|
||||
// During the startup phase, we don't want to connect to the first candidate that we find.
|
||||
// Instead, we wait until we've found at least minCandidates, and then select the best of those.
|
||||
// However, if that takes too long (longer than bootDelay), we still go ahead.
|
||||
rf.candidateMx.Unlock()
|
||||
return
|
||||
}
|
||||
if len(rf.candidates) == 0 {
|
||||
rf.candidateMx.Unlock()
|
||||
return
|
||||
}
|
||||
candidates := rf.selectCandidates()
|
||||
rf.candidateMx.Unlock()
|
||||
|
||||
// We now iterate over the candidates, attempting (sequentially) to get reservations with them, until
|
||||
// we reach the desired number of relays.
|
||||
for _, cand := range candidates {
|
||||
id := cand.ai.ID
|
||||
rf.relayMx.Lock()
|
||||
usingRelay := rf.usingRelay(id)
|
||||
rf.relayMx.Unlock()
|
||||
if usingRelay {
|
||||
rf.candidateMx.Lock()
|
||||
rf.removeCandidate(id)
|
||||
rf.candidateMx.Unlock()
|
||||
rf.notifyMaybeNeedNewCandidates()
|
||||
continue
|
||||
}
|
||||
rsvp, err := rf.connectToRelay(ctx, cand)
|
||||
if err != nil {
|
||||
log.Debugw("failed to connect to relay", "peer", id, "error", err)
|
||||
rf.notifyMaybeNeedNewCandidates()
|
||||
rf.metricsTracer.ReservationRequestFinished(false, err)
|
||||
continue
|
||||
}
|
||||
log.Debugw("adding new relay", "id", id)
|
||||
rf.relayMx.Lock()
|
||||
rf.relays[id] = rsvp
|
||||
numRelays := len(rf.relays)
|
||||
rf.relayMx.Unlock()
|
||||
rf.notifyMaybeNeedNewCandidates()
|
||||
|
||||
rf.host.ConnManager().Protect(id, autorelayTag) // protect the connection
|
||||
|
||||
select {
|
||||
case rf.relayUpdated <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
rf.metricsTracer.ReservationRequestFinished(false, nil)
|
||||
|
||||
if numRelays >= rf.conf.desiredRelays {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*circuitv2.Reservation, error) {
|
||||
id := cand.ai.ID
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var rsvp *circuitv2.Reservation
|
||||
|
||||
// make sure we're still connected.
|
||||
if rf.host.Network().Connectedness(id) != network.Connected {
|
||||
if err := rf.host.Connect(ctx, cand.ai); err != nil {
|
||||
rf.candidateMx.Lock()
|
||||
rf.removeCandidate(cand.ai.ID)
|
||||
rf.candidateMx.Unlock()
|
||||
return nil, fmt.Errorf("failed to connect: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
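// Record this attempt in the backoff map before reserving, so findNodes won't re-add this peer as a candidate right away regardless of the outcome.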
rf.candidateMx.Lock()
|
||||
rf.backoff[id] = rf.conf.clock.Now()
|
||||
rf.candidateMx.Unlock()
|
||||
var err error
|
||||
if cand.supportsRelayV2 {
|
||||
rsvp, err = circuitv2.Reserve(ctx, rf.host, cand.ai)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to reserve slot: %w", err)
|
||||
}
|
||||
}
|
||||
rf.candidateMx.Lock()
|
||||
rf.removeCandidate(id)
|
||||
rf.candidateMx.Unlock()
|
||||
return rsvp, err
|
||||
}
|
||||
|
||||
func (rf *relayFinder) refreshReservations(ctx context.Context, now time.Time) bool {
|
||||
rf.relayMx.Lock()
|
||||
|
||||
// find reservations about to expire and refresh them in parallel
|
||||
g := new(errgroup.Group)
|
||||
for p, rsvp := range rf.relays {
|
||||
if now.Add(rsvpExpirationSlack).Before(rsvp.Expiration) {
|
||||
continue
|
||||
}
|
||||
|
||||
p := p
|
||||
g.Go(func() error {
|
||||
err := rf.refreshRelayReservation(ctx, p)
|
||||
rf.metricsTracer.ReservationRequestFinished(true, err)
|
||||
|
||||
return err
|
||||
})
|
||||
}
|
||||
rf.relayMx.Unlock()
|
||||
|
||||
err := g.Wait()
|
||||
return err != nil
|
||||
}
|
||||
|
||||
func (rf *relayFinder) refreshRelayReservation(ctx context.Context, p peer.ID) error {
|
||||
rsvp, err := circuitv2.Reserve(ctx, rf.host, peer.AddrInfo{ID: p})
|
||||
|
||||
rf.relayMx.Lock()
|
||||
if err != nil {
|
||||
log.Debugw("failed to refresh relay slot reservation", "relay", p, "error", err)
|
||||
_, exists := rf.relays[p]
|
||||
delete(rf.relays, p)
|
||||
// unprotect the connection
|
||||
rf.host.ConnManager().Unprotect(p, autorelayTag)
|
||||
rf.relayMx.Unlock()
|
||||
if exists {
|
||||
rf.metricsTracer.ReservationEnded(1)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugw("refreshed relay slot reservation", "relay", p)
|
||||
rf.relays[p] = rsvp
|
||||
rf.relayMx.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// usingRelay returns if we're currently using the given relay.
|
||||
func (rf *relayFinder) usingRelay(p peer.ID) bool {
|
||||
_, ok := rf.relays[p]
|
||||
return ok
|
||||
}
|
||||
|
||||
// addCandidate adds a candidate to the candidates set. Assumes the caller holds the candidateMx mutex.
|
||||
func (rf *relayFinder) addCandidate(cand *candidate) {
|
||||
_, exists := rf.candidates[cand.ai.ID]
|
||||
rf.candidates[cand.ai.ID] = cand
|
||||
if !exists {
|
||||
rf.metricsTracer.CandidateAdded(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (rf *relayFinder) removeCandidate(id peer.ID) {
|
||||
_, exists := rf.candidates[id]
|
||||
if exists {
|
||||
delete(rf.candidates, id)
|
||||
rf.metricsTracer.CandidateRemoved(1)
|
||||
}
|
||||
}
|
||||
|
||||
// selectCandidates returns an ordered slice of relay candidates.
|
||||
// Callers should attempt to obtain reservations with the candidates in this order.
|
||||
func (rf *relayFinder) selectCandidates() []*candidate {
|
||||
now := rf.conf.clock.Now()
|
||||
candidates := make([]*candidate, 0, len(rf.candidates))
|
||||
for _, cand := range rf.candidates {
|
||||
if cand.added.Add(rf.conf.maxCandidateAge).After(now) {
|
||||
candidates = append(candidates, cand)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: better relay selection strategy; this just selects random relays,
|
||||
// but we should probably use ping latency as the selection metric
|
||||
rand.Shuffle(len(candidates), func(i, j int) {
|
||||
candidates[i], candidates[j] = candidates[j], candidates[i]
|
||||
})
|
||||
return candidates
|
||||
}
|
||||
|
||||
// This function computes the NATed relay addrs when our status is private:
|
||||
// - The public addrs are removed from the address set.
|
||||
// - The non-public addrs are included verbatim so that peers behind the same NAT/firewall
|
||||
// can still dial us directly.
|
||||
// - On top of those, we add the relay-specific addrs for the relays to which we are
|
||||
// connected. For each non-private relay addr, we encapsulate the p2p-circuit addr
|
||||
// through which we can be dialed.
|
||||
func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
rf.relayMx.Lock()
|
||||
defer rf.relayMx.Unlock()
|
||||
|
||||
if rf.cachedAddrs != nil && rf.conf.clock.Now().Before(rf.cachedAddrsExpiry) {
|
||||
return rf.cachedAddrs
|
||||
}
|
||||
|
||||
raddrs := make([]ma.Multiaddr, 0, 4*len(rf.relays)+4)
|
||||
|
||||
// only keep private addrs from the original addr set
|
||||
for _, addr := range addrs {
|
||||
if manet.IsPrivateAddr(addr) {
|
||||
raddrs = append(raddrs, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// add relay specific addrs to the list
|
||||
relayAddrCnt := 0
|
||||
for p := range rf.relays {
|
||||
addrs := cleanupAddressSet(rf.host.Peerstore().Addrs(p))
|
||||
relayAddrCnt += len(addrs)
|
||||
circuit := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p))
|
||||
for _, addr := range addrs {
|
||||
pub := addr.Encapsulate(circuit)
|
||||
raddrs = append(raddrs, pub)
|
||||
}
|
||||
}
|
||||
|
||||
rf.cachedAddrs = raddrs
|
||||
rf.cachedAddrsExpiry = rf.conf.clock.Now().Add(30 * time.Second)
|
||||
|
||||
rf.metricsTracer.RelayAddressCount(relayAddrCnt)
|
||||
return raddrs
|
||||
}
|
||||
|
||||
func (rf *relayFinder) Start() error {
|
||||
rf.ctxCancelMx.Lock()
|
||||
defer rf.ctxCancelMx.Unlock()
|
||||
if rf.ctxCancel != nil {
|
||||
return errAlreadyRunning
|
||||
}
|
||||
log.Debug("starting relay finder")
|
||||
|
||||
rf.initMetrics()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
rf.ctxCancel = cancel
|
||||
rf.refCount.Add(1)
|
||||
go func() {
|
||||
defer rf.refCount.Done()
|
||||
rf.background(ctx)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rf *relayFinder) Stop() error {
|
||||
rf.ctxCancelMx.Lock()
|
||||
defer rf.ctxCancelMx.Unlock()
|
||||
log.Debug("stopping relay finder")
|
||||
if rf.ctxCancel != nil {
|
||||
rf.ctxCancel()
|
||||
}
|
||||
rf.refCount.Wait()
|
||||
rf.ctxCancel = nil
|
||||
|
||||
rf.resetMetrics()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rf *relayFinder) initMetrics() {
|
||||
rf.metricsTracer.DesiredReservations(rf.conf.desiredRelays)
|
||||
|
||||
rf.relayMx.Lock()
|
||||
rf.metricsTracer.ReservationOpened(len(rf.relays))
|
||||
rf.relayMx.Unlock()
|
||||
|
||||
rf.candidateMx.Lock()
|
||||
rf.metricsTracer.CandidateAdded(len(rf.candidates))
|
||||
rf.candidateMx.Unlock()
|
||||
}
|
||||
|
||||
func (rf *relayFinder) resetMetrics() {
|
||||
rf.relayMx.Lock()
|
||||
rf.metricsTracer.ReservationEnded(len(rf.relays))
|
||||
rf.relayMx.Unlock()
|
||||
|
||||
rf.candidateMx.Lock()
|
||||
rf.metricsTracer.CandidateRemoved(len(rf.candidates))
|
||||
rf.candidateMx.Unlock()
|
||||
|
||||
rf.metricsTracer.RelayAddressCount(0)
|
||||
rf.metricsTracer.ScheduledWorkUpdated(&scheduledWorkTimes{})
|
||||
}
|
||||
1077
vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go
generated
vendored
Normal file
1077
vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go
generated
vendored
Normal file
File diff suppressed because it is too large
6
vendor/github.com/libp2p/go-libp2p/p2p/host/basic/mocks.go
generated
vendored
Normal file
6
vendor/github.com/libp2p/go-libp2p/p2p/host/basic/mocks.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
//go:build gomock || generate
|
||||
|
||||
package basichost
|
||||
|
||||
//go:generate sh -c "go run go.uber.org/mock/mockgen -build_flags=\"-tags=gomock\" -package basichost -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/host/basic NAT"
|
||||
type NAT nat
|
||||
299
vendor/github.com/libp2p/go-libp2p/p2p/host/basic/natmgr.go
generated
vendored
Normal file
299
vendor/github.com/libp2p/go-libp2p/p2p/host/basic/natmgr.go
generated
vendored
Normal file
@@ -0,0 +1,299 @@
|
||||
package basichost
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
inat "github.com/libp2p/go-libp2p/p2p/net/nat"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
// NATManager is a simple interface to manage NAT devices.
|
||||
// It listens for Listen and ListenClose notifications from the network.Network,
|
||||
// and tries to obtain port mappings for those.
|
||||
type NATManager interface {
|
||||
GetMapping(ma.Multiaddr) ma.Multiaddr
|
||||
HasDiscoveredNAT() bool
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// NewNATManager creates a NAT manager.
|
||||
func NewNATManager(net network.Network) NATManager {
|
||||
return newNATManager(net)
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
protocol string
|
||||
port int
|
||||
}
|
||||
|
||||
type nat interface {
|
||||
AddMapping(ctx context.Context, protocol string, port int) error
|
||||
RemoveMapping(ctx context.Context, protocol string, port int) error
|
||||
GetMapping(protocol string, port int) (netip.AddrPort, bool)
|
||||
io.Closer
|
||||
}
|
||||
|
||||
// so we can mock it in tests
|
||||
var discoverNAT = func(ctx context.Context) (nat, error) { return inat.DiscoverNAT(ctx) }
|
||||
|
||||
// natManager takes care of adding + removing port mappings to the nat.
|
||||
// Initialized with the host if it has a NATPortMap option enabled.
|
||||
// natManager receives signals from the network and checks on NAT mappings:
|
||||
// - natManager listens to the network and adds or closes port mappings
|
||||
// as the network signals Listen() or ListenClose().
|
||||
// - closing the natManager closes the nat and its mappings.
|
||||
type natManager struct {
|
||||
net network.Network
|
||||
natMx sync.RWMutex
|
||||
nat nat
|
||||
|
||||
syncFlag chan struct{} // cap: 1
|
||||
|
||||
tracked map[entry]bool // the bool is only used in doSync and has no meaning outside of that function
|
||||
|
||||
refCount sync.WaitGroup
|
||||
ctx context.Context
|
||||
ctxCancel context.CancelFunc
|
||||
}
|
||||
|
||||
func newNATManager(net network.Network) *natManager {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
nmgr := &natManager{
|
||||
net: net,
|
||||
syncFlag: make(chan struct{}, 1),
|
||||
ctx: ctx,
|
||||
ctxCancel: cancel,
|
||||
tracked: make(map[entry]bool),
|
||||
}
|
||||
nmgr.refCount.Add(1)
|
||||
go nmgr.background(ctx)
|
||||
return nmgr
|
||||
}
|
||||
|
||||
// Close closes the natManager, closing the underlying nat
|
||||
// and unregistering from network events.
|
||||
func (nmgr *natManager) Close() error {
|
||||
nmgr.ctxCancel()
|
||||
nmgr.refCount.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nmgr *natManager) HasDiscoveredNAT() bool {
|
||||
nmgr.natMx.RLock()
|
||||
defer nmgr.natMx.RUnlock()
|
||||
return nmgr.nat != nil
|
||||
}
|
||||
|
||||
func (nmgr *natManager) background(ctx context.Context) {
|
||||
defer nmgr.refCount.Done()
|
||||
|
||||
defer func() {
|
||||
nmgr.natMx.Lock()
|
||||
defer nmgr.natMx.Unlock()
|
||||
|
||||
if nmgr.nat != nil {
|
||||
nmgr.nat.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
discoverCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
natInstance, err := discoverNAT(discoverCtx)
|
||||
if err != nil {
|
||||
log.Info("DiscoverNAT error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
nmgr.natMx.Lock()
|
||||
nmgr.nat = natInstance
|
||||
nmgr.natMx.Unlock()
|
||||
|
||||
// sign natManager up for network notifications
|
||||
// we need to sign up here to avoid missing some notifs
|
||||
// before the NAT has been found.
|
||||
nmgr.net.Notify((*nmgrNetNotifiee)(nmgr))
|
||||
defer nmgr.net.StopNotify((*nmgrNetNotifiee)(nmgr))
|
||||
|
||||
nmgr.doSync() // sync one first.
|
||||
for {
|
||||
select {
|
||||
case <-nmgr.syncFlag:
|
||||
nmgr.doSync() // sync when our listen addresses change.
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (nmgr *natManager) sync() {
|
||||
select {
|
||||
case nmgr.syncFlag <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// doSync syncs the current NAT mappings, removing any outdated mappings and adding any
|
||||
// new mappings.
|
||||
func (nmgr *natManager) doSync() {
|
||||
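// Mark-and-sweep: clear all marks, re-mark entries still present in the current listen addresses, then remove mappings that were not re-marked and add mappings for new entries.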
for e := range nmgr.tracked {
|
||||
nmgr.tracked[e] = false
|
||||
}
|
||||
var newAddresses []entry
|
||||
for _, maddr := range nmgr.net.ListenAddresses() {
|
||||
// Strip the IP
|
||||
maIP, rest := ma.SplitFirst(maddr)
|
||||
if maIP == nil || rest == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
switch maIP.Protocol().Code {
|
||||
case ma.P_IP6, ma.P_IP4:
|
||||
default:
|
||||
continue
|
||||
}
|
||||
|
||||
// Only bother if we're listening on a unicast / unspecified IP.
|
||||
ip := net.IP(maIP.RawValue())
|
||||
if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract the port/protocol
|
||||
proto, _ := ma.SplitFirst(rest)
|
||||
if proto == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var protocol string
|
||||
switch proto.Protocol().Code {
|
||||
case ma.P_TCP:
|
||||
protocol = "tcp"
|
||||
case ma.P_UDP:
|
||||
protocol = "udp"
|
||||
default:
|
||||
continue
|
||||
}
|
||||
port, err := strconv.ParseUint(proto.Value(), 10, 16)
|
||||
if err != nil {
|
||||
// bug in multiaddr
|
||||
panic(err)
|
||||
}
|
||||
e := entry{protocol: protocol, port: int(port)}
|
||||
if _, ok := nmgr.tracked[e]; ok {
|
||||
nmgr.tracked[e] = true
|
||||
} else {
|
||||
newAddresses = append(newAddresses, e)
|
||||
}
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
|
||||
// Close old mappings
|
||||
for e, v := range nmgr.tracked {
|
||||
if !v {
|
||||
nmgr.nat.RemoveMapping(nmgr.ctx, e.protocol, e.port)
|
||||
delete(nmgr.tracked, e)
|
||||
}
|
||||
}
|
||||
|
||||
// Create new mappings.
|
||||
for _, e := range newAddresses {
|
||||
if err := nmgr.nat.AddMapping(nmgr.ctx, e.protocol, e.port); err != nil {
|
||||
log.Errorf("failed to port-map %s port %d: %s", e.protocol, e.port, err)
|
||||
}
|
||||
nmgr.tracked[e] = false
|
||||
}
|
||||
}
|
||||
|
||||
func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
|
||||
nmgr.natMx.Lock()
|
||||
defer nmgr.natMx.Unlock()
|
||||
|
||||
if nmgr.nat == nil { // NAT not yet initialized
|
||||
return nil
|
||||
}
|
||||
|
||||
var found bool
|
||||
var proto int // ma.P_TCP or ma.P_UDP
|
||||
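// Split the multiaddr just after the first TCP or UDP component: transport holds the thin-waist part (IP + port) that we can map, rest holds whatever follows it.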
transport, rest := ma.SplitFunc(addr, func(c ma.Component) bool {
|
||||
if found {
|
||||
return true
|
||||
}
|
||||
proto = c.Protocol().Code
|
||||
found = proto == ma.P_TCP || proto == ma.P_UDP
|
||||
return false
|
||||
})
|
||||
if !manet.IsThinWaist(transport) {
|
||||
return nil
|
||||
}
|
||||
|
||||
naddr, err := manet.ToNetAddr(transport)
|
||||
if err != nil {
|
||||
log.Error("error parsing net multiaddr %q: %s", transport, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
ip net.IP
|
||||
port int
|
||||
protocol string
|
||||
)
|
||||
switch naddr := naddr.(type) {
|
||||
case *net.TCPAddr:
|
||||
ip = naddr.IP
|
||||
port = naddr.Port
|
||||
protocol = "tcp"
|
||||
case *net.UDPAddr:
|
||||
ip = naddr.IP
|
||||
port = naddr.Port
|
||||
protocol = "udp"
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
|
||||
// We only map global unicast & unspecified addresses ports, not broadcast, multicast, etc.
|
||||
return nil
|
||||
}
|
||||
|
||||
extAddr, ok := nmgr.nat.GetMapping(protocol, port)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
var mappedAddr net.Addr
|
||||
switch naddr.(type) {
|
||||
case *net.TCPAddr:
|
||||
mappedAddr = net.TCPAddrFromAddrPort(extAddr)
|
||||
case *net.UDPAddr:
|
||||
mappedAddr = net.UDPAddrFromAddrPort(extAddr)
|
||||
}
|
||||
mappedMaddr, err := manet.FromNetAddr(mappedAddr)
|
||||
if err != nil {
|
||||
log.Errorf("mapped addr can't be turned into a multiaddr %q: %s", mappedAddr, err)
|
||||
return nil
|
||||
}
|
||||
extMaddr := mappedMaddr
|
||||
if rest != nil {
|
||||
extMaddr = ma.Join(extMaddr, rest)
|
||||
}
|
||||
return extMaddr
|
||||
}
|
||||
|
||||
type nmgrNetNotifiee natManager
|
||||
|
||||
func (nn *nmgrNetNotifiee) natManager() *natManager { return (*natManager)(nn) }
|
||||
func (nn *nmgrNetNotifiee) Listen(network.Network, ma.Multiaddr) { nn.natManager().sync() }
|
||||
func (nn *nmgrNetNotifiee) ListenClose(n network.Network, addr ma.Multiaddr) { nn.natManager().sync() }
|
||||
func (nn *nmgrNetNotifiee) Connected(network.Network, network.Conn) {}
|
||||
func (nn *nmgrNetNotifiee) Disconnected(network.Network, network.Conn) {}
|
||||
232
vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go
generated
vendored
Normal file
232
vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
package blankhost
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/connmgr"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/core/record"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
mstream "github.com/multiformats/go-multistream"
|
||||
)
|
||||
|
||||
var log = logging.Logger("blankhost")
|
||||
|
||||
// BlankHost is the thinnest implementation of the host.Host interface
|
||||
type BlankHost struct {
|
||||
n network.Network
|
||||
mux *mstream.MultistreamMuxer[protocol.ID]
|
||||
cmgr connmgr.ConnManager
|
||||
eventbus event.Bus
|
||||
emitters struct {
|
||||
evtLocalProtocolsUpdated event.Emitter
|
||||
}
|
||||
}
|
||||
|
||||
type config struct {
|
||||
cmgr connmgr.ConnManager
|
||||
eventBus event.Bus
|
||||
}
|
||||
|
||||
type Option = func(cfg *config)
|
||||
|
||||
func WithConnectionManager(cmgr connmgr.ConnManager) Option {
|
||||
return func(cfg *config) {
|
||||
cfg.cmgr = cmgr
|
||||
}
|
||||
}
|
||||
|
||||
func WithEventBus(eventBus event.Bus) Option {
|
||||
return func(cfg *config) {
|
||||
cfg.eventBus = eventBus
|
||||
}
|
||||
}
|
||||
|
||||
func NewBlankHost(n network.Network, options ...Option) *BlankHost {
|
||||
cfg := config{
|
||||
cmgr: &connmgr.NullConnMgr{},
|
||||
}
|
||||
for _, opt := range options {
|
||||
opt(&cfg)
|
||||
}
|
||||
|
||||
bh := &BlankHost{
|
||||
n: n,
|
||||
cmgr: cfg.cmgr,
|
||||
mux:      mstream.NewMultistreamMuxer[protocol.ID](),
eventbus: cfg.eventBus,
|
||||
}
|
||||
if bh.eventbus == nil {
|
||||
bh.eventbus = eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer()))
|
||||
}
|
||||
|
||||
// subscribe the connection manager to network notifications (has no effect with NullConnMgr)
|
||||
n.Notify(bh.cmgr.Notifee())
|
||||
|
||||
var err error
|
||||
if bh.emitters.evtLocalProtocolsUpdated, err = bh.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
n.SetStreamHandler(bh.newStreamHandler)
|
||||
|
||||
// persist a signed peer record for self to the peerstore.
|
||||
if err := bh.initSignedRecord(); err != nil {
|
||||
log.Errorf("error creating blank host, err=%s", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return bh
|
||||
}
|
||||
|
||||
func (bh *BlankHost) initSignedRecord() error {
|
||||
cab, ok := peerstore.GetCertifiedAddrBook(bh.n.Peerstore())
|
||||
if !ok {
|
||||
log.Error("peerstore does not support signed records")
|
||||
return errors.New("peerstore does not support signed records")
|
||||
}
|
||||
rec := peer.PeerRecordFromAddrInfo(peer.AddrInfo{ID: bh.ID(), Addrs: bh.Addrs()})
|
||||
ev, err := record.Seal(rec, bh.Peerstore().PrivKey(bh.ID()))
|
||||
if err != nil {
|
||||
log.Errorf("failed to create signed record for self, err=%s", err)
|
||||
return fmt.Errorf("failed to create signed record for self, err=%s", err)
|
||||
}
|
||||
_, err = cab.ConsumePeerRecord(ev, peerstore.PermanentAddrTTL)
|
||||
if err != nil {
|
||||
log.Errorf("failed to persist signed record to peerstore,err=%s", err)
|
||||
return fmt.Errorf("failed to persist signed record for self, err=%s", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
var _ host.Host = (*BlankHost)(nil)
|
||||
|
||||
func (bh *BlankHost) Addrs() []ma.Multiaddr {
|
||||
addrs, err := bh.n.InterfaceListenAddresses()
|
||||
if err != nil {
|
||||
log.Debug("error retrieving network interface addrs: ", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return addrs
|
||||
}
|
||||
|
||||
func (bh *BlankHost) Close() error {
|
||||
return bh.n.Close()
|
||||
}
|
||||
|
||||
func (bh *BlankHost) Connect(ctx context.Context, ai peer.AddrInfo) error {
|
||||
// absorb addresses into peerstore
|
||||
bh.Peerstore().AddAddrs(ai.ID, ai.Addrs, peerstore.TempAddrTTL)
|
||||
|
||||
cs := bh.n.ConnsToPeer(ai.ID)
|
||||
if len(cs) > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := bh.Network().DialPeer(ctx, ai.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to dial: %w", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (bh *BlankHost) Peerstore() peerstore.Peerstore {
|
||||
return bh.n.Peerstore()
|
||||
}
|
||||
|
||||
func (bh *BlankHost) ID() peer.ID {
|
||||
return bh.n.LocalPeer()
|
||||
}
|
||||
|
||||
func (bh *BlankHost) NewStream(ctx context.Context, p peer.ID, protos ...protocol.ID) (network.Stream, error) {
|
||||
s, err := bh.n.NewStream(ctx, p)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open stream: %w", err)
|
||||
}
|
||||
|
||||
selected, err := mstream.SelectOneOf(protos, s)
|
||||
if err != nil {
|
||||
s.Reset()
|
||||
return nil, fmt.Errorf("failed to negotiate protocol: %w", err)
|
||||
}
|
||||
|
||||
s.SetProtocol(selected)
|
||||
bh.Peerstore().AddProtocols(p, selected)
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (bh *BlankHost) RemoveStreamHandler(pid protocol.ID) {
|
||||
bh.Mux().RemoveHandler(pid)
|
||||
bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
|
||||
Removed: []protocol.ID{pid},
|
||||
})
|
||||
}
|
||||
|
||||
func (bh *BlankHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
|
||||
bh.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error {
|
||||
is := rwc.(network.Stream)
|
||||
is.SetProtocol(p)
|
||||
handler(is)
|
||||
return nil
|
||||
})
|
||||
bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
|
||||
Added: []protocol.ID{pid},
|
||||
})
|
||||
}
|
||||
|
||||
func (bh *BlankHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
|
||||
bh.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error {
|
||||
is := rwc.(network.Stream)
|
||||
is.SetProtocol(p)
|
||||
handler(is)
|
||||
return nil
|
||||
})
|
||||
bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
|
||||
Added: []protocol.ID{pid},
|
||||
})
|
||||
}
|
||||
|
||||
// newStreamHandler is the remote-opened stream handler for network.Network
|
||||
func (bh *BlankHost) newStreamHandler(s network.Stream) {
|
||||
protoID, handle, err := bh.Mux().Negotiate(s)
|
||||
if err != nil {
|
||||
log.Infow("protocol negotiation failed", "error", err)
|
||||
s.Reset()
|
||||
return
|
||||
}
|
||||
|
||||
s.SetProtocol(protoID)
|
||||
|
||||
handle(protoID, s)
|
||||
}
|
||||
|
||||
// TODO: i'm not sure this really needs to be here
|
||||
func (bh *BlankHost) Mux() protocol.Switch {
|
||||
return bh.mux
|
||||
}
|
||||
|
||||
// TODO: also not sure this fits... Might be better ways around this (leaky abstractions)
|
||||
func (bh *BlankHost) Network() network.Network {
|
||||
return bh.n
|
||||
}
|
||||
|
||||
func (bh *BlankHost) ConnManager() connmgr.ConnManager {
|
||||
return bh.cmgr
|
||||
}
|
||||
|
||||
func (bh *BlankHost) EventBus() event.Bus {
|
||||
return bh.eventbus
|
||||
}
|
||||
418
vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go
generated
vendored
Normal file
418
vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go
generated
vendored
Normal file
@@ -0,0 +1,418 @@
|
||||
package eventbus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
)
|
||||
|
||||
// /////////////////////
|
||||
// BUS
|
||||
|
||||
// basicBus is a type-based event delivery system
|
||||
type basicBus struct {
|
||||
lk sync.RWMutex
|
||||
nodes map[reflect.Type]*node
|
||||
wildcard *wildcardNode
|
||||
metricsTracer MetricsTracer
|
||||
}
|
||||
|
||||
var _ event.Bus = (*basicBus)(nil)
|
||||
|
||||
type emitter struct {
|
||||
n *node
|
||||
w *wildcardNode
|
||||
typ reflect.Type
|
||||
closed atomic.Bool
|
||||
dropper func(reflect.Type)
|
||||
metricsTracer MetricsTracer
|
||||
}
|
||||
|
||||
func (e *emitter) Emit(evt interface{}) error {
|
||||
if e.closed.Load() {
|
||||
return fmt.Errorf("emitter is closed")
|
||||
}
|
||||
|
||||
e.n.emit(evt)
|
||||
e.w.emit(evt)
|
||||
|
||||
if e.metricsTracer != nil {
|
||||
e.metricsTracer.EventEmitted(e.typ)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *emitter) Close() error {
|
||||
if !e.closed.CompareAndSwap(false, true) {
|
||||
return fmt.Errorf("closed an emitter more than once")
|
||||
}
|
||||
if e.n.nEmitters.Add(-1) == 0 {
|
||||
e.dropper(e.typ)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewBus(opts ...Option) event.Bus {
|
||||
bus := &basicBus{
|
||||
nodes: map[reflect.Type]*node{},
|
||||
wildcard: &wildcardNode{},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(bus)
|
||||
}
|
||||
return bus
|
||||
}
|
||||
|
||||
func (b *basicBus) withNode(typ reflect.Type, cb func(*node), async func(*node)) {
|
||||
b.lk.Lock()
|
||||
|
||||
n, ok := b.nodes[typ]
|
||||
if !ok {
|
||||
n = newNode(typ, b.metricsTracer)
|
||||
b.nodes[typ] = n
|
||||
}
|
||||
|
||||
n.lk.Lock()
|
||||
b.lk.Unlock()
|
||||
|
||||
cb(n)
|
||||
|
||||
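// The node lock is held while cb runs; when async is non-nil it is released only after async completes in a goroutine, so work such as delivering the last stateful event is ordered before any later emit on this node.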
if async == nil {
|
||||
n.lk.Unlock()
|
||||
} else {
|
||||
go func() {
|
||||
defer n.lk.Unlock()
|
||||
async(n)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *basicBus) tryDropNode(typ reflect.Type) {
|
||||
b.lk.Lock()
|
||||
n, ok := b.nodes[typ]
|
||||
if !ok { // already dropped
|
||||
b.lk.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
n.lk.Lock()
|
||||
if n.nEmitters.Load() > 0 || len(n.sinks) > 0 {
|
||||
n.lk.Unlock()
|
||||
b.lk.Unlock()
|
||||
return // still in use
|
||||
}
|
||||
n.lk.Unlock()
|
||||
|
||||
delete(b.nodes, typ)
|
||||
b.lk.Unlock()
|
||||
}
|
||||
|
||||
type wildcardSub struct {
|
||||
ch chan interface{}
|
||||
w *wildcardNode
|
||||
metricsTracer MetricsTracer
|
||||
name string
|
||||
}
|
||||
|
||||
func (w *wildcardSub) Out() <-chan interface{} {
|
||||
return w.ch
|
||||
}
|
||||
|
||||
func (w *wildcardSub) Close() error {
|
||||
w.w.removeSink(w.ch)
|
||||
if w.metricsTracer != nil {
|
||||
w.metricsTracer.RemoveSubscriber(reflect.TypeOf(event.WildcardSubscription))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *wildcardSub) Name() string {
|
||||
return w.name
|
||||
}
|
||||
|
||||
type namedSink struct {
|
||||
name string
|
||||
ch chan interface{}
|
||||
}
|
||||
|
||||
type sub struct {
|
||||
ch chan interface{}
|
||||
nodes []*node
|
||||
dropper func(reflect.Type)
|
||||
metricsTracer MetricsTracer
|
||||
name string
|
||||
}
|
||||
|
||||
func (s *sub) Name() string {
|
||||
return s.name
|
||||
}
|
||||
|
||||
func (s *sub) Out() <-chan interface{} {
|
||||
return s.ch
|
||||
}
|
||||
|
||||
func (s *sub) Close() error {
|
||||
go func() {
|
||||
// drain the event channel, will return when closed and drained.
|
||||
// this is necessary to unblock publishes to this channel.
|
||||
for range s.ch {
|
||||
}
|
||||
}()
|
||||
|
||||
for _, n := range s.nodes {
|
||||
n.lk.Lock()
|
||||
|
||||
for i := 0; i < len(n.sinks); i++ {
|
||||
if n.sinks[i].ch == s.ch {
|
||||
n.sinks[i], n.sinks[len(n.sinks)-1] = n.sinks[len(n.sinks)-1], nil
|
||||
n.sinks = n.sinks[:len(n.sinks)-1]
|
||||
|
||||
if s.metricsTracer != nil {
|
||||
s.metricsTracer.RemoveSubscriber(n.typ)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
tryDrop := len(n.sinks) == 0 && n.nEmitters.Load() == 0
|
||||
|
||||
n.lk.Unlock()
|
||||
|
||||
if tryDrop {
|
||||
s.dropper(n.typ)
|
||||
}
|
||||
}
|
||||
close(s.ch)
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ event.Subscription = (*sub)(nil)
|
||||
|
||||
// Subscribe creates a new subscription. Failing to drain the channel will cause
|
||||
// publishers to get blocked. CancelFunc is guaranteed to return after the last send
|
||||
// to the channel
|
||||
func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt) (_ event.Subscription, err error) {
|
||||
settings := newSubSettings()
|
||||
for _, opt := range opts {
|
||||
if err := opt(&settings); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if evtTypes == event.WildcardSubscription {
|
||||
out := &wildcardSub{
|
||||
ch: make(chan interface{}, settings.buffer),
|
||||
w: b.wildcard,
|
||||
metricsTracer: b.metricsTracer,
|
||||
name: settings.name,
|
||||
}
|
||||
b.wildcard.addSink(&namedSink{ch: out.ch, name: out.name})
|
||||
return out, nil
|
||||
}
|
||||
|
||||
types, ok := evtTypes.([]interface{})
|
||||
if !ok {
|
||||
types = []interface{}{evtTypes}
|
||||
}
|
||||
|
||||
if len(types) > 1 {
|
||||
for _, t := range types {
|
||||
if t == event.WildcardSubscription {
|
||||
return nil, fmt.Errorf("wildcard subscriptions must be started separately")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out := &sub{
|
||||
ch: make(chan interface{}, settings.buffer),
|
||||
nodes: make([]*node, len(types)),
|
||||
|
||||
dropper: b.tryDropNode,
|
||||
metricsTracer: b.metricsTracer,
|
||||
name: settings.name,
|
||||
}
|
||||
|
||||
for _, etyp := range types {
|
||||
if reflect.TypeOf(etyp).Kind() != reflect.Ptr {
|
||||
return nil, errors.New("subscribe called with non-pointer type")
|
||||
}
|
||||
}
|
||||
|
||||
for i, etyp := range types {
|
||||
typ := reflect.TypeOf(etyp)
|
||||
|
||||
b.withNode(typ.Elem(), func(n *node) {
|
||||
n.sinks = append(n.sinks, &namedSink{ch: out.ch, name: out.name})
|
||||
out.nodes[i] = n
|
||||
if b.metricsTracer != nil {
|
||||
b.metricsTracer.AddSubscriber(typ.Elem())
|
||||
}
|
||||
}, func(n *node) {
|
||||
if n.keepLast {
|
||||
l := n.last
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
out.ch <- l
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Emitter creates a new emitter
|
||||
//
|
||||
// eventType accepts typed nil pointers, and uses the type information to
|
||||
// select output type
|
||||
//
|
||||
// Example:
|
||||
// emit, err := eventbus.Emitter(new(EventT))
|
||||
// defer emit.Close() // MUST call this after being done with the emitter
|
||||
//
|
||||
// emit.Emit(EventT{})
|
||||
func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e event.Emitter, err error) {
|
||||
if evtType == event.WildcardSubscription {
|
||||
return nil, fmt.Errorf("illegal emitter for wildcard subscription")
|
||||
}
|
||||
|
||||
var settings emitterSettings
|
||||
for _, opt := range opts {
|
||||
if err := opt(&settings); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
typ := reflect.TypeOf(evtType)
|
||||
if typ.Kind() != reflect.Ptr {
|
||||
return nil, errors.New("emitter called with non-pointer type")
|
||||
}
|
||||
typ = typ.Elem()
|
||||
|
||||
b.withNode(typ, func(n *node) {
|
||||
n.nEmitters.Add(1)
|
||||
n.keepLast = n.keepLast || settings.makeStateful
|
||||
e = &emitter{n: n, typ: typ, dropper: b.tryDropNode, w: b.wildcard, metricsTracer: b.metricsTracer}
|
||||
}, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// GetAllEventTypes returns all the event types that this bus has emitters
|
||||
// or subscribers for.
|
||||
func (b *basicBus) GetAllEventTypes() []reflect.Type {
|
||||
b.lk.RLock()
|
||||
defer b.lk.RUnlock()
|
||||
|
||||
types := make([]reflect.Type, 0, len(b.nodes))
|
||||
for t := range b.nodes {
|
||||
types = append(types, t)
|
||||
}
|
||||
return types
|
||||
}
|
||||
|
||||
// /////////////////////
|
||||
// NODE
|
||||
|
||||
type wildcardNode struct {
|
||||
sync.RWMutex
|
||||
nSinks atomic.Int32
|
||||
sinks []*namedSink
|
||||
metricsTracer MetricsTracer
|
||||
}
|
||||
|
||||
func (n *wildcardNode) addSink(sink *namedSink) {
|
||||
n.nSinks.Add(1) // ok to do outside the lock
|
||||
n.Lock()
|
||||
n.sinks = append(n.sinks, sink)
|
||||
n.Unlock()
|
||||
|
||||
if n.metricsTracer != nil {
|
||||
n.metricsTracer.AddSubscriber(reflect.TypeOf(event.WildcardSubscription))
|
||||
}
|
||||
}
|
||||
|
||||
func (n *wildcardNode) removeSink(ch chan interface{}) {
|
||||
n.nSinks.Add(-1) // ok to do outside the lock
|
||||
n.Lock()
|
||||
for i := 0; i < len(n.sinks); i++ {
|
||||
if n.sinks[i].ch == ch {
|
||||
n.sinks[i], n.sinks[len(n.sinks)-1] = n.sinks[len(n.sinks)-1], nil
|
||||
n.sinks = n.sinks[:len(n.sinks)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
n.Unlock()
|
||||
}
|
||||
|
||||
func (n *wildcardNode) emit(evt interface{}) {
|
||||
if n.nSinks.Load() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
n.RLock()
|
||||
for _, sink := range n.sinks {
|
||||
|
||||
// Sending metrics before sending on channel allows us to
|
||||
// record channel full events before blocking
|
||||
sendSubscriberMetrics(n.metricsTracer, sink)
|
||||
|
||||
sink.ch <- evt
|
||||
}
|
||||
n.RUnlock()
|
||||
}
|
||||
|
||||
type node struct {
|
||||
// Note: make sure to NEVER lock basicBus.lk when this lock is held
|
||||
lk sync.Mutex
|
||||
|
||||
typ reflect.Type
|
||||
|
||||
// emitter ref count
|
||||
nEmitters atomic.Int32
|
||||
|
||||
keepLast bool
|
||||
last interface{}
|
||||
|
||||
sinks []*namedSink
|
||||
metricsTracer MetricsTracer
|
||||
}
|
||||
|
||||
func newNode(typ reflect.Type, metricsTracer MetricsTracer) *node {
|
||||
return &node{
|
||||
typ: typ,
|
||||
metricsTracer: metricsTracer,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) emit(evt interface{}) {
|
||||
typ := reflect.TypeOf(evt)
|
||||
if typ != n.typ {
|
||||
panic(fmt.Sprintf("Emit called with wrong type. expected: %s, got: %s", n.typ, typ))
|
||||
}
|
||||
|
||||
n.lk.Lock()
|
||||
if n.keepLast {
|
||||
n.last = evt
|
||||
}
|
||||
|
||||
for _, sink := range n.sinks {
|
||||
|
||||
// Sending metrics before sending on channel allows us to
|
||||
// record channel full events before blocking
|
||||
sendSubscriberMetrics(n.metricsTracer, sink)
|
||||
sink.ch <- evt
|
||||
}
|
||||
n.lk.Unlock()
|
||||
}
|
||||
|
||||
func sendSubscriberMetrics(metricsTracer MetricsTracer, sink *namedSink) {
|
||||
if metricsTracer != nil {
|
||||
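// The +1 accounts for the event that is about to be queued on the channel.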
metricsTracer.SubscriberQueueLength(sink.name, len(sink.ch)+1)
|
||||
metricsTracer.SubscriberQueueFull(sink.name, len(sink.ch)+1 >= cap(sink.ch))
|
||||
metricsTracer.SubscriberEventQueued(sink.name)
|
||||
}
|
||||
}
|
||||
164
vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go
generated
vendored
Normal file
164
vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
package eventbus
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/libp2p/go-libp2p/p2p/metricshelper"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const metricNamespace = "libp2p_eventbus"
|
||||
|
||||
var (
|
||||
eventsEmitted = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "events_emitted_total",
|
||||
Help: "Events Emitted",
|
||||
},
|
||||
[]string{"event"},
|
||||
)
|
||||
totalSubscribers = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "subscribers_total",
|
||||
Help: "Number of subscribers for an event type",
|
||||
},
|
||||
[]string{"event"},
|
||||
)
|
||||
subscriberQueueLength = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "subscriber_queue_length",
|
||||
Help: "Subscriber queue length",
|
||||
},
|
||||
[]string{"subscriber_name"},
|
||||
)
|
||||
subscriberQueueFull = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "subscriber_queue_full",
|
||||
Help: "Subscriber Queue completely full",
|
||||
},
|
||||
[]string{"subscriber_name"},
|
||||
)
|
||||
subscriberEventQueued = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: metricNamespace,
|
||||
Name: "subscriber_event_queued",
|
||||
Help: "Event Queued for subscriber",
|
||||
},
|
||||
[]string{"subscriber_name"},
|
||||
)
|
||||
collectors = []prometheus.Collector{
|
||||
eventsEmitted,
|
||||
totalSubscribers,
|
||||
subscriberQueueLength,
|
||||
subscriberQueueFull,
|
||||
subscriberEventQueued,
|
||||
}
|
||||
)
|
||||
|
||||
// MetricsTracer tracks metrics for the eventbus subsystem
|
||||
type MetricsTracer interface {
|
||||
|
||||
// EventEmitted counts the total number of events grouped by event type
|
||||
EventEmitted(typ reflect.Type)
|
||||
|
||||
// AddSubscriber adds a subscriber for the event type
|
||||
AddSubscriber(typ reflect.Type)
|
||||
|
||||
// RemoveSubscriber removes a subscriber for the event type
|
||||
RemoveSubscriber(typ reflect.Type)
|
||||
|
||||
// SubscriberQueueLength is the length of the subscriber's channel
|
||||
SubscriberQueueLength(name string, n int)
|
||||
|
||||
// SubscriberQueueFull tracks whether a subscriber's channel is full
|
||||
SubscriberQueueFull(name string, isFull bool)
|
||||
|
||||
// SubscriberEventQueued counts the total number of events grouped by subscriber
|
||||
SubscriberEventQueued(name string)
|
||||
}
|
||||
|
||||
type metricsTracer struct{}
|
||||
|
||||
var _ MetricsTracer = &metricsTracer{}
|
||||
|
||||
type metricsTracerSetting struct {
|
||||
reg prometheus.Registerer
|
||||
}
|
||||
|
||||
type MetricsTracerOption func(*metricsTracerSetting)
|
||||
|
||||
func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
|
||||
return func(s *metricsTracerSetting) {
|
||||
if reg != nil {
|
||||
s.reg = reg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
|
||||
setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
|
||||
for _, opt := range opts {
|
||||
opt(setting)
|
||||
}
|
||||
metricshelper.RegisterCollectors(setting.reg, collectors...)
|
||||
return &metricsTracer{}
|
||||
}
|
||||
|
||||
func (m *metricsTracer) EventEmitted(typ reflect.Type) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
|
||||
*tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
|
||||
eventsEmitted.WithLabelValues(*tags...).Inc()
|
||||
}
|
||||
|
||||
func (m *metricsTracer) AddSubscriber(typ reflect.Type) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
|
||||
*tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
|
||||
totalSubscribers.WithLabelValues(*tags...).Inc()
|
||||
}
|
||||
|
||||
func (m *metricsTracer) RemoveSubscriber(typ reflect.Type) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
|
||||
*tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
|
||||
totalSubscribers.WithLabelValues(*tags...).Dec()
|
||||
}
|
||||
|
||||
func (m *metricsTracer) SubscriberQueueLength(name string, n int) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
|
||||
*tags = append(*tags, name)
|
||||
subscriberQueueLength.WithLabelValues(*tags...).Set(float64(n))
|
||||
}
|
||||
|
||||
func (m *metricsTracer) SubscriberQueueFull(name string, isFull bool) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
|
||||
*tags = append(*tags, name)
|
||||
observer := subscriberQueueFull.WithLabelValues(*tags...)
|
||||
if isFull {
|
||||
observer.Set(1)
|
||||
} else {
|
||||
observer.Set(0)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metricsTracer) SubscriberEventQueued(name string) {
|
||||
tags := metricshelper.GetStringSlice()
|
||||
defer metricshelper.PutStringSlice(tags)
|
||||
|
||||
*tags = append(*tags, name)
|
||||
subscriberEventQueued.WithLabelValues(*tags...).Inc()
|
||||
}
|
||||
79
vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go
generated
vendored
Normal file
79
vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
package eventbus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type subSettings struct {
|
||||
buffer int
|
||||
name string
|
||||
}
|
||||
|
||||
var subCnt atomic.Int64
|
||||
|
||||
var subSettingsDefault = subSettings{
|
||||
buffer: 16,
|
||||
}
|
||||
|
||||
// newSubSettings returns the settings for a new subscriber
|
||||
// The default naming strategy is sub-<fileName>-L<lineNum>
|
||||
func newSubSettings() subSettings {
|
||||
settings := subSettingsDefault
|
||||
_, file, line, ok := runtime.Caller(2) // skip=1 is eventbus.Subscriber
|
||||
if ok {
|
||||
file = strings.TrimPrefix(file, "github.com/")
|
||||
// remove the version number from the path, for example
|
||||
// go-libp2p-package@v0.x.y-some-hash-123/file.go will be shortened to go-libp2p-package/file.go
|
||||
if idx1 := strings.Index(file, "@"); idx1 != -1 {
|
||||
if idx2 := strings.Index(file[idx1:], "/"); idx2 != -1 {
|
||||
file = file[:idx1] + file[idx1+idx2:]
|
||||
}
|
||||
}
|
||||
settings.name = fmt.Sprintf("%s-L%d", file, line)
|
||||
} else {
|
||||
settings.name = fmt.Sprintf("subscriber-%d", subCnt.Add(1))
|
||||
}
|
||||
return settings
|
||||
}
|
||||
|
||||
func BufSize(n int) func(interface{}) error {
|
||||
return func(s interface{}) error {
|
||||
s.(*subSettings).buffer = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Name(name string) func(interface{}) error {
|
||||
return func(s interface{}) error {
|
||||
s.(*subSettings).name = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type emitterSettings struct {
|
||||
makeStateful bool
|
||||
}
|
||||
|
||||
// Stateful is an Emitter option which makes the eventbus channel
|
||||
// 'remember' the last event sent, and when a new subscriber joins the
|
||||
// bus, the remembered event is immediately sent to the subscription
|
||||
// channel.
|
||||
//
|
||||
// This allows providing state tracking for dynamic systems, and/or
|
||||
// allows new subscribers to verify that there are Emitters on the channel
|
||||
func Stateful(s interface{}) error {
|
||||
s.(*emitterSettings).makeStateful = true
|
||||
return nil
|
||||
}
|
||||
|
||||
type Option func(*basicBus)
|
||||
|
||||
func WithMetricsTracer(metricsTracer MetricsTracer) Option {
|
||||
return func(bus *basicBus) {
|
||||
bus.metricsTracer = metricsTracer
|
||||
bus.wildcard.metricsTracer = metricsTracer
|
||||
}
|
||||
}
|
||||
58
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/metrics.go
generated
vendored
Normal file
58
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
package peerstore
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// LatencyEWMASmoothing governs the decay of the EWMA (the speed
|
||||
// at which it changes). This must be a normalized (0-1) value.
|
||||
// 1 is 100% change, 0 is no change.
|
||||
var LatencyEWMASmoothing = 0.1
|
||||
|
||||
type metrics struct {
|
||||
mutex sync.RWMutex
|
||||
latmap map[peer.ID]time.Duration
|
||||
}
|
||||
|
||||
func NewMetrics() *metrics {
|
||||
return &metrics{
|
||||
latmap: make(map[peer.ID]time.Duration),
|
||||
}
|
||||
}
|
||||
|
||||
// RecordLatency records a new latency measurement
|
||||
func (m *metrics) RecordLatency(p peer.ID, next time.Duration) {
|
||||
nextf := float64(next)
|
||||
s := LatencyEWMASmoothing
|
||||
if s > 1 || s < 0 {
|
||||
s = 0.1 // ignore the knob. it's broken. look, it jiggles.
|
||||
}
|
||||
|
||||
m.mutex.Lock()
|
||||
ewma, found := m.latmap[p]
|
||||
ewmaf := float64(ewma)
|
||||
if !found {
|
||||
m.latmap[p] = next // when no data, just take it as the mean.
|
||||
} else {
|
||||
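// Standard EWMA update: new = (1 - s)*old + s*sample, where s is the smoothing factor.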
nextf = ((1.0 - s) * ewmaf) + (s * nextf)
|
||||
m.latmap[p] = time.Duration(nextf)
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
}
|
||||
|
||||
// LatencyEWMA returns an exponentially-weighted moving avg.
|
||||
// of all measurements of a peer's latency.
|
||||
func (m *metrics) LatencyEWMA(p peer.ID) time.Duration {
|
||||
m.mutex.RLock()
|
||||
defer m.mutex.RUnlock()
|
||||
return m.latmap[p]
|
||||
}
|
||||
|
||||
func (m *metrics) RemovePeer(p peer.ID) {
|
||||
m.mutex.Lock()
|
||||
delete(m.latmap, p)
|
||||
m.mutex.Unlock()
|
||||
}
|
||||
22
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/peerstore.go
generated
vendored
Normal file
22
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/peerstore.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
package peerstore
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p/core/peerstore"
|
||||
)
|
||||
|
||||
func PeerInfos(ps pstore.Peerstore, peers peer.IDSlice) []peer.AddrInfo {
|
||||
pi := make([]peer.AddrInfo, len(peers))
|
||||
for i, p := range peers {
|
||||
pi[i] = ps.PeerInfo(p)
|
||||
}
|
||||
return pi
|
||||
}
|
||||
|
||||
func PeerInfoIDs(pis []peer.AddrInfo) peer.IDSlice {
|
||||
ps := make(peer.IDSlice, len(pis))
|
||||
for i, pi := range pis {
|
||||
ps[i] = pi.ID
|
||||
}
|
||||
return ps
|
||||
}
|
||||
530
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go
generated
vendored
Normal file
530
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go
generated
vendored
Normal file
@@ -0,0 +1,530 @@
|
||||
package pstoremem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/record"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var log = logging.Logger("peerstore")
|
||||
|
||||
type expiringAddr struct {
|
||||
Addr ma.Multiaddr
|
||||
TTL time.Duration
|
||||
Expires time.Time
|
||||
}
|
||||
|
||||
func (e *expiringAddr) ExpiredBy(t time.Time) bool {
|
||||
return !t.Before(e.Expires)
|
||||
}
|
||||
|
||||
type peerRecordState struct {
|
||||
Envelope *record.Envelope
|
||||
Seq uint64
|
||||
}
|
||||
|
||||
type addrSegments [256]*addrSegment
|
||||
|
||||
type addrSegment struct {
|
||||
sync.RWMutex
|
||||
|
||||
// Use pointers to save memory. Maps always leave some fraction of their
|
||||
// space unused. Storing the *values* directly in the map will
|
||||
// drastically increase the space waste. In our case, by 6x.
|
||||
addrs map[peer.ID]map[string]*expiringAddr
|
||||
|
||||
signedPeerRecords map[peer.ID]*peerRecordState
|
||||
}
|
||||
|
||||
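// get shards peers across the 256 segments by the last byte of the peer ID, which reduces lock contention between unrelated peers.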
func (segments *addrSegments) get(p peer.ID) *addrSegment {
|
||||
if len(p) == 0 { // it's not terribly useful to use an empty peer ID, but at least we should not panic
|
||||
return segments[0]
|
||||
}
|
||||
return segments[uint8(p[len(p)-1])]
|
||||
}
|
||||
|
||||
type clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
type realclock struct{}
|
||||
|
||||
func (rc realclock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// memoryAddrBook manages addresses.
|
||||
type memoryAddrBook struct {
|
||||
segments addrSegments
|
||||
|
||||
refCount sync.WaitGroup
|
||||
cancel func()
|
||||
|
||||
subManager *AddrSubManager
|
||||
clock clock
|
||||
}
|
||||
|
||||
var _ pstore.AddrBook = (*memoryAddrBook)(nil)
|
||||
var _ pstore.CertifiedAddrBook = (*memoryAddrBook)(nil)
|
||||
|
||||
func NewAddrBook() *memoryAddrBook {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
ab := &memoryAddrBook{
|
||||
segments: func() (ret addrSegments) {
|
||||
for i := range ret {
|
||||
ret[i] = &addrSegment{
|
||||
addrs: make(map[peer.ID]map[string]*expiringAddr),
|
||||
signedPeerRecords: make(map[peer.ID]*peerRecordState)}
|
||||
}
|
||||
return ret
|
||||
}(),
|
||||
subManager: NewAddrSubManager(),
|
||||
cancel: cancel,
|
||||
clock: realclock{},
|
||||
}
|
||||
ab.refCount.Add(1)
|
||||
go ab.background(ctx)
|
||||
return ab
|
||||
}
|
||||
|
||||
type AddrBookOption func(book *memoryAddrBook) error
|
||||
|
||||
func WithClock(clock clock) AddrBookOption {
|
||||
return func(book *memoryAddrBook) error {
|
||||
book.clock = clock
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// background periodically schedules a gc
|
||||
func (mab *memoryAddrBook) background(ctx context.Context) {
|
||||
defer mab.refCount.Done()
|
||||
ticker := time.NewTicker(1 * time.Hour)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
mab.gc()
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mab *memoryAddrBook) Close() error {
|
||||
mab.cancel()
|
||||
mab.refCount.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// gc garbage collects the in-memory address book.
|
||||
func (mab *memoryAddrBook) gc() {
|
||||
now := mab.clock.Now()
|
||||
for _, s := range mab.segments {
|
||||
s.Lock()
|
||||
for p, amap := range s.addrs {
|
||||
for k, addr := range amap {
|
||||
if addr.ExpiredBy(now) {
|
||||
delete(amap, k)
|
||||
}
|
||||
}
|
||||
if len(amap) == 0 {
|
||||
delete(s.addrs, p)
|
||||
delete(s.signedPeerRecords, p)
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (mab *memoryAddrBook) PeersWithAddrs() peer.IDSlice {
|
||||
// deduplicate, since the same peer could have both signed & unsigned addrs
|
||||
set := make(map[peer.ID]struct{})
|
||||
for _, s := range mab.segments {
|
||||
s.RLock()
|
||||
for pid, amap := range s.addrs {
|
||||
if len(amap) > 0 {
|
||||
set[pid] = struct{}{}
|
||||
}
|
||||
}
|
||||
s.RUnlock()
|
||||
}
|
||||
peers := make(peer.IDSlice, 0, len(set))
|
||||
for pid := range set {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
|
||||
func (mab *memoryAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
|
||||
mab.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
|
||||
}
|
||||
|
||||
// AddAddrs gives memoryAddrBook addresses to use, with a given ttl
|
||||
// (time-to-live), after which the address is no longer valid.
|
||||
// This function never reduces the TTL or expiration of an address.
|
||||
func (mab *memoryAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
||||
// if we have a valid peer record, ignore unsigned addrs
|
||||
// peerRec := mab.GetPeerRecord(p)
|
||||
// if peerRec != nil {
|
||||
// return
|
||||
// }
|
||||
mab.addAddrs(p, addrs, ttl)
|
||||
}
|
||||
|
||||
// ConsumePeerRecord adds addresses from a signed peer.PeerRecord (contained in
|
||||
// a record.Envelope), which will expire after the given TTL.
|
||||
// See https://godoc.org/github.com/libp2p/go-libp2p/core/peerstore#CertifiedAddrBook for more details.
|
||||
func (mab *memoryAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, ttl time.Duration) (bool, error) {
|
||||
r, err := recordEnvelope.Record()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
rec, ok := r.(*peer.PeerRecord)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("unable to process envelope: not a PeerRecord")
|
||||
}
|
||||
if !rec.PeerID.MatchesPublicKey(recordEnvelope.PublicKey) {
|
||||
return false, fmt.Errorf("signing key does not match PeerID in PeerRecord")
|
||||
}
|
||||
|
||||
// ensure seq is greater than, or equal to, the last received
|
||||
s := mab.segments.get(rec.PeerID)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
lastState, found := s.signedPeerRecords[rec.PeerID]
|
||||
if found && lastState.Seq > rec.Seq {
|
||||
return false, nil
|
||||
}
|
||||
s.signedPeerRecords[rec.PeerID] = &peerRecordState{
|
||||
Envelope: recordEnvelope,
|
||||
Seq: rec.Seq,
|
||||
}
|
||||
mab.addAddrsUnlocked(s, rec.PeerID, rec.Addrs, ttl, true)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (mab *memoryAddrBook) addAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
||||
s := mab.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
mab.addAddrsUnlocked(s, p, addrs, ttl, false)
|
||||
}
|
||||
|
||||
func (mab *memoryAddrBook) addAddrsUnlocked(s *addrSegment, p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, signed bool) {
|
||||
// if ttl is zero, exit. nothing to do.
|
||||
if ttl <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
amap, ok := s.addrs[p]
|
||||
if !ok {
|
||||
amap = make(map[string]*expiringAddr)
|
||||
s.addrs[p] = amap
|
||||
}
|
||||
|
||||
exp := mab.clock.Now().Add(ttl)
|
||||
for _, addr := range addrs {
|
||||
// Remove suffix of /p2p/peer-id from address
|
||||
addr, addrPid := peer.SplitAddr(addr)
|
||||
if addr == nil {
|
||||
log.Warnw("Was passed nil multiaddr", "peer", p)
|
||||
continue
|
||||
}
|
||||
if addrPid != "" && addrPid != p {
|
||||
log.Warnf("Was passed p2p address with a different peerId. found: %s, expected: %s", addrPid, p)
|
||||
continue
|
||||
}
|
||||
// find the highest TTL and Expiry time between
|
||||
// existing records and function args
|
||||
a, found := amap[string(addr.Bytes())] // won't allocate.
|
||||
if !found {
|
||||
// not found, announce it.
|
||||
entry := &expiringAddr{Addr: addr, Expires: exp, TTL: ttl}
|
||||
amap[string(addr.Bytes())] = entry
|
||||
mab.subManager.BroadcastAddr(p, addr)
|
||||
} else {
|
||||
// update ttl & exp to whichever is greater between new and existing entry
|
||||
if ttl > a.TTL {
|
||||
a.TTL = ttl
|
||||
}
|
||||
if exp.After(a.Expires) {
|
||||
a.Expires = exp
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetAddr calls mgr.SetAddrs(p, addr, ttl)
|
||||
func (mab *memoryAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
|
||||
mab.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
|
||||
}
|
||||
|
||||
// SetAddrs sets the ttl on addresses. This clears any TTL there previously.
|
||||
// This is used when we receive the best estimate of the validity of an address.
|
||||
func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
|
||||
s := mab.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
amap, ok := s.addrs[p]
|
||||
if !ok {
|
||||
amap = make(map[string]*expiringAddr)
|
||||
s.addrs[p] = amap
|
||||
}
|
||||
|
||||
exp := mab.clock.Now().Add(ttl)
|
||||
for _, addr := range addrs {
|
||||
addr, addrPid := peer.SplitAddr(addr)
|
||||
if addr == nil {
|
||||
log.Warnw("was passed nil multiaddr", "peer", p)
|
||||
continue
|
||||
}
|
||||
if addrPid != "" && addrPid != p {
|
||||
log.Warnf("was passed p2p address with a different peerId, found: %s wanted: %s", addrPid, p)
|
||||
continue
|
||||
}
|
||||
aBytes := addr.Bytes()
|
||||
key := string(aBytes)
|
||||
|
||||
// re-set all of them for new ttl.
|
||||
if ttl > 0 {
|
||||
amap[key] = &expiringAddr{Addr: addr, Expires: exp, TTL: ttl}
|
||||
mab.subManager.BroadcastAddr(p, addr)
|
||||
} else {
|
||||
delete(amap, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateAddrs updates the addresses associated with the given peer that have
|
||||
// the given oldTTL to have the given newTTL.
|
||||
func (mab *memoryAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
|
||||
s := mab.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
exp := mab.clock.Now().Add(newTTL)
|
||||
amap, found := s.addrs[p]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
for k, a := range amap {
|
||||
if oldTTL == a.TTL {
|
||||
if newTTL == 0 {
|
||||
delete(amap, k)
|
||||
} else {
|
||||
a.TTL = newTTL
|
||||
a.Expires = exp
|
||||
amap[k] = a
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Addrs returns all known (and valid) addresses for a given peer
|
||||
func (mab *memoryAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
|
||||
s := mab.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
return validAddrs(mab.clock.Now(), s.addrs[p])
|
||||
}
|
||||
|
||||
func validAddrs(now time.Time, amap map[string]*expiringAddr) []ma.Multiaddr {
|
||||
good := make([]ma.Multiaddr, 0, len(amap))
|
||||
if amap == nil {
|
||||
return good
|
||||
}
|
||||
for _, m := range amap {
|
||||
if !m.ExpiredBy(now) {
|
||||
good = append(good, m.Addr)
|
||||
}
|
||||
}
|
||||
|
||||
return good
|
||||
}
|
||||
|
||||
// GetPeerRecord returns an Envelope containing a PeerRecord for the
|
||||
// given peer id, if one exists.
|
||||
// Returns nil if no signed PeerRecord exists for the peer.
|
||||
func (mab *memoryAddrBook) GetPeerRecord(p peer.ID) *record.Envelope {
|
||||
s := mab.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
// although the signed record gets garbage collected when all addrs inside it are expired,
|
||||
// we may be in between the expiration time and the GC interval
|
||||
// so, we check to see if we have any valid signed addrs before returning the record
|
||||
if len(validAddrs(mab.clock.Now(), s.addrs[p])) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
state := s.signedPeerRecords[p]
|
||||
if state == nil {
|
||||
return nil
|
||||
}
|
||||
return state.Envelope
|
||||
}
|
||||
|
||||
// ClearAddrs removes all previously stored addresses
|
||||
func (mab *memoryAddrBook) ClearAddrs(p peer.ID) {
|
||||
s := mab.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
delete(s.addrs, p)
|
||||
delete(s.signedPeerRecords, p)
|
||||
}
|
||||
|
||||
// AddrStream returns a channel on which all new addresses discovered for a
|
||||
// given peer ID will be published.
|
||||
func (mab *memoryAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
|
||||
s := mab.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
baseaddrslice := s.addrs[p]
|
||||
initial := make([]ma.Multiaddr, 0, len(baseaddrslice))
|
||||
for _, a := range baseaddrslice {
|
||||
initial = append(initial, a.Addr)
|
||||
}
|
||||
|
||||
return mab.subManager.AddrStream(ctx, p, initial)
|
||||
}
|
||||
|
||||
type addrSub struct {
|
||||
pubch chan ma.Multiaddr
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (s *addrSub) pubAddr(a ma.Multiaddr) {
|
||||
select {
|
||||
case s.pubch <- a:
|
||||
case <-s.ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
// An abstracted, pub-sub manager for address streams. Extracted from
|
||||
// memoryAddrBook in order to support additional implementations.
|
||||
type AddrSubManager struct {
|
||||
mu sync.RWMutex
|
||||
subs map[peer.ID][]*addrSub
|
||||
}
|
||||
|
||||
// NewAddrSubManager initializes an AddrSubManager.
|
||||
func NewAddrSubManager() *AddrSubManager {
|
||||
return &AddrSubManager{
|
||||
subs: make(map[peer.ID][]*addrSub),
|
||||
}
|
||||
}
|
||||
|
||||
// Used internally by the address stream coroutine to remove a subscription
|
||||
// from the manager.
|
||||
func (mgr *AddrSubManager) removeSub(p peer.ID, s *addrSub) {
|
||||
mgr.mu.Lock()
|
||||
defer mgr.mu.Unlock()
|
||||
|
||||
subs := mgr.subs[p]
|
||||
if len(subs) == 1 {
|
||||
if subs[0] != s {
|
||||
return
|
||||
}
|
||||
delete(mgr.subs, p)
|
||||
return
|
||||
}
|
||||
|
||||
for i, v := range subs {
|
||||
if v == s {
|
||||
subs[i] = subs[len(subs)-1]
|
||||
subs[len(subs)-1] = nil
|
||||
mgr.subs[p] = subs[:len(subs)-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BroadcastAddr broadcasts a new address to all subscribed streams.
|
||||
func (mgr *AddrSubManager) BroadcastAddr(p peer.ID, addr ma.Multiaddr) {
|
||||
mgr.mu.RLock()
|
||||
defer mgr.mu.RUnlock()
|
||||
|
||||
if subs, ok := mgr.subs[p]; ok {
|
||||
for _, sub := range subs {
|
||||
sub.pubAddr(addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AddrStream creates a new subscription for a given peer ID, pre-populating the
|
||||
// channel with any addresses we might already have on file.
|
||||
func (mgr *AddrSubManager) AddrStream(ctx context.Context, p peer.ID, initial []ma.Multiaddr) <-chan ma.Multiaddr {
|
||||
sub := &addrSub{pubch: make(chan ma.Multiaddr), ctx: ctx}
|
||||
out := make(chan ma.Multiaddr)
|
||||
|
||||
mgr.mu.Lock()
|
||||
mgr.subs[p] = append(mgr.subs[p], sub)
|
||||
mgr.mu.Unlock()
|
||||
|
||||
sort.Sort(addrList(initial))
|
||||
|
||||
go func(buffer []ma.Multiaddr) {
|
||||
defer close(out)
|
||||
|
||||
sent := make(map[string]struct{}, len(buffer))
|
||||
for _, a := range buffer {
|
||||
sent[string(a.Bytes())] = struct{}{}
|
||||
}
|
||||
|
||||
var outch chan ma.Multiaddr
|
||||
var next ma.Multiaddr
|
||||
if len(buffer) > 0 {
|
||||
next = buffer[0]
|
||||
buffer = buffer[1:]
|
||||
outch = out
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case outch <- next:
|
||||
if len(buffer) > 0 {
|
||||
next = buffer[0]
|
||||
buffer = buffer[1:]
|
||||
} else {
|
||||
outch = nil
|
||||
next = nil
|
||||
}
|
||||
case naddr := <-sub.pubch:
|
||||
if _, ok := sent[string(naddr.Bytes())]; ok {
|
||||
continue
|
||||
}
|
||||
sent[string(naddr.Bytes())] = struct{}{}
|
||||
|
||||
if next == nil {
|
||||
next = naddr
|
||||
outch = out
|
||||
} else {
|
||||
buffer = append(buffer, naddr)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
mgr.removeSub(p, sub)
|
||||
return
|
||||
}
|
||||
}
|
||||
}(initial)
|
||||
|
||||
return out
|
||||
}
|
||||
97
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/keybook.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
package pstoremem
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
ic "github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p/core/peerstore"
|
||||
)
|
||||
|
||||
type memoryKeyBook struct {
|
||||
sync.RWMutex // same lock. won't happen a ton.
|
||||
pks map[peer.ID]ic.PubKey
|
||||
sks map[peer.ID]ic.PrivKey
|
||||
}
|
||||
|
||||
var _ pstore.KeyBook = (*memoryKeyBook)(nil)
|
||||
|
||||
func NewKeyBook() *memoryKeyBook {
|
||||
return &memoryKeyBook{
|
||||
pks: map[peer.ID]ic.PubKey{},
|
||||
sks: map[peer.ID]ic.PrivKey{},
|
||||
}
|
||||
}
|
||||
|
||||
func (mkb *memoryKeyBook) PeersWithKeys() peer.IDSlice {
|
||||
mkb.RLock()
|
||||
ps := make(peer.IDSlice, 0, len(mkb.pks)+len(mkb.sks))
|
||||
for p := range mkb.pks {
|
||||
ps = append(ps, p)
|
||||
}
|
||||
for p := range mkb.sks {
|
||||
if _, found := mkb.pks[p]; !found {
|
||||
ps = append(ps, p)
|
||||
}
|
||||
}
|
||||
mkb.RUnlock()
|
||||
return ps
|
||||
}
|
||||
|
||||
func (mkb *memoryKeyBook) PubKey(p peer.ID) ic.PubKey {
|
||||
mkb.RLock()
|
||||
pk := mkb.pks[p]
|
||||
mkb.RUnlock()
|
||||
if pk != nil {
|
||||
return pk
|
||||
}
|
||||
pk, err := p.ExtractPublicKey()
|
||||
if err == nil {
|
||||
mkb.Lock()
|
||||
mkb.pks[p] = pk
|
||||
mkb.Unlock()
|
||||
}
|
||||
return pk
|
||||
}
|
||||
|
||||
func (mkb *memoryKeyBook) AddPubKey(p peer.ID, pk ic.PubKey) error {
|
||||
// check it's correct first
|
||||
if !p.MatchesPublicKey(pk) {
|
||||
return errors.New("ID does not match PublicKey")
|
||||
}
|
||||
|
||||
mkb.Lock()
|
||||
mkb.pks[p] = pk
|
||||
mkb.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mkb *memoryKeyBook) PrivKey(p peer.ID) ic.PrivKey {
|
||||
mkb.RLock()
|
||||
defer mkb.RUnlock()
|
||||
return mkb.sks[p]
|
||||
}
|
||||
|
||||
func (mkb *memoryKeyBook) AddPrivKey(p peer.ID, sk ic.PrivKey) error {
|
||||
if sk == nil {
|
||||
return errors.New("sk is nil (PrivKey)")
|
||||
}
|
||||
|
||||
// check it's correct first
|
||||
if !p.MatchesPrivateKey(sk) {
|
||||
return errors.New("ID does not match PrivateKey")
|
||||
}
|
||||
|
||||
mkb.Lock()
|
||||
mkb.sks[p] = sk
|
||||
mkb.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mkb *memoryKeyBook) RemovePeer(p peer.ID) {
|
||||
mkb.Lock()
|
||||
delete(mkb.sks, p)
|
||||
delete(mkb.pks, p)
|
||||
mkb.Unlock()
|
||||
}
|
||||
54
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
package pstoremem
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p/core/peerstore"
|
||||
)
|
||||
|
||||
type memoryPeerMetadata struct {
|
||||
// store other data, like versions
|
||||
ds map[peer.ID]map[string]interface{}
|
||||
dslock sync.RWMutex
|
||||
}
|
||||
|
||||
var _ pstore.PeerMetadata = (*memoryPeerMetadata)(nil)
|
||||
|
||||
func NewPeerMetadata() *memoryPeerMetadata {
|
||||
return &memoryPeerMetadata{
|
||||
ds: make(map[peer.ID]map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (ps *memoryPeerMetadata) Put(p peer.ID, key string, val interface{}) error {
|
||||
ps.dslock.Lock()
|
||||
defer ps.dslock.Unlock()
|
||||
m, ok := ps.ds[p]
|
||||
if !ok {
|
||||
m = make(map[string]interface{})
|
||||
ps.ds[p] = m
|
||||
}
|
||||
m[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ps *memoryPeerMetadata) Get(p peer.ID, key string) (interface{}, error) {
|
||||
ps.dslock.RLock()
|
||||
defer ps.dslock.RUnlock()
|
||||
m, ok := ps.ds[p]
|
||||
if !ok {
|
||||
return nil, pstore.ErrNotFound
|
||||
}
|
||||
val, ok := m[key]
|
||||
if !ok {
|
||||
return nil, pstore.ErrNotFound
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (ps *memoryPeerMetadata) RemovePeer(p peer.ID) {
|
||||
ps.dslock.Lock()
|
||||
delete(ps.ds, p)
|
||||
ps.dslock.Unlock()
|
||||
}
|
||||
114
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/peerstore.go
generated
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
package pstoremem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore"
|
||||
)
|
||||
|
||||
type pstoremem struct {
|
||||
peerstore.Metrics
|
||||
|
||||
*memoryKeyBook
|
||||
*memoryAddrBook
|
||||
*memoryProtoBook
|
||||
*memoryPeerMetadata
|
||||
}
|
||||
|
||||
var _ peerstore.Peerstore = &pstoremem{}
|
||||
|
||||
type Option interface{}
|
||||
|
||||
// NewPeerstore creates an in-memory thread-safe collection of peers.
|
||||
// It's the caller's responsibility to call RemovePeer to ensure
|
||||
// that memory consumption of the peerstore doesn't grow unboundedly.
|
||||
func NewPeerstore(opts ...Option) (ps *pstoremem, err error) {
|
||||
ab := NewAddrBook()
|
||||
defer func() {
|
||||
if err != nil {
|
||||
ab.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
var protoBookOpts []ProtoBookOption
|
||||
for _, opt := range opts {
|
||||
switch o := opt.(type) {
|
||||
case ProtoBookOption:
|
||||
protoBookOpts = append(protoBookOpts, o)
|
||||
case AddrBookOption:
|
||||
o(ab)
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected peer store option: %v", o)
|
||||
}
|
||||
}
|
||||
pb, err := NewProtoBook(protoBookOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pstoremem{
|
||||
Metrics: pstore.NewMetrics(),
|
||||
memoryKeyBook: NewKeyBook(),
|
||||
memoryAddrBook: ab,
|
||||
memoryProtoBook: pb,
|
||||
memoryPeerMetadata: NewPeerMetadata(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ps *pstoremem) Close() (err error) {
|
||||
var errs []error
|
||||
weakClose := func(name string, c interface{}) {
|
||||
if cl, ok := c.(io.Closer); ok {
|
||||
if err = cl.Close(); err != nil {
|
||||
errs = append(errs, fmt.Errorf("%s error: %s", name, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
weakClose("keybook", ps.memoryKeyBook)
|
||||
weakClose("addressbook", ps.memoryAddrBook)
|
||||
weakClose("protobook", ps.memoryProtoBook)
|
||||
weakClose("peermetadata", ps.memoryPeerMetadata)
|
||||
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("failed while closing peerstore; err(s): %q", errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ps *pstoremem) Peers() peer.IDSlice {
|
||||
set := map[peer.ID]struct{}{}
|
||||
for _, p := range ps.PeersWithKeys() {
|
||||
set[p] = struct{}{}
|
||||
}
|
||||
for _, p := range ps.PeersWithAddrs() {
|
||||
set[p] = struct{}{}
|
||||
}
|
||||
|
||||
pps := make(peer.IDSlice, 0, len(set))
|
||||
for p := range set {
|
||||
pps = append(pps, p)
|
||||
}
|
||||
return pps
|
||||
}
|
||||
|
||||
func (ps *pstoremem) PeerInfo(p peer.ID) peer.AddrInfo {
|
||||
return peer.AddrInfo{
|
||||
ID: p,
|
||||
Addrs: ps.memoryAddrBook.Addrs(p),
|
||||
}
|
||||
}
|
||||
|
||||
// RemovePeer removes entries associated with a peer from:
|
||||
// * the KeyBook
|
||||
// * the ProtoBook
|
||||
// * the PeerMetadata
|
||||
// * the Metrics
|
||||
// It DOES NOT remove the peer from the AddrBook.
|
||||
func (ps *pstoremem) RemovePeer(p peer.ID) {
|
||||
ps.memoryKeyBook.RemovePeer(p)
|
||||
ps.memoryProtoBook.RemovePeer(p)
|
||||
ps.memoryPeerMetadata.RemovePeer(p)
|
||||
ps.Metrics.RemovePeer(p)
|
||||
}
|
||||
192
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
package pstoremem
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
pstore "github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
type protoSegment struct {
|
||||
sync.RWMutex
|
||||
protocols map[peer.ID]map[protocol.ID]struct{}
|
||||
}
|
||||
|
||||
type protoSegments [256]*protoSegment
|
||||
|
||||
func (s *protoSegments) get(p peer.ID) *protoSegment {
|
||||
return s[byte(p[len(p)-1])]
|
||||
}
|
||||
|
||||
var errTooManyProtocols = errors.New("too many protocols")
|
||||
|
||||
type memoryProtoBook struct {
|
||||
segments protoSegments
|
||||
|
||||
maxProtos int
|
||||
|
||||
lk sync.RWMutex
|
||||
interned map[protocol.ID]protocol.ID
|
||||
}
|
||||
|
||||
var _ pstore.ProtoBook = (*memoryProtoBook)(nil)
|
||||
|
||||
type ProtoBookOption func(book *memoryProtoBook) error
|
||||
|
||||
func WithMaxProtocols(num int) ProtoBookOption {
|
||||
return func(pb *memoryProtoBook) error {
|
||||
pb.maxProtos = num
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func NewProtoBook(opts ...ProtoBookOption) (*memoryProtoBook, error) {
|
||||
pb := &memoryProtoBook{
|
||||
interned: make(map[protocol.ID]protocol.ID, 256),
|
||||
segments: func() (ret protoSegments) {
|
||||
for i := range ret {
|
||||
ret[i] = &protoSegment{
|
||||
protocols: make(map[peer.ID]map[protocol.ID]struct{}),
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}(),
|
||||
maxProtos: 1024,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(pb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return pb, nil
|
||||
}
|
||||
|
||||
func (pb *memoryProtoBook) internProtocol(proto protocol.ID) protocol.ID {
|
||||
// check if it is interned with the read lock
|
||||
pb.lk.RLock()
|
||||
interned, ok := pb.interned[proto]
|
||||
pb.lk.RUnlock()
|
||||
|
||||
if ok {
|
||||
return interned
|
||||
}
|
||||
|
||||
// intern with the write lock
|
||||
pb.lk.Lock()
|
||||
defer pb.lk.Unlock()
|
||||
|
||||
// check again in case it got interned in between locks
|
||||
interned, ok = pb.interned[proto]
|
||||
if ok {
|
||||
return interned
|
||||
}
|
||||
|
||||
pb.interned[proto] = proto
|
||||
return proto
|
||||
}
|
||||
|
||||
func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...protocol.ID) error {
|
||||
if len(protos) > pb.maxProtos {
|
||||
return errTooManyProtocols
|
||||
}
|
||||
|
||||
newprotos := make(map[protocol.ID]struct{}, len(protos))
|
||||
for _, proto := range protos {
|
||||
newprotos[pb.internProtocol(proto)] = struct{}{}
|
||||
}
|
||||
|
||||
s := pb.segments.get(p)
|
||||
s.Lock()
|
||||
s.protocols[p] = newprotos
|
||||
s.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...protocol.ID) error {
|
||||
s := pb.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
protomap, ok := s.protocols[p]
|
||||
if !ok {
|
||||
protomap = make(map[protocol.ID]struct{})
|
||||
s.protocols[p] = protomap
|
||||
}
|
||||
if len(protomap)+len(protos) > pb.maxProtos {
|
||||
return errTooManyProtocols
|
||||
}
|
||||
|
||||
for _, proto := range protos {
|
||||
protomap[pb.internProtocol(proto)] = struct{}{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]protocol.ID, error) {
|
||||
s := pb.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
out := make([]protocol.ID, 0, len(s.protocols[p]))
|
||||
for k := range s.protocols[p] {
|
||||
out = append(out, k)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...protocol.ID) error {
|
||||
s := pb.segments.get(p)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
protomap, ok := s.protocols[p]
|
||||
if !ok {
|
||||
// nothing to remove.
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, proto := range protos {
|
||||
delete(protomap, pb.internProtocol(proto))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...protocol.ID) ([]protocol.ID, error) {
|
||||
s := pb.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
out := make([]protocol.ID, 0, len(protos))
|
||||
for _, proto := range protos {
|
||||
if _, ok := s.protocols[p][proto]; ok {
|
||||
out = append(out, proto)
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (pb *memoryProtoBook) FirstSupportedProtocol(p peer.ID, protos ...protocol.ID) (protocol.ID, error) {
|
||||
s := pb.segments.get(p)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
for _, proto := range protos {
|
||||
if _, ok := s.protocols[p][proto]; ok {
|
||||
return proto, nil
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (pb *memoryProtoBook) RemovePeer(p peer.ID) {
|
||||
s := pb.segments.get(p)
|
||||
s.Lock()
|
||||
delete(s.protocols, p)
|
||||
s.Unlock()
|
||||
}
|
||||
50
vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/sorting.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
package pstoremem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
mafmt "github.com/multiformats/go-multiaddr-fmt"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
func isFDCostlyTransport(a ma.Multiaddr) bool {
|
||||
return mafmt.TCP.Matches(a)
|
||||
}
|
||||
|
||||
type addrList []ma.Multiaddr
|
||||
|
||||
func (al addrList) Len() int { return len(al) }
|
||||
func (al addrList) Swap(i, j int) { al[i], al[j] = al[j], al[i] }
|
||||
|
||||
func (al addrList) Less(i, j int) bool {
|
||||
a := al[i]
|
||||
b := al[j]
|
||||
|
||||
// dial localhost addresses next, they should fail immediately
|
||||
lba := manet.IsIPLoopback(a)
|
||||
lbb := manet.IsIPLoopback(b)
|
||||
if lba && !lbb {
|
||||
return true
|
||||
}
|
||||
|
||||
// dial utp and similar 'non-fd-consuming' addresses first
|
||||
fda := isFDCostlyTransport(a)
|
||||
fdb := isFDCostlyTransport(b)
|
||||
if !fda {
|
||||
return fdb
|
||||
}
|
||||
|
||||
// if 'b' doesn't take a file descriptor
|
||||
if !fdb {
|
||||
return false
|
||||
}
|
||||
|
||||
// if 'b' is loopback and both take file descriptors
|
||||
if lbb {
|
||||
return false
|
||||
}
|
||||
|
||||
// for the rest, just sort by bytes
|
||||
return bytes.Compare(a.Bytes(), b.Bytes()) > 0
|
||||
}
|
||||
132
vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go
generated
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
package pstoremanager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/peerstore"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
)
|
||||
|
||||
var log = logging.Logger("pstoremanager")
|
||||
|
||||
type Option func(*PeerstoreManager) error
|
||||
|
||||
// WithGracePeriod sets the grace period.
|
||||
// If a peer doesn't reconnect during the grace period, its data is removed.
|
||||
// Default: 1 minute.
|
||||
func WithGracePeriod(p time.Duration) Option {
|
||||
return func(m *PeerstoreManager) error {
|
||||
m.gracePeriod = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithCleanupInterval sets the cleanup interval.
|
||||
// During a clean up run peers that disconnected before the grace period are removed.
|
||||
// If unset, the interval is set to half the grace period.
|
||||
func WithCleanupInterval(t time.Duration) Option {
|
||||
return func(m *PeerstoreManager) error {
|
||||
m.cleanupInterval = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type PeerstoreManager struct {
|
||||
pstore peerstore.Peerstore
|
||||
eventBus event.Bus
|
||||
|
||||
cancel context.CancelFunc
|
||||
refCount sync.WaitGroup
|
||||
|
||||
gracePeriod time.Duration
|
||||
cleanupInterval time.Duration
|
||||
}
|
||||
|
||||
func NewPeerstoreManager(pstore peerstore.Peerstore, eventBus event.Bus, opts ...Option) (*PeerstoreManager, error) {
|
||||
m := &PeerstoreManager{
|
||||
pstore: pstore,
|
||||
gracePeriod: time.Minute,
|
||||
eventBus: eventBus,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if m.cleanupInterval == 0 {
|
||||
m.cleanupInterval = m.gracePeriod / 2
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *PeerstoreManager) Start() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
m.cancel = cancel
|
||||
sub, err := m.eventBus.Subscribe(&event.EvtPeerConnectednessChanged{}, eventbus.Name("pstoremanager"))
|
||||
if err != nil {
|
||||
log.Warnf("subscription failed. Peerstore manager not activated. Error: %s", err)
|
||||
return
|
||||
}
|
||||
m.refCount.Add(1)
|
||||
go m.background(ctx, sub)
|
||||
}
|
||||
|
||||
func (m *PeerstoreManager) background(ctx context.Context, sub event.Subscription) {
|
||||
defer m.refCount.Done()
|
||||
defer sub.Close()
|
||||
disconnected := make(map[peer.ID]time.Time)
|
||||
|
||||
ticker := time.NewTicker(m.cleanupInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
defer func() {
|
||||
for p := range disconnected {
|
||||
m.pstore.RemovePeer(p)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case e, ok := <-sub.Out():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ev := e.(event.EvtPeerConnectednessChanged)
|
||||
p := ev.Peer
|
||||
switch ev.Connectedness {
|
||||
case network.NotConnected:
|
||||
if _, ok := disconnected[p]; !ok {
|
||||
disconnected[p] = time.Now()
|
||||
}
|
||||
case network.Connected:
|
||||
// If we reconnect to the peer before we've cleared the information, keep it.
|
||||
delete(disconnected, p)
|
||||
}
|
||||
case <-ticker.C:
|
||||
now := time.Now()
|
||||
for p, disconnectTime := range disconnected {
|
||||
if disconnectTime.Add(m.gracePeriod).Before(now) {
|
||||
m.pstore.RemovePeer(p)
|
||||
delete(disconnected, p)
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *PeerstoreManager) Close() error {
|
||||
if m.cancel != nil {
|
||||
m.cancel()
|
||||
}
|
||||
m.refCount.Wait()
|
||||
return nil
|
||||
}
|
||||
96
vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
package relaysvc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/p2p/host/eventbus"
|
||||
relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
|
||||
)
|
||||
|
||||
type RelayManager struct {
|
||||
host host.Host
|
||||
|
||||
mutex sync.Mutex
|
||||
relay *relayv2.Relay
|
||||
opts []relayv2.Option
|
||||
|
||||
refCount sync.WaitGroup
|
||||
ctxCancel context.CancelFunc
|
||||
}
|
||||
|
||||
func NewRelayManager(host host.Host, opts ...relayv2.Option) *RelayManager {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
m := &RelayManager{
|
||||
host: host,
|
||||
opts: opts,
|
||||
ctxCancel: cancel,
|
||||
}
|
||||
m.refCount.Add(1)
|
||||
go m.background(ctx)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *RelayManager) background(ctx context.Context) {
|
||||
defer m.refCount.Done()
|
||||
defer func() {
|
||||
m.mutex.Lock()
|
||||
if m.relay != nil {
|
||||
m.relay.Close()
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
}()
|
||||
|
||||
subReachability, _ := m.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("relaysvc"))
|
||||
defer subReachability.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case ev, ok := <-subReachability.Out():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if err := m.reachabilityChanged(ev.(event.EvtLocalReachabilityChanged).Reachability); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *RelayManager) reachabilityChanged(r network.Reachability) error {
|
||||
switch r {
|
||||
case network.ReachabilityPublic:
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
// This could happen if two consecutive EvtLocalReachabilityChanged report the same reachability.
|
||||
// This shouldn't happen, but it's safer to double-check.
|
||||
if m.relay != nil {
|
||||
return nil
|
||||
}
|
||||
relay, err := relayv2.New(m.host, m.opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.relay = relay
|
||||
default:
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
if m.relay != nil {
|
||||
err := m.relay.Close()
|
||||
m.relay = nil
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RelayManager) Close() error {
|
||||
m.ctxCancel()
|
||||
m.refCount.Wait()
|
||||
return nil
|
||||
}
|
||||
626
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md
generated
vendored
Normal file
@@ -0,0 +1,626 @@
|
||||
# The libp2p Network Resource Manager
|
||||
|
||||
This package contains the canonical implementation of the libp2p
|
||||
Network Resource Manager interface.
|
||||
|
||||
The implementation is based on the concept of Resource Management
|
||||
Scopes, whereby resource usage is constrained by a DAG of scopes,
|
||||
accounting for multiple levels of resource constraints.
|
||||
|
||||
The Resource Manager doesn't prioritize resource requests at all; it simply
|
||||
checks if the resource being requested is currently below the defined limits and
|
||||
returns an error if the limit is reached. It has no notion of honest vs bad peers.
|
||||
|
||||
The Resource Manager does have a special notion of [allowlisted](#allowlisting-multiaddrs-to-mitigate-eclipse-attacks) multiaddrs that
|
||||
have their own limits if the normal system limits are reached.
|
||||
|
||||
## Usage
|
||||
|
||||
The Resource Manager is intended to be used with go-libp2p. go-libp2p sets up a
|
||||
resource manager with the default autoscaled limits if none is provided, but if
|
||||
you want to configure things or if you want to enable metrics you'll use the
|
||||
resource manager like so:
|
||||
|
||||
```go
|
||||
// Start with the default scaling limits.
|
||||
scalingLimits := rcmgr.DefaultLimits
|
||||
|
||||
// Add limits around included libp2p protocols
|
||||
libp2p.SetDefaultServiceLimits(&scalingLimits)
|
||||
|
||||
// Turn the scaling limits into a concrete set of limits using `.AutoScale`. This
|
||||
// scales the limits proportional to your system memory.
|
||||
scaledDefaultLimits := scalingLimits.AutoScale()
|
||||
|
||||
// Tweak certain settings
|
||||
cfg := rcmgr.PartialLimitConfig{
|
||||
System: rcmgr.ResourceLimits{
|
||||
// Allow unlimited outbound streams
|
||||
StreamsOutbound: rcmgr.Unlimited,
|
||||
},
|
||||
// Everything else is default. The exact values will come from `scaledDefaultLimits` above.
|
||||
}
|
||||
|
||||
// Create our limits by using our cfg and replacing the default values with values from `scaledDefaultLimits`
|
||||
limits := cfg.Build(scaledDefaultLimits)
|
||||
|
||||
// The resource manager expects a limiter, so we create one from our limits.
|
||||
limiter := rcmgr.NewFixedLimiter(limits)
|
||||
|
||||
// Metrics are enabled by default. If you want to disable metrics, use the
|
||||
// WithMetricsDisabled option
|
||||
// Initialize the resource manager
|
||||
rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithMetricsDisabled())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Create a libp2p host
|
||||
host, err := libp2p.New(libp2p.ResourceManager(rm))
|
||||
```
|
||||
|
||||
### Saving the limits config
|
||||
The easiest way to save the defined limits is to serialize the `PartialLimitConfig`
|
||||
type as JSON.
|
||||
|
||||
```go
|
||||
noisyNeighbor, _ := peer.Decode("QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf")
|
||||
cfg := rcmgr.PartialLimitConfig{
|
||||
System: &rcmgr.ResourceLimits{
|
||||
// Allow unlimited outbound streams
|
||||
StreamsOutbound: rcmgr.Unlimited,
|
||||
},
|
||||
Peer: map[peer.ID]rcmgr.ResourceLimits{
|
||||
noisyNeighbor: {
|
||||
// No inbound connections from this peer
|
||||
ConnsInbound: rcmgr.BlockAllLimit,
|
||||
// But let me open connections to them
|
||||
Conns: rcmgr.DefaultLimit,
|
||||
ConnsOutbound: rcmgr.DefaultLimit,
|
||||
// No inbound streams from this peer
|
||||
StreamsInbound: rcmgr.BlockAllLimit,
|
||||
// And let me open unlimited (by me) outbound streams (the peer may have their own limits on me)
|
||||
StreamsOutbound: rcmgr.Unlimited,
|
||||
},
|
||||
},
|
||||
}
|
||||
jsonBytes, _ := json.Marshal(&cfg)
|
||||
|
||||
// string(jsonBytes)
|
||||
// {
|
||||
// "System": {
|
||||
// "StreamsOutbound": "unlimited"
|
||||
// },
|
||||
// "Peer": {
|
||||
// "QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf": {
|
||||
// "StreamsInbound": "blockAll",
|
||||
// "StreamsOutbound": "unlimited",
|
||||
// "ConnsInbound": "blockAll"
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
```
|
||||
|
||||
This will omit defaults from the JSON output. It will also serialize the
|
||||
blockAll and unlimited values explicitly.
|
||||
|
||||
The `Memory` field is serialized as a string to work around the JSON limitation
|
||||
of 32-bit integers (`Memory` is an int64).
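To load a saved configuration later, the same `PartialLimitConfig` type can be unmarshalled from JSON and combined with the scaled defaults. A minimal sketch (it assumes the `scaledDefaultLimits` value from the Usage example above; `limits.json` is just an illustrative path):

```go
data, err := os.ReadFile("limits.json")
if err != nil {
	panic(err)
}

var cfg rcmgr.PartialLimitConfig
if err := json.Unmarshal(data, &cfg); err != nil {
	panic(err)
}

// Anything not present in the JSON falls back to the scaled defaults.
limits := cfg.Build(scaledDefaultLimits)
limiter := rcmgr.NewFixedLimiter(limits)
```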
|
||||
|
||||
## Basic Resources
|
||||
|
||||
### Memory
|
||||
|
||||
Perhaps the most fundamental resource is memory, and in particular
|
||||
buffers used for network operations. The system must provide an
|
||||
interface for components to reserve memory that accounts for buffers
|
||||
(and possibly other live objects), which is scoped within the component.
|
||||
Before a new buffer is allocated, the component should try a memory
|
||||
reservation, which can fail if the resource limit is exceeded. It is
|
||||
then up to the component to react to the error condition, depending on
|
||||
the situation. For example, a muxer failing to grow a buffer in
|
||||
response to a window change should simply retain the old buffer and
|
||||
operate at perhaps degraded performance.
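For example, a component that wants to grow a buffer can attempt the reservation first and keep its old buffer if the limit is hit. A minimal sketch (it assumes a `scope` value implementing `network.ResourceScope`, for instance obtained from a stream via `Stream.Scope()`):

```go
const chunk = 64 << 10 // 64 KiB

// Try the reservation before allocating; on failure keep the old buffer
// and operate at reduced performance.
if err := scope.ReserveMemory(chunk, network.ReservationPriorityLow); err == nil {
	buf := make([]byte, chunk)
	defer scope.ReleaseMemory(chunk)
	_ = buf
}
```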
|
||||
|
||||
### File Descriptors
|
||||
|
||||
File descriptors are an important resource that uses memory (and
|
||||
computational time) at the system level. They are also a scarce
|
||||
resource, as typically (unless the user explicitly intervenes) they
|
||||
are constrained by the system. Exhaustion of file descriptors may
|
||||
render the application incapable of operating (e.g., because it is
|
||||
unable to open a file). This is important for libp2p because most
|
||||
operating systems represent sockets as file descriptors.
|
||||
|
||||
### Connections
|
||||
|
||||
Connections are a higher-level concept endemic to libp2p; in order to
|
||||
communicate with another peer, a connection must first be
|
||||
established. Connections are an important resource in libp2p, as they
|
||||
consume memory, goroutines, and possibly file descriptors.
|
||||
|
||||
We distinguish between inbound and outbound connections, as the former
|
||||
are initiated by remote peers and consume resources in response to
|
||||
network events and thus need to be tightly controlled in order to
|
||||
protect the application from overload or attack. Outbound
|
||||
connections are typically initiated by the application's volition and
|
||||
don't need to be controlled as tightly. However, outbound connections
|
||||
still consume resources and may be initiated in response to network
|
||||
events because of (potentially faulty) application logic, so they
|
||||
still need to be constrained.
|
||||
|
||||
### Streams
|
||||
|
||||
Streams are the fundamental object of interaction in libp2p; all
|
||||
protocol interactions happen through a stream that goes over some
|
||||
connection. Streams are a fundamental resource in libp2p, as they
|
||||
consume memory and goroutines at all levels of the stack.
|
||||
|
||||
Streams always belong to a peer, specify a protocol and they may
|
||||
belong to some service in the system. Hence, this suggests that apart
|
||||
from global limits, we can constrain stream usage at finer
|
||||
granularity, at the protocol and service level.
|
||||
|
||||
Once again, we distinguish between inbound and outbound streams.
|
||||
Inbound streams are initiated by remote peers and consume resources in
|
||||
response to network events; controlling inbound stream usage is again
|
||||
paramount for protecting the system from overload or attack.
|
||||
Outbound streams are normally initiated by the application or some
|
||||
service in the system in order to effect some protocol
|
||||
interaction. However, they can also be initiated in response to
|
||||
network events because of application or service logic, so we still
|
||||
need to constrain them.
|
||||
|
||||
|
||||
## Resource Scopes
|
||||
|
||||
The Resource Manager is based on the concept of resource
|
||||
scopes. Resource Scopes account for resource usage that is temporally
|
||||
delimited for the span of the scope. Resource Scopes conceptually
|
||||
form a DAG, providing us with a mechanism to enforce multiresolution
|
||||
resource accounting. Downstream resource usage is aggregated at scopes
|
||||
higher up the graph.
|
||||
|
||||
The following diagram depicts the canonical scope graph:
|
||||
```
|
||||
System
|
||||
+------------> Transient.............+................+
|
||||
| . .
|
||||
+------------> Service------------- . ----------+ .
|
||||
| . | .
|
||||
+-------------> Protocol----------- . ----------+ .
|
||||
| . | .
|
||||
+-------------->* Peer \/ | .
|
||||
+------------> Connection | .
|
||||
| \/ \/
|
||||
+---------------------------> Stream
|
||||
```
|
||||
|
||||
### The System Scope
|
||||
|
||||
The system scope is the top level scope that accounts for global
|
||||
resource usage at all levels of the system. This scope nests and
|
||||
constrains all other scopes and institutes global hard limits.
|
||||
|
||||
### The Transient Scope
|
||||
|
||||
The transient scope accounts for resources that are in the process of
|
||||
full establishment. For instance, a new connection prior to the
|
||||
handshake does not belong to any peer, but it still needs to be
|
||||
constrained as this opens an avenue for attacks in transient resource
|
||||
usage. Similarly, a stream that has not negotiated a protocol yet is
|
||||
constrained by the transient scope.
|
||||
|
||||
The transient scope effectively represents a DMZ (DeMilitarized Zone),
|
||||
where resource usage can be accounted for connections and streams that
|
||||
are not fully established.
|
||||
|
||||
### The Allowlist System Scope
|
||||
|
||||
Same as the normal system scope above, but is used if the normal system scope is
|
||||
already at its limits and the resource is from an allowlisted peer. See
|
||||
[Allowlisting multiaddrs to mitigate eclipse
|
||||
attacks](#allowlisting-multiaddrs-to-mitigate-eclipse-attacks) for more
|
||||
information.
|
||||
|
||||
### The Allowlist Transient Scope
|
||||
|
||||
Same as the normal transient scope above, but is used if the normal transient
|
||||
scope is already at its limits and the resource is from an allowlisted peer. See
|
||||
[Allowlisting multiaddrs to mitigate eclipse
|
||||
attacks](#allowlisting-multiaddrs-to-mitigate-eclipse-attacks) for more
|
||||
information.
|
||||
|
||||
### Service Scopes
|
||||
|
||||
The system is typically organized across services, which may be
|
||||
ambient and provide basic functionality to the system (e.g. identify,
|
||||
autonat, relay, etc). Alternatively, services may be explicitly
|
||||
instantiated by the application, and provide core components of its
|
||||
functionality (e.g. pubsub, the DHT, etc).
|
||||
|
||||
Services are logical groupings of streams that implement protocol flow
|
||||
and may additionally consume resources such as memory. Services
|
||||
typically have at least one stream handler, so they are subject to
|
||||
inbound stream creation and resource usage in response to network
|
||||
events. As such, the system explicitly models them allowing for
|
||||
isolated resource usage that can be tuned by the user.
|
||||
|
||||
### Protocol Scopes
|
||||
|
||||
Protocol Scopes account for resources at the protocol level. They are
|
||||
an intermediate resource scope which can constrain streams which may
|
||||
not have a service associated or for resource control within a
|
||||
service. It also provides an opportunity for system operators to
|
||||
explicitly restrict specific protocols.
|
||||
|
||||
For instance, a service that is not aware of the resource manager and
|
||||
has not been ported to mark its streams, may still gain limits
|
||||
transparently without any programmer intervention. Furthermore, the
|
||||
protocol scope can constrain resource usage for services that
|
||||
implement multiple protocols for the sake of backwards
|
||||
compatibility. A tighter limit in some older protocol can protect the
|
||||
application from resource consumption caused by legacy clients or
|
||||
potential attacks.
|
||||
|
||||
For a concrete example, consider pubsub with the gossipsub router: the
|
||||
service also understands the floodsub protocol for backwards
|
||||
compatibility and support for unsophisticated clients that are lagging
|
||||
in the implementation effort. By specifying a lower limit for the
|
||||
floodsub protocol, we can constrain the service level for legacy
|
||||
clients using an inefficient protocol.
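A sketch of what such a protocol-specific limit could look like in a `PartialLimitConfig` (the floodsub protocol ID is the standard one; the numbers are purely illustrative):

```go
cfg := rcmgr.PartialLimitConfig{
	Protocol: map[protocol.ID]rcmgr.ResourceLimits{
		"/floodsub/1.0.0": {
			// Keep legacy floodsub clients on a tighter budget than gossipsub.
			StreamsInbound:  64,
			StreamsOutbound: 64,
		},
	},
}
```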
|
||||
|
||||
### Peer Scopes
|
||||
|
||||
The peer scope accounts for resource usage by an individual peer. This
|
||||
constrains connections and streams and limits the blast radius of
|
||||
resource consumption by a single remote peer.
|
||||
|
||||
This ensures that no single peer can use more resources than allowed
|
||||
by the peer limits. Every peer has a default limit, but the programmer
|
||||
may raise (or lower) limits for specific peers.
|
||||
|
||||
|
||||
### Connection Scopes
|
||||
|
||||
The connection scope is delimited to the duration of a connection and
|
||||
constrains resource usage by a single connection. The scope is a leaf
|
||||
in the DAG, with a span that begins when a connection is established
|
||||
and ends when the connection is closed. Its resources are aggregated
|
||||
to the resource usage of a peer.
|
||||
|
||||
### Stream Scopes
|
||||
|
||||
The stream scope is delimited to the duration of a stream, and
|
||||
constrains resource usage by a single stream. This scope is also a
|
||||
leaf in the DAG, with span that begins when a stream is created and
|
||||
ends when the stream is closed. Its resources are aggregated to the
|
||||
resource usage of a peer, and constrained by a service and protocol
|
||||
scope.
|
||||
|
||||
### User Transaction Scopes
|
||||
|
||||
User transaction scopes can be created as a child of any extant
|
||||
resource scope, and provide the programmer with a delimited scope for
|
||||
easy resource accounting. Transactions may form a tree that is rooted
|
||||
to some canonical scope in the scope DAG.
|
||||
|
||||
For instance, a programmer may create a transaction scope within a
|
||||
service that accounts for some control flow delimited resource
|
||||
usage. Similarly, a programmer may create a transaction scope for some
|
||||
interaction within a stream, e.g. a Request/Response interaction that
|
||||
uses a buffer.
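A minimal sketch of such a transaction-like span (it assumes a `scope` value implementing `network.ResourceScope`):

```go
span, err := scope.BeginSpan()
if err != nil {
	panic(err)
}
defer span.Done()

// Account a request/response buffer against the span rather than its parent scope.
if err := span.ReserveMemory(4096, network.ReservationPriorityMedium); err != nil {
	panic(err)
}
// ... perform the delimited interaction using the reserved memory ...
```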
|
||||
|
||||
## Limits
|
||||
|
||||
Each resource scope has an associated limit object, which designates
|
||||
limits for all [basic resources](#basic-resources). The limit is checked every time some
|
||||
resource is reserved and provides the system with an opportunity to
|
||||
constrain resource usage.
|
||||
|
||||
There are separate limits for each class of scope, allowing for
|
||||
multiresolution and aggregate resource accounting. As such, we have
|
||||
limits for the system and transient scopes, default and specific
|
||||
limits for services, protocols, and peers, and limits for connections
|
||||
and streams.
|
||||
|
||||
### Scaling Limits
|
||||
|
||||
When building software that is supposed to run on many different kinds of machines,
|
||||
with various memory and CPU configurations, it is desirable to have limits that
|
||||
scale with the size of the machine.
|
||||
|
||||
This is done using the `ScalingLimitConfig`. For every scope, this configuration
|
||||
struct defines the absolute bare-minimum limits, and an (optional) increase of
|
||||
these limits, which will be applied on nodes that have sufficient memory.
|
||||
|
||||
A `ScalingLimitConfig` can be converted into a `ConcreteLimitConfig` (which can then be
|
||||
used to initialize a fixed limiter with `NewFixedLimiter`) by calling the `Scale` method.
|
||||
The `Scale` method takes two parameters: the amount of memory and the number of file
|
||||
descriptors that an application is willing to dedicate to libp2p.
|
||||
|
||||
These amounts will differ between use cases. A blockchain node running on a dedicated
|
||||
server might have a lot of memory, and dedicate 1/4 of that memory to libp2p. On the
|
||||
other end of the spectrum, a desktop companion application running as a background
|
||||
task on a consumer laptop will probably dedicate significantly less than 1/4 of its system
|
||||
memory to libp2p.
|
||||
|
||||
For convenience, the `ScalingLimitConfig` also provides an `AutoScale` method,
|
||||
which determines the amount of memory and file descriptors available on the
|
||||
system, and dedicates up to 1/8 of the memory and 1/2 of the file descriptors to
|
||||
libp2p.
|
||||
|
||||
For example, one might set:
|
||||
```go
|
||||
var scalingLimits = ScalingLimitConfig{
|
||||
SystemBaseLimit: BaseLimit{
|
||||
ConnsInbound: 64,
|
||||
ConnsOutbound: 128,
|
||||
Conns: 128,
|
||||
StreamsInbound: 512,
|
||||
StreamsOutbound: 1024,
|
||||
Streams: 1024,
|
||||
Memory: 128 << 20,
|
||||
FD: 256,
|
||||
},
|
||||
SystemLimitIncrease: BaseLimitIncrease{
|
||||
ConnsInbound: 32,
|
||||
ConnsOutbound: 64,
|
||||
Conns: 64,
|
||||
StreamsInbound: 256,
|
||||
StreamsOutbound: 512,
|
||||
Streams: 512,
|
||||
Memory: 256 << 20,
|
||||
FDFraction: 1,
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
The base limit (`SystemBaseLimit`) here is the minimum configuration that any
|
||||
node will have, no matter how little memory it possesses. For every GB of memory
|
||||
passed into the `Scale` method, an increase of (`SystemLimitIncrease`) is added.
|
||||
|
||||
For example, calling `Scale` with 4 GB of memory will result in a limit of 384 for
|
||||
`Conns` (128 + 4*64).
|
||||
|
||||
The `FDFraction` defines how many of the file descriptors are allocated to this
|
||||
scope. In the example above, when called with a file descriptor value of 1000,
|
||||
this would result in a limit of 1000 (1000 * 1) file descriptors for the system
|
||||
scope. See `TestReadmeExample` in `limit_test.go`.
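Expressed in code, the arithmetic above looks roughly like this (a sketch reusing the `scalingLimits` value defined above):

```go
// Dedicate 4 GB (4<<30 bytes) of memory and 1000 file descriptors to libp2p.
concreteLimits := scalingLimits.Scale(4<<30, 1000)

// With the values above, the system scope now allows 128 + 4*64 = 384 connections
// and 1000 (1000 * 1) file descriptors.
limiter := NewFixedLimiter(concreteLimits)
_ = limiter
```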
|
||||
|
||||
Note that we only showed the configuration for the system scope here; equivalent
|
||||
configuration options apply to all other scopes as well.
|
||||
|
||||
### Default limits
|
||||
|
||||
By default the resource manager ships with some reasonable scaling limits and
|
||||
makes a reasonable guess at how much system memory you want to dedicate to the
|
||||
go-libp2p process. For the default definitions see [`DefaultLimits` and
|
||||
`ScalingLimitConfig.AutoScale()`](./limit_defaults.go).
|
||||
|
||||
### Tweaking Defaults
|
||||
|
||||
If the defaults seem mostly okay, but you want to adjust one facet you can
|
||||
simply copy the default struct object and update the field you want to change. You can
|
||||
apply changes to a `BaseLimit`, `BaseLimitIncrease`, and `ConcreteLimitConfig` with
|
||||
`.Apply`.
|
||||
|
||||
Example
|
||||
```go
|
||||
// An example on how to tweak the default limits
|
||||
tweakedDefaults := DefaultLimits
|
||||
tweakedDefaults.ProtocolBaseLimit.Streams = 1024
|
||||
tweakedDefaults.ProtocolBaseLimit.StreamsInbound = 512
|
||||
tweakedDefaults.ProtocolBaseLimit.StreamsOutbound = 512
|
||||
```
|
||||
|
||||
### How to tune your limits
|
||||
|
||||
Once you've set your limits and monitoring (see [Monitoring](#monitoring) below)
|
||||
you can now tune your limits better. The `rcmgr_blocked_resources` metric will
|
||||
tell you what was blocked and for what scope. If you see a steady stream of
|
||||
these blocked requests, it means your resource limits are too low for your usage.
|
||||
If you see a rare sudden spike, this is okay and it means the resource manager
|
||||
protected you from some anomaly.
|
||||
|
||||
### How to disable limits
|
||||
|
||||
Sometimes disabling all limits is useful when you want to see how much
|
||||
resources you use during normal operation. You can then use this information to
|
||||
define your initial limits. Disable the limits by using `InfiniteLimits`.
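For example (a sketch, mirroring the Usage section above):

```go
// Run with no limits while measuring normal resource usage.
limiter := rcmgr.NewFixedLimiter(rcmgr.InfiniteLimits)
rm, err := rcmgr.NewResourceManager(limiter)
if err != nil {
	panic(err)
}
host, err := libp2p.New(libp2p.ResourceManager(rm))
```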
|
||||
|
||||
### Debug "resource limit exceeded" errors
|
||||
|
||||
These errors occur whenever a limit is hit. For example, you'll get this error if
|
||||
you are at your limit for the number of streams you can have, and you try to
|
||||
open one more.
|
||||
|
||||
Example Log:
|
||||
```
|
||||
2022-08-12T15:49:35.459-0700 DEBUG rcmgr go-libp2p-resource-manager@v0.5.3/scope.go:541 blocked connection from constraining edge {"scope": "conn-19667", "edge": "system", "direction": "Inbound", "usefd": false, "current": 100, "attempted": 1, "limit": 100, "stat": {"NumStreamsInbound":28,"NumStreamsOutbound":66,"NumConnsInbound":37,"NumConnsOutbound":63,"NumFD":33,"Memory":8687616}, "error": "system: cannot reserve connection: resource limit exceeded"}
|
||||
```
|
||||
|
||||
The log line above is an example log line that gets emitted if you enable debug
|
||||
logging in the resource manager. You can do this by setting the environment
|
||||
variable `GOLOG_LOG_LEVEL="rcmgr=debug"`. By default only the error is
|
||||
returned to the caller, and nothing is logged by the resource manager itself.
|
||||
|
||||
The log line message (and returned error) will tell you which resource limit was
|
||||
hit (connection in the log above) and what blocked it (in this case it was the
|
||||
system scope that blocked it). The log will also include some more information
|
||||
about the current usage of the resources. In the example log above, there is a
|
||||
limit of 100 connections, and you can see that we have 37 inbound connections
|
||||
and 63 outbound connections. We've reached the limit and the resource manager
|
||||
will block any further connections.
|
||||
|
||||
The next step in debugging is seeing if this is a recurring problem or just a
|
||||
transient error. If it's a transient error it's okay to ignore it since the
|
||||
resource manager was doing its job in keeping resource usage under the limit. If
|
||||
it's recurring then you should understand what's causing you to hit these limits
|
||||
and either refactor your application or raise the limits.
|
||||
|
||||
To check if it's a recurring problem you can count the number of times you've
|
||||
seen the `"resource limit exceeded"` error over time. You can also check the
|
||||
`rcmgr_blocked_resources` metric to see how many times the resource manager has
|
||||
blocked a resource over time.
|
||||
|
||||

|
||||
|
||||
If the resource is blocked by a protocol-level scope, take a look at the various
|
||||
resource usages in the metrics. For example, if you run into a new stream being blocked,
|
||||
you can check the
|
||||
`rcmgr_streams` metric and the "Streams by protocol" graph in the Grafana
|
||||
dashboard (assuming you've set that up or something similar – see
|
||||
[Monitoring](#monitoring)) to understand the usage pattern of that specific
|
||||
protocol. This can help answer questions such as: "Am I constantly around my
|
||||
limit?", "Does it make sense to raise my limit?", "Are there any patterns around
|
||||
hitting this limit?", and "should I refactor my protocol implementation?"
|
||||
|
||||
## Monitoring
|
||||
|
||||
Once you have limits set, you'll want to monitor to see if you're running into
|
||||
your limits often. This could be a sign that you need to raise your limits
|
||||
(your process is more intensive than you originally thought) or that you need
|
||||
to fix something in your application (surely you don't need over 1000 streams?).
|
||||
|
||||
There are Prometheus metrics that can be hooked up to the resource manager. See
|
||||
`obs/stats_test.go` for an example on how to enable this, and `DefaultViews` in
|
||||
`stats.go` for recommended views. These metrics can be hooked up to Prometheus
|
||||
or any other platform that can scrape a prometheus endpoint.
|
||||
|
||||
There is also an included Grafana dashboard to help kickstart your
|
||||
observability into the resource manager. Find more information about it
|
||||
[here](./../../../dashboards/resource-manager/README.md).
|
||||
|
||||
## Allowlisting multiaddrs to mitigate eclipse attacks
|
||||
|
||||
If you have a set of trusted peers and IP addresses, you can use the resource
|
||||
manager's [Allowlist](./docs/allowlist.md) to protect yourself from eclipse
|
||||
attacks. The set of peers in the allowlist will have their own limits in case
|
||||
the normal limits are reached. This means you will always be able to connect to
|
||||
these trusted peers even if you've already reached your system limits.
|
||||
|
||||
Look at `WithAllowlistedMultiaddrs` and its example in the GoDoc to learn more.
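A sketch of wiring an allowlist into the resource manager (the address and peer ID below are illustrative):

```go
trusted, _ := multiaddr.NewMultiaddr("/ip4/192.0.2.10/p2p/QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf")

limiter := rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale())
rm, err := rcmgr.NewResourceManager(limiter,
	rcmgr.WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{trusted}))
if err != nil {
	panic(err)
}
```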
|
||||
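
As a sketch, construction looks roughly like this; the addresses are
placeholders (the peer ID is the example one used in the test configs) and the
limits are just the autoscaled defaults:

```go
package main

import (
	"log"

	"github.com/libp2p/go-libp2p"
	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
	"github.com/multiformats/go-multiaddr"
)

func main() {
	// Trusted addresses: /ipcidr allows a whole subnet, /p2p pins an IP to a peer ID.
	trusted := []multiaddr.Multiaddr{
		multiaddr.StringCast("/ip4/192.168.1.0/ipcidr/24"),
		multiaddr.StringCast("/ip4/10.0.0.5/p2p/12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS"),
	}

	limiter := rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale())
	rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithAllowlistedMultiaddrs(trusted))
	if err != nil {
		log.Fatal(err)
	}

	h, err := libp2p.New(libp2p.ResourceManager(rm))
	if err != nil {
		log.Fatal(err)
	}
	defer h.Close()
}
```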

## ConnManager vs Resource Manager

go-libp2p already includes a [connection
manager](https://pkg.go.dev/github.com/libp2p/go-libp2p/core/connmgr#ConnManager),
so what's the difference between the `ConnManager` and the `ResourceManager`?

ConnManager:
1. Configured with a low and high watermark number of connections.
2. Attempts to maintain the number of connections between the low and high
   watermarks.
3. Connections can be given metadata and weight (e.g. a hole punched
   connection is more valuable than a connection to a publicly addressable
   endpoint since it took more effort to make the hole punched connection).
4. The ConnManager will trim connections once the high watermark is reached and
   trim down to the low watermark.
5. Won't block adding another connection above the high watermark, but will
   trigger the trim mentioned above.
6. Can trim and prioritize connections with custom logic.
7. No concept of scopes (like the resource manager).

Resource Manager:
1. Configured with limits on the number of outgoing and incoming connections at
   different [resource scopes](#resource-scopes).
2. Will block adding any more connections if any of the scope-specific limits
   would be exceeded.

The natural question when comparing these two managers is "how do the watermarks
and limits interact with each other?". The short answer is that they don't know
about each other. This can lead to some surprising subtleties, such as the
trimming never happening because the resource manager's limit is lower than the
high watermark. This is confusing, and we'd like to fix it. The issue is
captured in [go-libp2p#1640](https://github.com/libp2p/go-libp2p/issues/1640).

When configuring the resource manager and connection manager, set the limits in
the resource manager as the hard limits that you never want to exceed, and set
the low/high watermarks as the range at which your application works best.
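
For instance, a sketch with illustrative numbers (the watermarks and the
connection ceiling below are placeholders, not recommendations) could look
like this:

```go
package main

import (
	"log"
	"time"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
)

func main() {
	// Operating range: trim back towards 160 once we exceed 192 connections.
	cm, err := connmgr.NewConnManager(160, 192, connmgr.WithGracePeriod(time.Minute))
	if err != nil {
		log.Fatal(err)
	}

	// Hard ceiling: never allow more than 256 connections at the system scope.
	cfg := rcmgr.PartialLimitConfig{
		System: rcmgr.ResourceLimits{Conns: 256},
	}
	limiter := rcmgr.NewFixedLimiter(cfg.Build(rcmgr.DefaultLimits.AutoScale()))
	rm, err := rcmgr.NewResourceManager(limiter)
	if err != nil {
		log.Fatal(err)
	}

	h, err := libp2p.New(libp2p.ConnectionManager(cm), libp2p.ResourceManager(rm))
	if err != nil {
		log.Fatal(err)
	}
	defer h.Close()
}
```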

## Examples

Here we consider some concrete examples that can elucidate the abstract
design as described so far.

### Stream Lifetime

Let's consider a stream and the limits that apply to it.
When the stream scope is first opened, it is created by calling
`ResourceManager.OpenStream`.

Initially the stream is constrained by:
- the system scope, where global hard limits apply.
- the transient scope, where unnegotiated streams live.
- the peer scope, where the limits for the peer at the other end of the stream
  apply.

Once the protocol has been negotiated, the protocol is set by calling
`StreamManagementScope.SetProtocol`. The constraint from the
transient scope is removed and the stream is now constrained by the
protocol scope instead.

More specifically, the following constraints apply:
- the system scope, where global hard limits apply.
- the peer scope, where the limits for the peer at the other end of the stream
  apply.
- the protocol scope, where the limits of the specific protocol used apply.

The existence of the protocol limit allows us to implicitly constrain
streams for services that have not been ported to the resource manager
yet. Once the programmer attaches a stream to a service by calling
`StreamScope.SetService`, the stream resources are aggregated and constrained
by the service scope in addition to its protocol scope.

More specifically, the following constraints apply:
- the system scope, where global hard limits apply.
- the peer scope, where the limits for the peer at the other end of the stream
  apply.
- the service scope, where the limits of the specific service owning the stream apply.
- the protocol scope, where the limits of the specific protocol for the stream apply.
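
Putting the lifetime above together, a sketch against the raw scope API looks
roughly like the following; the function name, protocol ID, and service name
are illustrative, and in practice go-libp2p performs these calls for you when
streams are opened through a host:

```go
import (
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
)

// trackStream walks a stream scope through its lifetime.
func trackStream(rm network.ResourceManager, p peer.ID) error {
	// Constrained by the system, transient, and peer scopes.
	scope, err := rm.OpenStream(p, network.DirOutbound)
	if err != nil {
		return err
	}
	defer scope.Done() // release the stream's resources when finished

	// Negotiated: the transient constraint is replaced by the protocol scope.
	if err := scope.SetProtocol("/my/proto/1.0.0"); err != nil {
		return err
	}

	// Attached to a service: the service scope now applies as well.
	return scope.SetService("my-service")
}
```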

The resource transfer that happens in `SetProtocol` and `SetService`
gives the resource manager the opportunity to gate the streams. If
the transfer results in exceeding the scope limits, then an error
indicating "resource limit exceeded" is returned. The wrapped error
includes the name of the scope rejecting the resource acquisition to
aid understanding of applicable limits. Note that the (wrapped) error
implements `net.Error` and is marked as temporary, so that the
programmer can handle it with a backoff and retry.
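
A minimal sketch of that backoff-and-retry pattern is shown below; the helper
name and retry policy are illustrative, not part of the library:

```go
import (
	"context"
	"errors"
	"net"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

// openStreamWithRetry retries NewStream a few times when the resource manager
// rejects it with a temporary error (e.g. "resource limit exceeded").
func openStreamWithRetry(ctx context.Context, h host.Host, p peer.ID, proto protocol.ID) (network.Stream, error) {
	backoff := 100 * time.Millisecond
	for attempt := 0; ; attempt++ {
		s, err := h.NewStream(ctx, p, proto)
		if err == nil {
			return s, nil
		}
		var ne net.Error
		if attempt >= 4 || !errors.As(err, &ne) || !ne.Temporary() {
			return nil, err
		}
		select {
		case <-time.After(backoff):
			backoff *= 2 // exponential backoff before retrying
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}
```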

## Implementation Notes

- The package only exports a constructor for the resource manager and
  basic types for defining limits. Internals are not exposed.
- Internally, there is a resources object that is embedded in every scope and
  implements resource accounting.
- There is a single implementation of a generic resource scope that
  provides all necessary interface methods.
- There are concrete types for all canonical scopes, embedding a
  pointer to a generic resource scope.
- Peer and Protocol scopes, which may be created in response to
  network events, are periodically garbage collected.

## Design Considerations

- The Resource Manager must account for basic resource usage at all
  levels of the stack, from the internals to application components
  that use the network facilities of libp2p.
- Basic resources include memory, streams, connections, and file
  descriptors. These account for both space and time used by
  the stack, as each resource has a direct effect on the system
  availability and performance.
- The design must support seamless integration for user applications,
  which should reap the benefits of resource management without any
  changes. That is, existing applications should be oblivious of the
  resource manager and transparently obtain limits that protect them
  from resource exhaustion and OOM conditions.
- At the same time, the design must support opt-in resource usage
  accounting for applications that want to explicitly utilize the
  facilities of the system to inform about and constrain their own
  resource usage.
- The design must allow the user to set their own limits, which can be
  static (fixed) or dynamic.
216
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/allowlist.go
generated
vendored
Normal file
216
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/allowlist.go
generated
vendored
Normal file
@@ -0,0 +1,216 @@
|
||||
package rcmgr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
)
|
||||
|
||||
type Allowlist struct {
|
||||
mu sync.RWMutex
|
||||
// a simple structure of lists of networks. There is probably a faster way
|
||||
// to check if an IP address is in this network than iterating over this
|
||||
// list, but this is good enough for small numbers of networks (<1_000).
|
||||
// Analyze the benchmark before trying to optimize this.
|
||||
|
||||
// Any peer with these IPs are allowed
|
||||
allowedNetworks []*net.IPNet
|
||||
|
||||
// Only the specified peers can use these IPs
|
||||
allowedPeerByNetwork map[peer.ID][]*net.IPNet
|
||||
}
|
||||
|
||||
// WithAllowlistedMultiaddrs sets the multiaddrs to be in the allowlist
|
||||
func WithAllowlistedMultiaddrs(mas []multiaddr.Multiaddr) Option {
|
||||
return func(rm *resourceManager) error {
|
||||
for _, ma := range mas {
|
||||
err := rm.allowlist.Add(ma)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func newAllowlist() Allowlist {
|
||||
return Allowlist{
|
||||
allowedPeerByNetwork: make(map[peer.ID][]*net.IPNet),
|
||||
}
|
||||
}
|
||||
|
||||
func toIPNet(ma multiaddr.Multiaddr) (*net.IPNet, peer.ID, error) {
|
||||
var ipString string
|
||||
var mask string
|
||||
var allowedPeerStr string
|
||||
var allowedPeer peer.ID
|
||||
var isIPV4 bool
|
||||
|
||||
multiaddr.ForEach(ma, func(c multiaddr.Component) bool {
|
||||
if c.Protocol().Code == multiaddr.P_IP4 || c.Protocol().Code == multiaddr.P_IP6 {
|
||||
isIPV4 = c.Protocol().Code == multiaddr.P_IP4
|
||||
ipString = c.Value()
|
||||
}
|
||||
if c.Protocol().Code == multiaddr.P_IPCIDR {
|
||||
mask = c.Value()
|
||||
}
|
||||
if c.Protocol().Code == multiaddr.P_P2P {
|
||||
allowedPeerStr = c.Value()
|
||||
}
|
||||
return ipString == "" || mask == "" || allowedPeerStr == ""
|
||||
})
|
||||
|
||||
if ipString == "" {
|
||||
return nil, allowedPeer, errors.New("missing ip address")
|
||||
}
|
||||
|
||||
if allowedPeerStr != "" {
|
||||
var err error
|
||||
allowedPeer, err = peer.Decode(allowedPeerStr)
|
||||
if err != nil {
|
||||
return nil, allowedPeer, fmt.Errorf("failed to decode allowed peer: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if mask == "" {
|
||||
ip := net.ParseIP(ipString)
|
||||
if ip == nil {
|
||||
return nil, allowedPeer, errors.New("invalid ip address")
|
||||
}
|
||||
var mask net.IPMask
|
||||
if isIPV4 {
|
||||
mask = net.CIDRMask(32, 32)
|
||||
} else {
|
||||
mask = net.CIDRMask(128, 128)
|
||||
}
|
||||
|
||||
net := &net.IPNet{IP: ip, Mask: mask}
|
||||
return net, allowedPeer, nil
|
||||
}
|
||||
|
||||
_, ipnet, err := net.ParseCIDR(ipString + "/" + mask)
|
||||
return ipnet, allowedPeer, err
|
||||
|
||||
}
|
||||
|
||||
// Add takes a multiaddr and adds it to the allowlist. The multiaddr should be
|
||||
// an ip address of the peer with or without a `/p2p` protocol.
|
||||
// e.g. /ip4/1.2.3.4/p2p/QmFoo, /ip4/1.2.3.4, and /ip4/1.2.3.0/ipcidr/24 are valid.
|
||||
// /p2p/QmFoo is not valid.
|
||||
func (al *Allowlist) Add(ma multiaddr.Multiaddr) error {
|
||||
ipnet, allowedPeer, err := toIPNet(ma)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
al.mu.Lock()
|
||||
defer al.mu.Unlock()
|
||||
|
||||
if allowedPeer != peer.ID("") {
|
||||
// We have a peerID constraint
|
||||
if al.allowedPeerByNetwork == nil {
|
||||
al.allowedPeerByNetwork = make(map[peer.ID][]*net.IPNet)
|
||||
}
|
||||
al.allowedPeerByNetwork[allowedPeer] = append(al.allowedPeerByNetwork[allowedPeer], ipnet)
|
||||
} else {
|
||||
al.allowedNetworks = append(al.allowedNetworks, ipnet)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (al *Allowlist) Remove(ma multiaddr.Multiaddr) error {
|
||||
ipnet, allowedPeer, err := toIPNet(ma)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
al.mu.Lock()
|
||||
defer al.mu.Unlock()
|
||||
|
||||
ipNetList := al.allowedNetworks
|
||||
|
||||
if allowedPeer != "" {
|
||||
// We have a peerID constraint
|
||||
ipNetList = al.allowedPeerByNetwork[allowedPeer]
|
||||
}
|
||||
|
||||
if ipNetList == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
i := len(ipNetList)
|
||||
for i > 0 {
|
||||
i--
|
||||
if ipNetList[i].IP.Equal(ipnet.IP) && bytes.Equal(ipNetList[i].Mask, ipnet.Mask) {
|
||||
// swap remove
|
||||
ipNetList[i] = ipNetList[len(ipNetList)-1]
|
||||
ipNetList = ipNetList[:len(ipNetList)-1]
|
||||
// We only remove one thing
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if allowedPeer != "" {
|
||||
al.allowedPeerByNetwork[allowedPeer] = ipNetList
|
||||
} else {
|
||||
al.allowedNetworks = ipNetList
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (al *Allowlist) Allowed(ma multiaddr.Multiaddr) bool {
|
||||
ip, err := manet.ToIP(ma)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
al.mu.RLock()
|
||||
defer al.mu.RUnlock()
|
||||
|
||||
for _, network := range al.allowedNetworks {
|
||||
if network.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
for _, allowedNetworks := range al.allowedPeerByNetwork {
|
||||
for _, network := range allowedNetworks {
|
||||
if network.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (al *Allowlist) AllowedPeerAndMultiaddr(peerID peer.ID, ma multiaddr.Multiaddr) bool {
|
||||
ip, err := manet.ToIP(ma)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
al.mu.RLock()
|
||||
defer al.mu.RUnlock()
|
||||
|
||||
for _, network := range al.allowedNetworks {
|
||||
if network.Contains(ip) {
|
||||
// We found a match that isn't constrained by a peerID
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if expectedNetworks, ok := al.allowedPeerByNetwork[peerID]; ok {
|
||||
for _, expectedNetwork := range expectedNetworks {
|
||||
if expectedNetwork.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
81
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go
generated
vendored
Normal file
81
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
package rcmgr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
type ErrStreamOrConnLimitExceeded struct {
|
||||
current, attempted, limit int
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *ErrStreamOrConnLimitExceeded) Error() string { return e.err.Error() }
|
||||
func (e *ErrStreamOrConnLimitExceeded) Unwrap() error { return e.err }
|
||||
|
||||
// edge may be "" if this is not an edge error
|
||||
func logValuesStreamLimit(scope, edge string, dir network.Direction, stat network.ScopeStat, err error) []interface{} {
|
||||
logValues := make([]interface{}, 0, 2*8)
|
||||
logValues = append(logValues, "scope", scope)
|
||||
if edge != "" {
|
||||
logValues = append(logValues, "edge", edge)
|
||||
}
|
||||
logValues = append(logValues, "direction", dir)
|
||||
var e *ErrStreamOrConnLimitExceeded
|
||||
if errors.As(err, &e) {
|
||||
logValues = append(logValues,
|
||||
"current", e.current,
|
||||
"attempted", e.attempted,
|
||||
"limit", e.limit,
|
||||
)
|
||||
}
|
||||
return append(logValues, "stat", stat, "error", err)
|
||||
}
|
||||
|
||||
// edge may be "" if this is not an edge error
|
||||
func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, stat network.ScopeStat, err error) []interface{} {
|
||||
logValues := make([]interface{}, 0, 2*9)
|
||||
logValues = append(logValues, "scope", scope)
|
||||
if edge != "" {
|
||||
logValues = append(logValues, "edge", edge)
|
||||
}
|
||||
logValues = append(logValues, "direction", dir, "usefd", usefd)
|
||||
var e *ErrStreamOrConnLimitExceeded
|
||||
if errors.As(err, &e) {
|
||||
logValues = append(logValues,
|
||||
"current", e.current,
|
||||
"attempted", e.attempted,
|
||||
"limit", e.limit,
|
||||
)
|
||||
}
|
||||
return append(logValues, "stat", stat, "error", err)
|
||||
}
|
||||
|
||||
type ErrMemoryLimitExceeded struct {
|
||||
current, attempted, limit int64
|
||||
priority uint8
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *ErrMemoryLimitExceeded) Error() string { return e.err.Error() }
|
||||
func (e *ErrMemoryLimitExceeded) Unwrap() error { return e.err }
|
||||
|
||||
// edge may be "" if this is not an edge error
|
||||
func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) []interface{} {
|
||||
logValues := make([]interface{}, 0, 2*8)
|
||||
logValues = append(logValues, "scope", scope)
|
||||
if edge != "" {
|
||||
logValues = append(logValues, "edge", edge)
|
||||
}
|
||||
var e *ErrMemoryLimitExceeded
|
||||
if errors.As(err, &e) {
|
||||
logValues = append(logValues,
|
||||
"current", e.current,
|
||||
"attempted", e.attempted,
|
||||
"priority", e.priority,
|
||||
"limit", e.limit,
|
||||
)
|
||||
}
|
||||
return append(logValues, "stat", stat, "error", err)
|
||||
}
|
||||
151
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go
generated
vendored
Normal file
151
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
package rcmgr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
// ResourceScopeLimiter is a trait interface that allows you to access scope limits.
|
||||
type ResourceScopeLimiter interface {
|
||||
Limit() Limit
|
||||
SetLimit(Limit)
|
||||
}
|
||||
|
||||
var _ ResourceScopeLimiter = (*resourceScope)(nil)
|
||||
|
||||
// ResourceManagerStat is a trait that allows you to access resource manager state.
|
||||
type ResourceManagerState interface {
|
||||
ListServices() []string
|
||||
ListProtocols() []protocol.ID
|
||||
ListPeers() []peer.ID
|
||||
|
||||
Stat() ResourceManagerStat
|
||||
}
|
||||
|
||||
type ResourceManagerStat struct {
|
||||
System network.ScopeStat
|
||||
Transient network.ScopeStat
|
||||
Services map[string]network.ScopeStat
|
||||
Protocols map[protocol.ID]network.ScopeStat
|
||||
Peers map[peer.ID]network.ScopeStat
|
||||
}
|
||||
|
||||
var _ ResourceManagerState = (*resourceManager)(nil)
|
||||
|
||||
func (s *resourceScope) Limit() Limit {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
return s.rc.limit
|
||||
}
|
||||
|
||||
func (s *resourceScope) SetLimit(limit Limit) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
s.rc.limit = limit
|
||||
}
|
||||
|
||||
func (s *protocolScope) SetLimit(limit Limit) {
|
||||
s.rcmgr.setStickyProtocol(s.proto)
|
||||
s.resourceScope.SetLimit(limit)
|
||||
}
|
||||
|
||||
func (s *peerScope) SetLimit(limit Limit) {
|
||||
s.rcmgr.setStickyPeer(s.peer)
|
||||
s.resourceScope.SetLimit(limit)
|
||||
}
|
||||
|
||||
func (r *resourceManager) ListServices() []string {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
result := make([]string, 0, len(r.svc))
|
||||
for svc := range r.svc {
|
||||
result = append(result, svc)
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return strings.Compare(result[i], result[j]) < 0
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *resourceManager) ListProtocols() []protocol.ID {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
result := make([]protocol.ID, 0, len(r.proto))
|
||||
for p := range r.proto {
|
||||
result = append(result, p)
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return result[i] < result[j]
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *resourceManager) ListPeers() []peer.ID {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
result := make([]peer.ID, 0, len(r.peer))
|
||||
for p := range r.peer {
|
||||
result = append(result, p)
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return bytes.Compare([]byte(result[i]), []byte(result[j])) < 0
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *resourceManager) Stat() (result ResourceManagerStat) {
|
||||
r.mx.Lock()
|
||||
svcs := make([]*serviceScope, 0, len(r.svc))
|
||||
for _, svc := range r.svc {
|
||||
svcs = append(svcs, svc)
|
||||
}
|
||||
protos := make([]*protocolScope, 0, len(r.proto))
|
||||
for _, proto := range r.proto {
|
||||
protos = append(protos, proto)
|
||||
}
|
||||
peers := make([]*peerScope, 0, len(r.peer))
|
||||
for _, peer := range r.peer {
|
||||
peers = append(peers, peer)
|
||||
}
|
||||
r.mx.Unlock()
|
||||
|
||||
// Note: there is no global lock, so the system is updating while we are dumping its state...
|
||||
// as such stats might not exactly add up to the system level; we take the system stat
|
||||
// last nonetheless so that this is the most up-to-date snapshot
|
||||
result.Peers = make(map[peer.ID]network.ScopeStat, len(peers))
|
||||
for _, peer := range peers {
|
||||
result.Peers[peer.peer] = peer.Stat()
|
||||
}
|
||||
result.Protocols = make(map[protocol.ID]network.ScopeStat, len(protos))
|
||||
for _, proto := range protos {
|
||||
result.Protocols[proto.proto] = proto.Stat()
|
||||
}
|
||||
result.Services = make(map[string]network.ScopeStat, len(svcs))
|
||||
for _, svc := range svcs {
|
||||
result.Services[svc.service] = svc.Stat()
|
||||
}
|
||||
result.Transient = r.transient.Stat()
|
||||
result.System = r.system.Stat()
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *resourceManager) GetConnLimit() int {
|
||||
return r.limits.GetConnLimits().GetConnTotalLimit()
|
||||
}
|
||||
297
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go
generated
vendored
Normal file
297
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
|
||||
/*
|
||||
Package rcmgr is the resource manager for go-libp2p. This allows you to track
|
||||
resources being used throughout your go-libp2p process. As well as making sure
|
||||
that the process doesn't use more resources than what you define as your
|
||||
limits. The resource manager only knows about things it is told about, so it's
|
||||
the responsibility of the user of this library (either go-libp2p or a go-libp2p
|
||||
user) to make sure they check with the resource manager before actually
|
||||
allocating the resource.
|
||||
*/
|
||||
package rcmgr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"math"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
// Limit is an object that specifies basic resource limits.
|
||||
type Limit interface {
|
||||
// GetMemoryLimit returns the (current) memory limit.
|
||||
GetMemoryLimit() int64
|
||||
// GetStreamLimit returns the stream limit, for inbound or outbound streams.
|
||||
GetStreamLimit(network.Direction) int
|
||||
// GetStreamTotalLimit returns the total stream limit
|
||||
GetStreamTotalLimit() int
|
||||
// GetConnLimit returns the connection limit, for inbound or outbound connections.
|
||||
GetConnLimit(network.Direction) int
|
||||
// GetConnTotalLimit returns the total connection limit
|
||||
GetConnTotalLimit() int
|
||||
// GetFDLimit returns the file descriptor limit.
|
||||
GetFDLimit() int
|
||||
}
|
||||
|
||||
// Limiter is the interface for providing limits to the resource manager.
|
||||
type Limiter interface {
|
||||
GetSystemLimits() Limit
|
||||
GetTransientLimits() Limit
|
||||
GetAllowlistedSystemLimits() Limit
|
||||
GetAllowlistedTransientLimits() Limit
|
||||
GetServiceLimits(svc string) Limit
|
||||
GetServicePeerLimits(svc string) Limit
|
||||
GetProtocolLimits(proto protocol.ID) Limit
|
||||
GetProtocolPeerLimits(proto protocol.ID) Limit
|
||||
GetPeerLimits(p peer.ID) Limit
|
||||
GetStreamLimits(p peer.ID) Limit
|
||||
GetConnLimits() Limit
|
||||
}
|
||||
|
||||
// NewDefaultLimiterFromJSON creates a new limiter by parsing a json configuration,
|
||||
// using the default limits for fallback.
|
||||
func NewDefaultLimiterFromJSON(in io.Reader) (Limiter, error) {
|
||||
return NewLimiterFromJSON(in, DefaultLimits.AutoScale())
|
||||
}
|
||||
|
||||
// NewLimiterFromJSON creates a new limiter by parsing a json configuration.
|
||||
func NewLimiterFromJSON(in io.Reader, defaults ConcreteLimitConfig) (Limiter, error) {
|
||||
cfg, err := readLimiterConfigFromJSON(in, defaults)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fixedLimiter{cfg}, nil
|
||||
}
|
||||
|
||||
func readLimiterConfigFromJSON(in io.Reader, defaults ConcreteLimitConfig) (ConcreteLimitConfig, error) {
|
||||
var cfg PartialLimitConfig
|
||||
if err := json.NewDecoder(in).Decode(&cfg); err != nil {
|
||||
return ConcreteLimitConfig{}, err
|
||||
}
|
||||
return cfg.Build(defaults), nil
|
||||
}
|
||||
|
||||
// fixedLimiter is a limiter with fixed limits.
|
||||
type fixedLimiter struct {
|
||||
ConcreteLimitConfig
|
||||
}
|
||||
|
||||
var _ Limiter = (*fixedLimiter)(nil)
|
||||
|
||||
func NewFixedLimiter(conf ConcreteLimitConfig) Limiter {
|
||||
log.Debugw("initializing new limiter with config", "limits", conf)
|
||||
return &fixedLimiter{conf}
|
||||
}
|
||||
|
||||
// BaseLimit is a mixin type for basic resource limits.
|
||||
type BaseLimit struct {
|
||||
Streams int `json:",omitempty"`
|
||||
StreamsInbound int `json:",omitempty"`
|
||||
StreamsOutbound int `json:",omitempty"`
|
||||
Conns int `json:",omitempty"`
|
||||
ConnsInbound int `json:",omitempty"`
|
||||
ConnsOutbound int `json:",omitempty"`
|
||||
FD int `json:",omitempty"`
|
||||
Memory int64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
func valueOrBlockAll(n int) LimitVal {
|
||||
if n == 0 {
|
||||
return BlockAllLimit
|
||||
} else if n == math.MaxInt {
|
||||
return Unlimited
|
||||
}
|
||||
return LimitVal(n)
|
||||
}
|
||||
func valueOrBlockAll64(n int64) LimitVal64 {
|
||||
if n == 0 {
|
||||
return BlockAllLimit64
|
||||
} else if n == math.MaxInt {
|
||||
return Unlimited64
|
||||
}
|
||||
return LimitVal64(n)
|
||||
}
|
||||
|
||||
// ToResourceLimits converts the BaseLimit to a ResourceLimits
|
||||
func (l BaseLimit) ToResourceLimits() ResourceLimits {
|
||||
return ResourceLimits{
|
||||
Streams: valueOrBlockAll(l.Streams),
|
||||
StreamsInbound: valueOrBlockAll(l.StreamsInbound),
|
||||
StreamsOutbound: valueOrBlockAll(l.StreamsOutbound),
|
||||
Conns: valueOrBlockAll(l.Conns),
|
||||
ConnsInbound: valueOrBlockAll(l.ConnsInbound),
|
||||
ConnsOutbound: valueOrBlockAll(l.ConnsOutbound),
|
||||
FD: valueOrBlockAll(l.FD),
|
||||
Memory: valueOrBlockAll64(l.Memory),
|
||||
}
|
||||
}
|
||||
|
||||
// Apply overwrites all zero-valued limits with the values of l2
|
||||
// Must not use a pointer receiver.
|
||||
func (l *BaseLimit) Apply(l2 BaseLimit) {
|
||||
if l.Streams == 0 {
|
||||
l.Streams = l2.Streams
|
||||
}
|
||||
if l.StreamsInbound == 0 {
|
||||
l.StreamsInbound = l2.StreamsInbound
|
||||
}
|
||||
if l.StreamsOutbound == 0 {
|
||||
l.StreamsOutbound = l2.StreamsOutbound
|
||||
}
|
||||
if l.Conns == 0 {
|
||||
l.Conns = l2.Conns
|
||||
}
|
||||
if l.ConnsInbound == 0 {
|
||||
l.ConnsInbound = l2.ConnsInbound
|
||||
}
|
||||
if l.ConnsOutbound == 0 {
|
||||
l.ConnsOutbound = l2.ConnsOutbound
|
||||
}
|
||||
if l.Memory == 0 {
|
||||
l.Memory = l2.Memory
|
||||
}
|
||||
if l.FD == 0 {
|
||||
l.FD = l2.FD
|
||||
}
|
||||
}
|
||||
|
||||
// BaseLimitIncrease is the increase per GiB of allowed memory.
|
||||
type BaseLimitIncrease struct {
|
||||
Streams int `json:",omitempty"`
|
||||
StreamsInbound int `json:",omitempty"`
|
||||
StreamsOutbound int `json:",omitempty"`
|
||||
Conns int `json:",omitempty"`
|
||||
ConnsInbound int `json:",omitempty"`
|
||||
ConnsOutbound int `json:",omitempty"`
|
||||
// Memory is in bytes. Values over 1>>30 (1GiB) don't make sense.
|
||||
Memory int64 `json:",omitempty"`
|
||||
// FDFraction is expected to be >= 0 and <= 1.
|
||||
FDFraction float64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Apply overwrites all zero-valued limits with the values of l2
|
||||
// Must not use a pointer receiver.
|
||||
func (l *BaseLimitIncrease) Apply(l2 BaseLimitIncrease) {
|
||||
if l.Streams == 0 {
|
||||
l.Streams = l2.Streams
|
||||
}
|
||||
if l.StreamsInbound == 0 {
|
||||
l.StreamsInbound = l2.StreamsInbound
|
||||
}
|
||||
if l.StreamsOutbound == 0 {
|
||||
l.StreamsOutbound = l2.StreamsOutbound
|
||||
}
|
||||
if l.Conns == 0 {
|
||||
l.Conns = l2.Conns
|
||||
}
|
||||
if l.ConnsInbound == 0 {
|
||||
l.ConnsInbound = l2.ConnsInbound
|
||||
}
|
||||
if l.ConnsOutbound == 0 {
|
||||
l.ConnsOutbound = l2.ConnsOutbound
|
||||
}
|
||||
if l.Memory == 0 {
|
||||
l.Memory = l2.Memory
|
||||
}
|
||||
if l.FDFraction == 0 {
|
||||
l.FDFraction = l2.FDFraction
|
||||
}
|
||||
}
|
||||
|
||||
func (l BaseLimit) GetStreamLimit(dir network.Direction) int {
|
||||
if dir == network.DirInbound {
|
||||
return l.StreamsInbound
|
||||
} else {
|
||||
return l.StreamsOutbound
|
||||
}
|
||||
}
|
||||
|
||||
func (l BaseLimit) GetStreamTotalLimit() int {
|
||||
return l.Streams
|
||||
}
|
||||
|
||||
func (l BaseLimit) GetConnLimit(dir network.Direction) int {
|
||||
if dir == network.DirInbound {
|
||||
return l.ConnsInbound
|
||||
} else {
|
||||
return l.ConnsOutbound
|
||||
}
|
||||
}
|
||||
|
||||
func (l BaseLimit) GetConnTotalLimit() int {
|
||||
return l.Conns
|
||||
}
|
||||
|
||||
func (l BaseLimit) GetFDLimit() int {
|
||||
return l.FD
|
||||
}
|
||||
|
||||
func (l BaseLimit) GetMemoryLimit() int64 {
|
||||
return l.Memory
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetSystemLimits() Limit {
|
||||
return &l.system
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetTransientLimits() Limit {
|
||||
return &l.transient
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetAllowlistedSystemLimits() Limit {
|
||||
return &l.allowlistedSystem
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetAllowlistedTransientLimits() Limit {
|
||||
return &l.allowlistedTransient
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetServiceLimits(svc string) Limit {
|
||||
sl, ok := l.service[svc]
|
||||
if !ok {
|
||||
return &l.serviceDefault
|
||||
}
|
||||
return &sl
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetServicePeerLimits(svc string) Limit {
|
||||
pl, ok := l.servicePeer[svc]
|
||||
if !ok {
|
||||
return &l.servicePeerDefault
|
||||
}
|
||||
return &pl
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetProtocolLimits(proto protocol.ID) Limit {
|
||||
pl, ok := l.protocol[proto]
|
||||
if !ok {
|
||||
return &l.protocolDefault
|
||||
}
|
||||
return &pl
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetProtocolPeerLimits(proto protocol.ID) Limit {
|
||||
pl, ok := l.protocolPeer[proto]
|
||||
if !ok {
|
||||
return &l.protocolPeerDefault
|
||||
}
|
||||
return &pl
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetPeerLimits(p peer.ID) Limit {
|
||||
pl, ok := l.peer[p]
|
||||
if !ok {
|
||||
return &l.peerDefault
|
||||
}
|
||||
return &pl
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetStreamLimits(_ peer.ID) Limit {
|
||||
return &l.stream
|
||||
}
|
||||
|
||||
func (l *fixedLimiter) GetConnLimits() Limit {
|
||||
return &l.conn
|
||||
}
|
||||
45
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json
generated
vendored
Normal file
45
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"System": {
|
||||
"Memory": 65536,
|
||||
"Conns": 16,
|
||||
"ConnsInbound": 8,
|
||||
"ConnsOutbound": 16,
|
||||
"FD": 16
|
||||
},
|
||||
"ServiceDefault": {
|
||||
"Memory": 8765
|
||||
},
|
||||
"Service": {
|
||||
"A": {
|
||||
"Memory": 8192
|
||||
},
|
||||
"B": {}
|
||||
},
|
||||
"ServicePeerDefault": {
|
||||
"Memory": 2048
|
||||
},
|
||||
"ServicePeer": {
|
||||
"A": {
|
||||
"Memory": 4096
|
||||
}
|
||||
},
|
||||
"ProtocolDefault": {
|
||||
"Memory": 2048
|
||||
},
|
||||
"ProtocolPeerDefault": {
|
||||
"Memory": 1024
|
||||
},
|
||||
"Protocol": {
|
||||
"/A": {
|
||||
"Memory": 8192
|
||||
}
|
||||
},
|
||||
"PeerDefault": {
|
||||
"Memory": 4096
|
||||
},
|
||||
"Peer": {
|
||||
"12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS": {
|
||||
"Memory": 4097
|
||||
}
|
||||
}
|
||||
}
|
||||
45
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.json
generated
vendored
Normal file
45
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.json
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"System": {
|
||||
"Memory": 65536,
|
||||
"Conns": 16,
|
||||
"ConnsInbound": 8,
|
||||
"ConnsOutbound": 16,
|
||||
"FD": 16
|
||||
},
|
||||
"ServiceDefault": {
|
||||
"Memory": 8765
|
||||
},
|
||||
"Service": {
|
||||
"A": {
|
||||
"Memory": 8192
|
||||
},
|
||||
"B": {}
|
||||
},
|
||||
"ServicePeerDefault": {
|
||||
"Memory": 2048
|
||||
},
|
||||
"ServicePeer": {
|
||||
"A": {
|
||||
"Memory": 4096
|
||||
}
|
||||
},
|
||||
"ProtocolDefault": {
|
||||
"Memory": 2048
|
||||
},
|
||||
"ProtocolPeerDefault": {
|
||||
"Memory": 1024
|
||||
},
|
||||
"Protocol": {
|
||||
"/A": {
|
||||
"Memory": 8192
|
||||
}
|
||||
},
|
||||
"PeerDefault": {
|
||||
"Memory": 4096
|
||||
},
|
||||
"Peer": {
|
||||
"12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS": {
|
||||
"Memory": 4097
|
||||
}
|
||||
}
|
||||
}
|
||||
112
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test_default.json
generated
vendored
Normal file
112
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test_default.json
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
{
|
||||
"System": {
|
||||
"Streams": 18432,
|
||||
"StreamsInbound": 9216,
|
||||
"StreamsOutbound": 18432,
|
||||
"Conns": 1152,
|
||||
"ConnsInbound": 576,
|
||||
"ConnsOutbound": 1152,
|
||||
"FD": 16384,
|
||||
"Memory": "8724152320"
|
||||
},
|
||||
"Transient": {
|
||||
"Streams": 2304,
|
||||
"StreamsInbound": 1152,
|
||||
"StreamsOutbound": 2304,
|
||||
"Conns": 320,
|
||||
"ConnsInbound": 160,
|
||||
"ConnsOutbound": 320,
|
||||
"FD": 4096,
|
||||
"Memory": "1107296256"
|
||||
},
|
||||
"AllowlistedSystem": {
|
||||
"Streams": 18432,
|
||||
"StreamsInbound": 9216,
|
||||
"StreamsOutbound": 18432,
|
||||
"Conns": 1152,
|
||||
"ConnsInbound": 576,
|
||||
"ConnsOutbound": 1152,
|
||||
"FD": 16384,
|
||||
"Memory": "8724152320"
|
||||
},
|
||||
"AllowlistedTransient": {
|
||||
"Streams": 2304,
|
||||
"StreamsInbound": 1152,
|
||||
"StreamsOutbound": 2304,
|
||||
"Conns": 320,
|
||||
"ConnsInbound": 160,
|
||||
"ConnsOutbound": 320,
|
||||
"FD": 4096,
|
||||
"Memory": "1107296256"
|
||||
},
|
||||
"ServiceDefault": {
|
||||
"Streams": 20480,
|
||||
"StreamsInbound": 5120,
|
||||
"StreamsOutbound": 20480,
|
||||
"Conns": "blockAll",
|
||||
"ConnsInbound": "blockAll",
|
||||
"ConnsOutbound": "blockAll",
|
||||
"FD": "blockAll",
|
||||
"Memory": "1140850688"
|
||||
},
|
||||
"ServicePeerDefault": {
|
||||
"Streams": 320,
|
||||
"StreamsInbound": 160,
|
||||
"StreamsOutbound": 320,
|
||||
"Conns": "blockAll",
|
||||
"ConnsInbound": "blockAll",
|
||||
"ConnsOutbound": "blockAll",
|
||||
"FD": "blockAll",
|
||||
"Memory": "50331648"
|
||||
},
|
||||
"ProtocolDefault": {
|
||||
"Streams": 6144,
|
||||
"StreamsInbound": 2560,
|
||||
"StreamsOutbound": 6144,
|
||||
"Conns": "blockAll",
|
||||
"ConnsInbound": "blockAll",
|
||||
"ConnsOutbound": "blockAll",
|
||||
"FD": "blockAll",
|
||||
"Memory": "1442840576"
|
||||
},
|
||||
"ProtocolPeerDefault": {
|
||||
"Streams": 384,
|
||||
"StreamsInbound": 96,
|
||||
"StreamsOutbound": 192,
|
||||
"Conns": "blockAll",
|
||||
"ConnsInbound": "blockAll",
|
||||
"ConnsOutbound": "blockAll",
|
||||
"FD": "blockAll",
|
||||
"Memory": "16777248"
|
||||
},
|
||||
"PeerDefault": {
|
||||
"Streams": 2560,
|
||||
"StreamsInbound": 1280,
|
||||
"StreamsOutbound": 2560,
|
||||
"Conns": 8,
|
||||
"ConnsInbound": 8,
|
||||
"ConnsOutbound": 8,
|
||||
"FD": 256,
|
||||
"Memory": "1140850688"
|
||||
},
|
||||
"Conn": {
|
||||
"Streams": "blockAll",
|
||||
"StreamsInbound": "blockAll",
|
||||
"StreamsOutbound": "blockAll",
|
||||
"Conns": 1,
|
||||
"ConnsInbound": 1,
|
||||
"ConnsOutbound": 1,
|
||||
"FD": 1,
|
||||
"Memory": "33554432"
|
||||
},
|
||||
"Stream": {
|
||||
"Streams": 1,
|
||||
"StreamsInbound": 1,
|
||||
"StreamsOutbound": 1,
|
||||
"Conns": "blockAll",
|
||||
"ConnsInbound": "blockAll",
|
||||
"ConnsOutbound": "blockAll",
|
||||
"FD": "blockAll",
|
||||
"Memory": "16777216"
|
||||
}
|
||||
}
|
||||
879
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go
generated
vendored
Normal file
879
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go
generated
vendored
Normal file
@@ -0,0 +1,879 @@
|
||||
package rcmgr
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
"github.com/pbnjay/memory"
|
||||
)
|
||||
|
||||
type baseLimitConfig struct {
|
||||
BaseLimit BaseLimit
|
||||
BaseLimitIncrease BaseLimitIncrease
|
||||
}
|
||||
|
||||
// ScalingLimitConfig is a struct for configuring default limits.
|
||||
// {}BaseLimit is the limits that Apply for a minimal node (128 MB of memory for libp2p) and 256 file descriptors.
|
||||
// {}LimitIncrease is the additional limit granted for every additional 1 GB of RAM.
|
||||
type ScalingLimitConfig struct {
|
||||
SystemBaseLimit BaseLimit
|
||||
SystemLimitIncrease BaseLimitIncrease
|
||||
|
||||
TransientBaseLimit BaseLimit
|
||||
TransientLimitIncrease BaseLimitIncrease
|
||||
|
||||
AllowlistedSystemBaseLimit BaseLimit
|
||||
AllowlistedSystemLimitIncrease BaseLimitIncrease
|
||||
|
||||
AllowlistedTransientBaseLimit BaseLimit
|
||||
AllowlistedTransientLimitIncrease BaseLimitIncrease
|
||||
|
||||
ServiceBaseLimit BaseLimit
|
||||
ServiceLimitIncrease BaseLimitIncrease
|
||||
ServiceLimits map[string]baseLimitConfig // use AddServiceLimit to modify
|
||||
|
||||
ServicePeerBaseLimit BaseLimit
|
||||
ServicePeerLimitIncrease BaseLimitIncrease
|
||||
ServicePeerLimits map[string]baseLimitConfig // use AddServicePeerLimit to modify
|
||||
|
||||
ProtocolBaseLimit BaseLimit
|
||||
ProtocolLimitIncrease BaseLimitIncrease
|
||||
ProtocolLimits map[protocol.ID]baseLimitConfig // use AddProtocolLimit to modify
|
||||
|
||||
ProtocolPeerBaseLimit BaseLimit
|
||||
ProtocolPeerLimitIncrease BaseLimitIncrease
|
||||
ProtocolPeerLimits map[protocol.ID]baseLimitConfig // use AddProtocolPeerLimit to modify
|
||||
|
||||
PeerBaseLimit BaseLimit
|
||||
PeerLimitIncrease BaseLimitIncrease
|
||||
PeerLimits map[peer.ID]baseLimitConfig // use AddPeerLimit to modify
|
||||
|
||||
ConnBaseLimit BaseLimit
|
||||
ConnLimitIncrease BaseLimitIncrease
|
||||
|
||||
StreamBaseLimit BaseLimit
|
||||
StreamLimitIncrease BaseLimitIncrease
|
||||
}
|
||||
|
||||
func (cfg *ScalingLimitConfig) AddServiceLimit(svc string, base BaseLimit, inc BaseLimitIncrease) {
|
||||
if cfg.ServiceLimits == nil {
|
||||
cfg.ServiceLimits = make(map[string]baseLimitConfig)
|
||||
}
|
||||
cfg.ServiceLimits[svc] = baseLimitConfig{
|
||||
BaseLimit: base,
|
||||
BaseLimitIncrease: inc,
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *ScalingLimitConfig) AddProtocolLimit(proto protocol.ID, base BaseLimit, inc BaseLimitIncrease) {
|
||||
if cfg.ProtocolLimits == nil {
|
||||
cfg.ProtocolLimits = make(map[protocol.ID]baseLimitConfig)
|
||||
}
|
||||
cfg.ProtocolLimits[proto] = baseLimitConfig{
|
||||
BaseLimit: base,
|
||||
BaseLimitIncrease: inc,
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *ScalingLimitConfig) AddPeerLimit(p peer.ID, base BaseLimit, inc BaseLimitIncrease) {
|
||||
if cfg.PeerLimits == nil {
|
||||
cfg.PeerLimits = make(map[peer.ID]baseLimitConfig)
|
||||
}
|
||||
cfg.PeerLimits[p] = baseLimitConfig{
|
||||
BaseLimit: base,
|
||||
BaseLimitIncrease: inc,
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *ScalingLimitConfig) AddServicePeerLimit(svc string, base BaseLimit, inc BaseLimitIncrease) {
|
||||
if cfg.ServicePeerLimits == nil {
|
||||
cfg.ServicePeerLimits = make(map[string]baseLimitConfig)
|
||||
}
|
||||
cfg.ServicePeerLimits[svc] = baseLimitConfig{
|
||||
BaseLimit: base,
|
||||
BaseLimitIncrease: inc,
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *ScalingLimitConfig) AddProtocolPeerLimit(proto protocol.ID, base BaseLimit, inc BaseLimitIncrease) {
|
||||
if cfg.ProtocolPeerLimits == nil {
|
||||
cfg.ProtocolPeerLimits = make(map[protocol.ID]baseLimitConfig)
|
||||
}
|
||||
cfg.ProtocolPeerLimits[proto] = baseLimitConfig{
|
||||
BaseLimit: base,
|
||||
BaseLimitIncrease: inc,
|
||||
}
|
||||
}
|
||||
|
||||
type LimitVal int
|
||||
|
||||
const (
|
||||
// DefaultLimit is the default value for resources. The exact value depends on the context, but will get values from `DefaultLimits`.
|
||||
DefaultLimit LimitVal = 0
|
||||
// Unlimited is the value for unlimited resources. An arbitrarily high number will also work.
|
||||
Unlimited LimitVal = -1
|
||||
// BlockAllLimit is the LimitVal for allowing no amount of resources.
|
||||
BlockAllLimit LimitVal = -2
|
||||
)
|
||||
|
||||
func (l LimitVal) MarshalJSON() ([]byte, error) {
|
||||
if l == Unlimited {
|
||||
return json.Marshal("unlimited")
|
||||
} else if l == DefaultLimit {
|
||||
return json.Marshal("default")
|
||||
} else if l == BlockAllLimit {
|
||||
return json.Marshal("blockAll")
|
||||
}
|
||||
return json.Marshal(int(l))
|
||||
}
|
||||
|
||||
func (l *LimitVal) UnmarshalJSON(b []byte) error {
|
||||
if string(b) == `"default"` {
|
||||
*l = DefaultLimit
|
||||
return nil
|
||||
} else if string(b) == `"unlimited"` {
|
||||
*l = Unlimited
|
||||
return nil
|
||||
} else if string(b) == `"blockAll"` {
|
||||
*l = BlockAllLimit
|
||||
return nil
|
||||
}
|
||||
|
||||
var val int
|
||||
if err := json.Unmarshal(b, &val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if val == 0 {
|
||||
// If there is an explicit 0 in the JSON we should interpret this as block all.
|
||||
*l = BlockAllLimit
|
||||
return nil
|
||||
}
|
||||
|
||||
*l = LimitVal(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l LimitVal) Build(defaultVal int) int {
|
||||
if l == DefaultLimit {
|
||||
return defaultVal
|
||||
}
|
||||
if l == Unlimited {
|
||||
return math.MaxInt
|
||||
}
|
||||
if l == BlockAllLimit {
|
||||
return 0
|
||||
}
|
||||
return int(l)
|
||||
}
|
||||
|
||||
type LimitVal64 int64
|
||||
|
||||
const (
|
||||
// Default is the default value for resources.
|
||||
DefaultLimit64 LimitVal64 = 0
|
||||
// Unlimited is the value for unlimited resources.
|
||||
Unlimited64 LimitVal64 = -1
|
||||
// BlockAllLimit64 is the LimitVal for allowing no amount of resources.
|
||||
BlockAllLimit64 LimitVal64 = -2
|
||||
)
|
||||
|
||||
func (l LimitVal64) MarshalJSON() ([]byte, error) {
|
||||
if l == Unlimited64 {
|
||||
return json.Marshal("unlimited")
|
||||
} else if l == DefaultLimit64 {
|
||||
return json.Marshal("default")
|
||||
} else if l == BlockAllLimit64 {
|
||||
return json.Marshal("blockAll")
|
||||
}
|
||||
|
||||
// Convert this to a string because JSON doesn't support 64-bit integers.
|
||||
return json.Marshal(strconv.FormatInt(int64(l), 10))
|
||||
}
|
||||
|
||||
func (l *LimitVal64) UnmarshalJSON(b []byte) error {
|
||||
if string(b) == `"default"` {
|
||||
*l = DefaultLimit64
|
||||
return nil
|
||||
} else if string(b) == `"unlimited"` {
|
||||
*l = Unlimited64
|
||||
return nil
|
||||
} else if string(b) == `"blockAll"` {
|
||||
*l = BlockAllLimit64
|
||||
return nil
|
||||
}
|
||||
|
||||
var val string
|
||||
if err := json.Unmarshal(b, &val); err != nil {
|
||||
// Is this an integer? Possible because of backwards compatibility.
|
||||
var val int
|
||||
if err := json.Unmarshal(b, &val); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal limit value: %w", err)
|
||||
}
|
||||
|
||||
if val == 0 {
|
||||
// If there is an explicit 0 in the JSON we should interpret this as block all.
|
||||
*l = BlockAllLimit64
|
||||
return nil
|
||||
}
|
||||
|
||||
*l = LimitVal64(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
i, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
// If there is an explicit 0 in the JSON we should interpret this as block all.
|
||||
*l = BlockAllLimit64
|
||||
return nil
|
||||
}
|
||||
|
||||
*l = LimitVal64(i)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l LimitVal64) Build(defaultVal int64) int64 {
|
||||
if l == DefaultLimit64 {
|
||||
return defaultVal
|
||||
}
|
||||
if l == Unlimited64 {
|
||||
return math.MaxInt64
|
||||
}
|
||||
if l == BlockAllLimit64 {
|
||||
return 0
|
||||
}
|
||||
return int64(l)
|
||||
}
|
||||
|
||||
// ResourceLimits is the type for basic resource limits.
|
||||
type ResourceLimits struct {
|
||||
Streams LimitVal `json:",omitempty"`
|
||||
StreamsInbound LimitVal `json:",omitempty"`
|
||||
StreamsOutbound LimitVal `json:",omitempty"`
|
||||
Conns LimitVal `json:",omitempty"`
|
||||
ConnsInbound LimitVal `json:",omitempty"`
|
||||
ConnsOutbound LimitVal `json:",omitempty"`
|
||||
FD LimitVal `json:",omitempty"`
|
||||
Memory LimitVal64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (l *ResourceLimits) IsDefault() bool {
|
||||
if l == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if l.Streams == DefaultLimit &&
|
||||
l.StreamsInbound == DefaultLimit &&
|
||||
l.StreamsOutbound == DefaultLimit &&
|
||||
l.Conns == DefaultLimit &&
|
||||
l.ConnsInbound == DefaultLimit &&
|
||||
l.ConnsOutbound == DefaultLimit &&
|
||||
l.FD == DefaultLimit &&
|
||||
l.Memory == DefaultLimit64 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (l *ResourceLimits) ToMaybeNilPtr() *ResourceLimits {
|
||||
if l.IsDefault() {
|
||||
return nil
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Apply overwrites all default limits with the values of l2
|
||||
func (l *ResourceLimits) Apply(l2 ResourceLimits) {
|
||||
if l.Streams == DefaultLimit {
|
||||
l.Streams = l2.Streams
|
||||
}
|
||||
if l.StreamsInbound == DefaultLimit {
|
||||
l.StreamsInbound = l2.StreamsInbound
|
||||
}
|
||||
if l.StreamsOutbound == DefaultLimit {
|
||||
l.StreamsOutbound = l2.StreamsOutbound
|
||||
}
|
||||
if l.Conns == DefaultLimit {
|
||||
l.Conns = l2.Conns
|
||||
}
|
||||
if l.ConnsInbound == DefaultLimit {
|
||||
l.ConnsInbound = l2.ConnsInbound
|
||||
}
|
||||
if l.ConnsOutbound == DefaultLimit {
|
||||
l.ConnsOutbound = l2.ConnsOutbound
|
||||
}
|
||||
if l.FD == DefaultLimit {
|
||||
l.FD = l2.FD
|
||||
}
|
||||
if l.Memory == DefaultLimit64 {
|
||||
l.Memory = l2.Memory
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ResourceLimits) Build(defaults Limit) BaseLimit {
|
||||
if l == nil {
|
||||
return BaseLimit{
|
||||
Streams: defaults.GetStreamTotalLimit(),
|
||||
StreamsInbound: defaults.GetStreamLimit(network.DirInbound),
|
||||
StreamsOutbound: defaults.GetStreamLimit(network.DirOutbound),
|
||||
Conns: defaults.GetConnTotalLimit(),
|
||||
ConnsInbound: defaults.GetConnLimit(network.DirInbound),
|
||||
ConnsOutbound: defaults.GetConnLimit(network.DirOutbound),
|
||||
FD: defaults.GetFDLimit(),
|
||||
Memory: defaults.GetMemoryLimit(),
|
||||
}
|
||||
}
|
||||
|
||||
return BaseLimit{
|
||||
Streams: l.Streams.Build(defaults.GetStreamTotalLimit()),
|
||||
StreamsInbound: l.StreamsInbound.Build(defaults.GetStreamLimit(network.DirInbound)),
|
||||
StreamsOutbound: l.StreamsOutbound.Build(defaults.GetStreamLimit(network.DirOutbound)),
|
||||
Conns: l.Conns.Build(defaults.GetConnTotalLimit()),
|
||||
ConnsInbound: l.ConnsInbound.Build(defaults.GetConnLimit(network.DirInbound)),
|
||||
ConnsOutbound: l.ConnsOutbound.Build(defaults.GetConnLimit(network.DirOutbound)),
|
||||
FD: l.FD.Build(defaults.GetFDLimit()),
|
||||
Memory: l.Memory.Build(defaults.GetMemoryLimit()),
|
||||
}
|
||||
}
|
||||
|
||||
type PartialLimitConfig struct {
|
||||
System ResourceLimits `json:",omitempty"`
|
||||
Transient ResourceLimits `json:",omitempty"`
|
||||
|
||||
// Limits that are applied to resources with an allowlisted multiaddr.
|
||||
// These will only be used if the normal System & Transient limits are
|
||||
// reached.
|
||||
AllowlistedSystem ResourceLimits `json:",omitempty"`
|
||||
AllowlistedTransient ResourceLimits `json:",omitempty"`
|
||||
|
||||
ServiceDefault ResourceLimits `json:",omitempty"`
|
||||
Service map[string]ResourceLimits `json:",omitempty"`
|
||||
|
||||
ServicePeerDefault ResourceLimits `json:",omitempty"`
|
||||
ServicePeer map[string]ResourceLimits `json:",omitempty"`
|
||||
|
||||
ProtocolDefault ResourceLimits `json:",omitempty"`
|
||||
Protocol map[protocol.ID]ResourceLimits `json:",omitempty"`
|
||||
|
||||
ProtocolPeerDefault ResourceLimits `json:",omitempty"`
|
||||
ProtocolPeer map[protocol.ID]ResourceLimits `json:",omitempty"`
|
||||
|
||||
PeerDefault ResourceLimits `json:",omitempty"`
|
||||
Peer map[peer.ID]ResourceLimits `json:",omitempty"`
|
||||
|
||||
Conn ResourceLimits `json:",omitempty"`
|
||||
Stream ResourceLimits `json:",omitempty"`
|
||||
}
|
||||
|
||||
func (cfg *PartialLimitConfig) MarshalJSON() ([]byte, error) {
|
||||
// we want to marshal the encoded peer id
|
||||
encodedPeerMap := make(map[string]ResourceLimits, len(cfg.Peer))
|
||||
for p, v := range cfg.Peer {
|
||||
encodedPeerMap[p.String()] = v
|
||||
}
|
||||
|
||||
type Alias PartialLimitConfig
|
||||
return json.Marshal(&struct {
|
||||
*Alias
|
||||
// String so we can have the properly marshalled peer id
|
||||
Peer map[string]ResourceLimits `json:",omitempty"`
|
||||
|
||||
// The rest of the fields as pointers so that we omit empty values in the serialized result
|
||||
System *ResourceLimits `json:",omitempty"`
|
||||
Transient *ResourceLimits `json:",omitempty"`
|
||||
AllowlistedSystem *ResourceLimits `json:",omitempty"`
|
||||
AllowlistedTransient *ResourceLimits `json:",omitempty"`
|
||||
|
||||
ServiceDefault *ResourceLimits `json:",omitempty"`
|
||||
|
||||
ServicePeerDefault *ResourceLimits `json:",omitempty"`
|
||||
|
||||
ProtocolDefault *ResourceLimits `json:",omitempty"`
|
||||
|
||||
ProtocolPeerDefault *ResourceLimits `json:",omitempty"`
|
||||
|
||||
PeerDefault *ResourceLimits `json:",omitempty"`
|
||||
|
||||
Conn *ResourceLimits `json:",omitempty"`
|
||||
Stream *ResourceLimits `json:",omitempty"`
|
||||
}{
|
||||
Alias: (*Alias)(cfg),
|
||||
Peer: encodedPeerMap,
|
||||
|
||||
System: cfg.System.ToMaybeNilPtr(),
|
||||
Transient: cfg.Transient.ToMaybeNilPtr(),
|
||||
AllowlistedSystem: cfg.AllowlistedSystem.ToMaybeNilPtr(),
|
||||
AllowlistedTransient: cfg.AllowlistedTransient.ToMaybeNilPtr(),
|
||||
ServiceDefault: cfg.ServiceDefault.ToMaybeNilPtr(),
|
||||
ServicePeerDefault: cfg.ServicePeerDefault.ToMaybeNilPtr(),
|
||||
ProtocolDefault: cfg.ProtocolDefault.ToMaybeNilPtr(),
|
||||
ProtocolPeerDefault: cfg.ProtocolPeerDefault.ToMaybeNilPtr(),
|
||||
PeerDefault: cfg.PeerDefault.ToMaybeNilPtr(),
|
||||
Conn: cfg.Conn.ToMaybeNilPtr(),
|
||||
Stream: cfg.Stream.ToMaybeNilPtr(),
|
||||
})
|
||||
}
|
||||
|
||||
func applyResourceLimitsMap[K comparable](this *map[K]ResourceLimits, other map[K]ResourceLimits, fallbackDefault ResourceLimits) {
|
||||
for k, l := range *this {
|
||||
r := fallbackDefault
|
||||
if l2, ok := other[k]; ok {
|
||||
r = l2
|
||||
}
|
||||
l.Apply(r)
|
||||
(*this)[k] = l
|
||||
}
|
||||
if *this == nil && other != nil {
|
||||
*this = make(map[K]ResourceLimits)
|
||||
}
|
||||
for k, l := range other {
|
||||
if _, ok := (*this)[k]; !ok {
|
||||
(*this)[k] = l
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cfg *PartialLimitConfig) Apply(c PartialLimitConfig) {
|
||||
cfg.System.Apply(c.System)
|
||||
cfg.Transient.Apply(c.Transient)
|
||||
cfg.AllowlistedSystem.Apply(c.AllowlistedSystem)
|
||||
cfg.AllowlistedTransient.Apply(c.AllowlistedTransient)
|
||||
cfg.ServiceDefault.Apply(c.ServiceDefault)
|
||||
cfg.ServicePeerDefault.Apply(c.ServicePeerDefault)
|
||||
cfg.ProtocolDefault.Apply(c.ProtocolDefault)
|
||||
cfg.ProtocolPeerDefault.Apply(c.ProtocolPeerDefault)
|
||||
cfg.PeerDefault.Apply(c.PeerDefault)
|
||||
cfg.Conn.Apply(c.Conn)
|
||||
cfg.Stream.Apply(c.Stream)
|
||||
|
||||
applyResourceLimitsMap(&cfg.Service, c.Service, cfg.ServiceDefault)
|
||||
applyResourceLimitsMap(&cfg.ServicePeer, c.ServicePeer, cfg.ServicePeerDefault)
|
||||
applyResourceLimitsMap(&cfg.Protocol, c.Protocol, cfg.ProtocolDefault)
|
||||
applyResourceLimitsMap(&cfg.ProtocolPeer, c.ProtocolPeer, cfg.ProtocolPeerDefault)
|
||||
applyResourceLimitsMap(&cfg.Peer, c.Peer, cfg.PeerDefault)
|
||||
}
|
||||
|
||||
func (cfg PartialLimitConfig) Build(defaults ConcreteLimitConfig) ConcreteLimitConfig {
|
||||
out := defaults
|
||||
|
||||
out.system = cfg.System.Build(defaults.system)
|
||||
out.transient = cfg.Transient.Build(defaults.transient)
|
||||
out.allowlistedSystem = cfg.AllowlistedSystem.Build(defaults.allowlistedSystem)
|
||||
out.allowlistedTransient = cfg.AllowlistedTransient.Build(defaults.allowlistedTransient)
|
||||
out.serviceDefault = cfg.ServiceDefault.Build(defaults.serviceDefault)
|
||||
out.servicePeerDefault = cfg.ServicePeerDefault.Build(defaults.servicePeerDefault)
|
||||
out.protocolDefault = cfg.ProtocolDefault.Build(defaults.protocolDefault)
|
||||
out.protocolPeerDefault = cfg.ProtocolPeerDefault.Build(defaults.protocolPeerDefault)
|
||||
out.peerDefault = cfg.PeerDefault.Build(defaults.peerDefault)
|
||||
out.conn = cfg.Conn.Build(defaults.conn)
|
||||
out.stream = cfg.Stream.Build(defaults.stream)
|
||||
|
||||
out.service = buildMapWithDefault(cfg.Service, defaults.service, out.serviceDefault)
|
||||
out.servicePeer = buildMapWithDefault(cfg.ServicePeer, defaults.servicePeer, out.servicePeerDefault)
|
||||
out.protocol = buildMapWithDefault(cfg.Protocol, defaults.protocol, out.protocolDefault)
|
||||
out.protocolPeer = buildMapWithDefault(cfg.ProtocolPeer, defaults.protocolPeer, out.protocolPeerDefault)
|
||||
out.peer = buildMapWithDefault(cfg.Peer, defaults.peer, out.peerDefault)
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func buildMapWithDefault[K comparable](definedLimits map[K]ResourceLimits, defaults map[K]BaseLimit, fallbackDefault BaseLimit) map[K]BaseLimit {
|
||||
if definedLimits == nil && defaults == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make(map[K]BaseLimit)
|
||||
for k, l := range defaults {
|
||||
out[k] = l
|
||||
}
|
||||
|
||||
for k, l := range definedLimits {
|
||||
if defaultForKey, ok := out[k]; ok {
|
||||
out[k] = l.Build(defaultForKey)
|
||||
} else {
|
||||
out[k] = l.Build(fallbackDefault)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ConcreteLimitConfig is similar to PartialLimitConfig, but all values are defined.
|
||||
// There is no unset "default" value. Commonly constructed by calling
|
||||
// PartialLimitConfig.Build(rcmgr.DefaultLimits.AutoScale())
|
||||
type ConcreteLimitConfig struct {
|
||||
system BaseLimit
|
||||
transient BaseLimit
|
||||
|
||||
// Limits that are applied to resources with an allowlisted multiaddr.
|
||||
// These will only be used if the normal System & Transient limits are
|
||||
// reached.
|
||||
allowlistedSystem BaseLimit
|
||||
allowlistedTransient BaseLimit
|
||||
|
||||
serviceDefault BaseLimit
|
||||
service map[string]BaseLimit
|
||||
|
||||
servicePeerDefault BaseLimit
|
||||
servicePeer map[string]BaseLimit
|
||||
|
||||
protocolDefault BaseLimit
|
||||
protocol map[protocol.ID]BaseLimit
|
||||
|
||||
protocolPeerDefault BaseLimit
|
||||
protocolPeer map[protocol.ID]BaseLimit
|
||||
|
||||
peerDefault BaseLimit
|
||||
peer map[peer.ID]BaseLimit
|
||||
|
||||
conn BaseLimit
|
||||
stream BaseLimit
|
||||
}
|
||||
|
||||
func resourceLimitsMapFromBaseLimitMap[K comparable](baseLimitMap map[K]BaseLimit) map[K]ResourceLimits {
|
||||
if baseLimitMap == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make(map[K]ResourceLimits)
|
||||
for k, l := range baseLimitMap {
|
||||
out[k] = l.ToResourceLimits()
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ToPartialLimitConfig converts a ConcreteLimitConfig to a PartialLimitConfig.
|
||||
// The returned PartialLimitConfig will have no default values.
|
||||
func (cfg ConcreteLimitConfig) ToPartialLimitConfig() PartialLimitConfig {
|
||||
return PartialLimitConfig{
|
||||
System: cfg.system.ToResourceLimits(),
|
||||
Transient: cfg.transient.ToResourceLimits(),
|
||||
AllowlistedSystem: cfg.allowlistedSystem.ToResourceLimits(),
|
||||
AllowlistedTransient: cfg.allowlistedTransient.ToResourceLimits(),
|
||||
ServiceDefault: cfg.serviceDefault.ToResourceLimits(),
|
||||
Service: resourceLimitsMapFromBaseLimitMap(cfg.service),
|
||||
ServicePeerDefault: cfg.servicePeerDefault.ToResourceLimits(),
|
||||
ServicePeer: resourceLimitsMapFromBaseLimitMap(cfg.servicePeer),
|
||||
ProtocolDefault: cfg.protocolDefault.ToResourceLimits(),
|
||||
Protocol: resourceLimitsMapFromBaseLimitMap(cfg.protocol),
|
||||
ProtocolPeerDefault: cfg.protocolPeerDefault.ToResourceLimits(),
|
||||
ProtocolPeer: resourceLimitsMapFromBaseLimitMap(cfg.protocolPeer),
|
||||
PeerDefault: cfg.peerDefault.ToResourceLimits(),
|
||||
Peer: resourceLimitsMapFromBaseLimitMap(cfg.peer),
|
||||
Conn: cfg.conn.ToResourceLimits(),
|
||||
Stream: cfg.stream.ToResourceLimits(),
|
||||
}
|
||||
}
|
||||
|
||||
// Scale scales up a limit configuration.
// memory is the amount of memory that the stack is allowed to consume,
// for a dedicated node it's recommended to use 1/8 of the installed system memory.
// If memory is smaller than 128 MB, the base configuration will be used.
func (cfg *ScalingLimitConfig) Scale(memory int64, numFD int) ConcreteLimitConfig {
	lc := ConcreteLimitConfig{
		system:               scale(cfg.SystemBaseLimit, cfg.SystemLimitIncrease, memory, numFD),
		transient:            scale(cfg.TransientBaseLimit, cfg.TransientLimitIncrease, memory, numFD),
		allowlistedSystem:    scale(cfg.AllowlistedSystemBaseLimit, cfg.AllowlistedSystemLimitIncrease, memory, numFD),
		allowlistedTransient: scale(cfg.AllowlistedTransientBaseLimit, cfg.AllowlistedTransientLimitIncrease, memory, numFD),
		serviceDefault:       scale(cfg.ServiceBaseLimit, cfg.ServiceLimitIncrease, memory, numFD),
		servicePeerDefault:   scale(cfg.ServicePeerBaseLimit, cfg.ServicePeerLimitIncrease, memory, numFD),
		protocolDefault:      scale(cfg.ProtocolBaseLimit, cfg.ProtocolLimitIncrease, memory, numFD),
		protocolPeerDefault:  scale(cfg.ProtocolPeerBaseLimit, cfg.ProtocolPeerLimitIncrease, memory, numFD),
		peerDefault:          scale(cfg.PeerBaseLimit, cfg.PeerLimitIncrease, memory, numFD),
		conn:                 scale(cfg.ConnBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
		stream:               scale(cfg.StreamBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
	}
	if cfg.ServiceLimits != nil {
		lc.service = make(map[string]BaseLimit)
		for svc, l := range cfg.ServiceLimits {
			lc.service[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
		}
	}
	if cfg.ProtocolLimits != nil {
		lc.protocol = make(map[protocol.ID]BaseLimit)
		for proto, l := range cfg.ProtocolLimits {
			lc.protocol[proto] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
		}
	}
	if cfg.PeerLimits != nil {
		lc.peer = make(map[peer.ID]BaseLimit)
		for p, l := range cfg.PeerLimits {
			lc.peer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
		}
	}
	if cfg.ServicePeerLimits != nil {
		lc.servicePeer = make(map[string]BaseLimit)
		for svc, l := range cfg.ServicePeerLimits {
			lc.servicePeer[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
		}
	}
	if cfg.ProtocolPeerLimits != nil {
		lc.protocolPeer = make(map[protocol.ID]BaseLimit)
		for p, l := range cfg.ProtocolPeerLimits {
			lc.protocolPeer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
		}
	}
	return lc
}

func (cfg *ScalingLimitConfig) AutoScale() ConcreteLimitConfig {
	return cfg.Scale(
		int64(memory.TotalMemory())/8,
		getNumFDs()/2,
	)
}
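// Illustrative sketch (not part of the upstream file): calling Scale directly with
// explicit figures, or letting AutoScale derive them. The 4 GiB / 1024 FD inputs
// are arbitrary example values.
//
//	scaled := DefaultLimits.Scale(4<<30, 1024) // 4 GiB of memory, 1024 file descriptors
//	auto := DefaultLimits.AutoScale()          // 1/8 of system memory, half the FD limit
//	_, _ = scaled, auto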
|
||||
|
||||
func scale(base BaseLimit, inc BaseLimitIncrease, memory int64, numFD int) BaseLimit {
	// mebibytesAvailable represents how many MiBs we're allowed to use. Used to
	// scale the limits. If this is below 128MiB we set it to 0 to just use the
	// base amounts.
	var mebibytesAvailable int
	if memory > 128<<20 {
		mebibytesAvailable = int((memory) >> 20)
	}
	l := BaseLimit{
		StreamsInbound:  base.StreamsInbound + (inc.StreamsInbound*mebibytesAvailable)>>10,
		StreamsOutbound: base.StreamsOutbound + (inc.StreamsOutbound*mebibytesAvailable)>>10,
		Streams:         base.Streams + (inc.Streams*mebibytesAvailable)>>10,
		ConnsInbound:    base.ConnsInbound + (inc.ConnsInbound*mebibytesAvailable)>>10,
		ConnsOutbound:   base.ConnsOutbound + (inc.ConnsOutbound*mebibytesAvailable)>>10,
		Conns:           base.Conns + (inc.Conns*mebibytesAvailable)>>10,
		Memory:          base.Memory + (inc.Memory*int64(mebibytesAvailable))>>10,
		FD:              base.FD,
	}
	if inc.FDFraction > 0 && numFD > 0 {
		l.FD = int(inc.FDFraction * float64(numFD))
		if l.FD < base.FD {
			// Use at least the base amount
			l.FD = base.FD
		}
	}
	return l
}
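// Worked example of the arithmetic above (illustrative, not part of the upstream
// file): with memory = 4 GiB, mebibytesAvailable = 4096, so every counter grows by
// (inc*4096)>>10 = inc*4. Using the system defaults defined below:
//
//	sys := scale(DefaultLimits.SystemBaseLimit, DefaultLimits.SystemLimitIncrease, 4<<30, 1024)
//	// sys.StreamsInbound == 64*16 + (64*16*4096)>>10 == 5120
//	// sys.FD == int(1 * float64(1024)) == 1024 (FDFraction is 1 for the system scope)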
|
||||
|
||||
// DefaultLimits are the limits used by the default limiter constructors.
|
||||
var DefaultLimits = ScalingLimitConfig{
|
||||
SystemBaseLimit: BaseLimit{
|
||||
ConnsInbound: 64,
|
||||
ConnsOutbound: 128,
|
||||
Conns: 128,
|
||||
StreamsInbound: 64 * 16,
|
||||
StreamsOutbound: 128 * 16,
|
||||
Streams: 128 * 16,
|
||||
Memory: 128 << 20,
|
||||
FD: 256,
|
||||
},
|
||||
|
||||
SystemLimitIncrease: BaseLimitIncrease{
|
||||
ConnsInbound: 64,
|
||||
ConnsOutbound: 128,
|
||||
Conns: 128,
|
||||
StreamsInbound: 64 * 16,
|
||||
StreamsOutbound: 128 * 16,
|
||||
Streams: 128 * 16,
|
||||
Memory: 1 << 30,
|
||||
FDFraction: 1,
|
||||
},
|
||||
|
||||
TransientBaseLimit: BaseLimit{
|
||||
ConnsInbound: 32,
|
||||
ConnsOutbound: 64,
|
||||
Conns: 64,
|
||||
StreamsInbound: 128,
|
||||
StreamsOutbound: 256,
|
||||
Streams: 256,
|
||||
Memory: 32 << 20,
|
||||
FD: 64,
|
||||
},
|
||||
|
||||
TransientLimitIncrease: BaseLimitIncrease{
|
||||
ConnsInbound: 16,
|
||||
ConnsOutbound: 32,
|
||||
Conns: 32,
|
||||
StreamsInbound: 128,
|
||||
StreamsOutbound: 256,
|
||||
Streams: 256,
|
||||
Memory: 128 << 20,
|
||||
FDFraction: 0.25,
|
||||
},
|
||||
|
||||
// Setting the allowlisted limits to be the same as the normal limits. The
|
||||
// allowlist only activates when you reach your normal system/transient
|
||||
// limits. So it's okay if these limits err on the side of being too big,
|
||||
// since most of the time you won't even use any of these. Tune these down
|
||||
// if you want to manage your resources against an allowlisted endpoint.
|
||||
AllowlistedSystemBaseLimit: BaseLimit{
|
||||
ConnsInbound: 64,
|
||||
ConnsOutbound: 128,
|
||||
Conns: 128,
|
||||
StreamsInbound: 64 * 16,
|
||||
StreamsOutbound: 128 * 16,
|
||||
Streams: 128 * 16,
|
||||
Memory: 128 << 20,
|
||||
FD: 256,
|
||||
},
|
||||
|
||||
AllowlistedSystemLimitIncrease: BaseLimitIncrease{
|
||||
ConnsInbound: 64,
|
||||
ConnsOutbound: 128,
|
||||
Conns: 128,
|
||||
StreamsInbound: 64 * 16,
|
||||
StreamsOutbound: 128 * 16,
|
||||
Streams: 128 * 16,
|
||||
Memory: 1 << 30,
|
||||
FDFraction: 1,
|
||||
},
|
||||
|
||||
AllowlistedTransientBaseLimit: BaseLimit{
|
||||
ConnsInbound: 32,
|
||||
ConnsOutbound: 64,
|
||||
Conns: 64,
|
||||
StreamsInbound: 128,
|
||||
StreamsOutbound: 256,
|
||||
Streams: 256,
|
||||
Memory: 32 << 20,
|
||||
FD: 64,
|
||||
},
|
||||
|
||||
AllowlistedTransientLimitIncrease: BaseLimitIncrease{
|
||||
ConnsInbound: 16,
|
||||
ConnsOutbound: 32,
|
||||
Conns: 32,
|
||||
StreamsInbound: 128,
|
||||
StreamsOutbound: 256,
|
||||
Streams: 256,
|
||||
Memory: 128 << 20,
|
||||
FDFraction: 0.25,
|
||||
},
|
||||
|
||||
ServiceBaseLimit: BaseLimit{
|
||||
StreamsInbound: 1024,
|
||||
StreamsOutbound: 4096,
|
||||
Streams: 4096,
|
||||
Memory: 64 << 20,
|
||||
},
|
||||
|
||||
ServiceLimitIncrease: BaseLimitIncrease{
|
||||
StreamsInbound: 512,
|
||||
StreamsOutbound: 2048,
|
||||
Streams: 2048,
|
||||
Memory: 128 << 20,
|
||||
},
|
||||
|
||||
ServicePeerBaseLimit: BaseLimit{
|
||||
StreamsInbound: 128,
|
||||
StreamsOutbound: 256,
|
||||
Streams: 256,
|
||||
Memory: 16 << 20,
|
||||
},
|
||||
|
||||
ServicePeerLimitIncrease: BaseLimitIncrease{
|
||||
StreamsInbound: 4,
|
||||
StreamsOutbound: 8,
|
||||
Streams: 8,
|
||||
Memory: 4 << 20,
|
||||
},
|
||||
|
||||
ProtocolBaseLimit: BaseLimit{
|
||||
StreamsInbound: 512,
|
||||
StreamsOutbound: 2048,
|
||||
Streams: 2048,
|
||||
Memory: 64 << 20,
|
||||
},
|
||||
|
||||
ProtocolLimitIncrease: BaseLimitIncrease{
|
||||
StreamsInbound: 256,
|
||||
StreamsOutbound: 512,
|
||||
Streams: 512,
|
||||
Memory: 164 << 20,
|
||||
},
|
||||
|
||||
ProtocolPeerBaseLimit: BaseLimit{
|
||||
StreamsInbound: 64,
|
||||
StreamsOutbound: 128,
|
||||
Streams: 256,
|
||||
Memory: 16 << 20,
|
||||
},
|
||||
|
||||
ProtocolPeerLimitIncrease: BaseLimitIncrease{
|
||||
StreamsInbound: 4,
|
||||
StreamsOutbound: 8,
|
||||
Streams: 16,
|
||||
Memory: 4,
|
||||
},
|
||||
|
||||
PeerBaseLimit: BaseLimit{
|
||||
// 8 for now so that it matches the number of concurrent dials we may do
|
||||
// in swarm_dial.go. With future smart dialing work we should bring this
|
||||
// down
|
||||
ConnsInbound: 8,
|
||||
ConnsOutbound: 8,
|
||||
Conns: 8,
|
||||
StreamsInbound: 256,
|
||||
StreamsOutbound: 512,
|
||||
Streams: 512,
|
||||
Memory: 64 << 20,
|
||||
FD: 4,
|
||||
},
|
||||
|
||||
PeerLimitIncrease: BaseLimitIncrease{
|
||||
StreamsInbound: 128,
|
||||
StreamsOutbound: 256,
|
||||
Streams: 256,
|
||||
Memory: 128 << 20,
|
||||
FDFraction: 1.0 / 64,
|
||||
},
|
||||
|
||||
ConnBaseLimit: BaseLimit{
|
||||
ConnsInbound: 1,
|
||||
ConnsOutbound: 1,
|
||||
Conns: 1,
|
||||
FD: 1,
|
||||
Memory: 32 << 20,
|
||||
},
|
||||
|
||||
StreamBaseLimit: BaseLimit{
|
||||
StreamsInbound: 1,
|
||||
StreamsOutbound: 1,
|
||||
Streams: 1,
|
||||
Memory: 16 << 20,
|
||||
},
|
||||
}
|
||||
|
||||
var infiniteBaseLimit = BaseLimit{
|
||||
Streams: math.MaxInt,
|
||||
StreamsInbound: math.MaxInt,
|
||||
StreamsOutbound: math.MaxInt,
|
||||
Conns: math.MaxInt,
|
||||
ConnsInbound: math.MaxInt,
|
||||
ConnsOutbound: math.MaxInt,
|
||||
FD: math.MaxInt,
|
||||
Memory: math.MaxInt64,
|
||||
}
|
||||
|
||||
// InfiniteLimits are a limiter configuration that uses unlimited limits, thus effectively not limiting anything.
|
||||
// Keep in mind that the operating system limits the number of file descriptors that an application can use.
|
||||
var InfiniteLimits = ConcreteLimitConfig{
|
||||
system: infiniteBaseLimit,
|
||||
transient: infiniteBaseLimit,
|
||||
allowlistedSystem: infiniteBaseLimit,
|
||||
allowlistedTransient: infiniteBaseLimit,
|
||||
serviceDefault: infiniteBaseLimit,
|
||||
servicePeerDefault: infiniteBaseLimit,
|
||||
protocolDefault: infiniteBaseLimit,
|
||||
protocolPeerDefault: infiniteBaseLimit,
|
||||
peerDefault: infiniteBaseLimit,
|
||||
conn: infiniteBaseLimit,
|
||||
stream: infiniteBaseLimit,
|
||||
}
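// Illustrative sketch (not part of the upstream file): InfiniteLimits is intended
// for effectively disabling resource management. NewFixedLimiter is assumed from
// this package's limiter API.
//
//	rm, err := NewResourceManager(NewFixedLimiter(InfiniteLimits))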
|
||||
168
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
package rcmgr
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
// MetricsReporter is an interface for collecting metrics from resource manager actions
type MetricsReporter interface {
	// AllowConn is invoked when opening a connection is allowed
	AllowConn(dir network.Direction, usefd bool)
	// BlockConn is invoked when opening a connection is blocked
	BlockConn(dir network.Direction, usefd bool)

	// AllowStream is invoked when opening a stream is allowed
	AllowStream(p peer.ID, dir network.Direction)
	// BlockStream is invoked when opening a stream is blocked
	BlockStream(p peer.ID, dir network.Direction)

	// AllowPeer is invoked when attaching a connection to a peer is allowed
	AllowPeer(p peer.ID)
	// BlockPeer is invoked when attaching a connection to a peer is blocked
	BlockPeer(p peer.ID)

	// AllowProtocol is invoked when setting the protocol for a stream is allowed
	AllowProtocol(proto protocol.ID)
	// BlockProtocol is invoked when setting the protocol for a stream is blocked
	BlockProtocol(proto protocol.ID)
	// BlockProtocolPeer is invoked when setting the protocol for a stream is blocked at the per protocol peer scope
	BlockProtocolPeer(proto protocol.ID, p peer.ID)

	// AllowService is invoked when setting the service for a stream is allowed
	AllowService(svc string)
	// BlockService is invoked when setting the service for a stream is blocked
	BlockService(svc string)
	// BlockServicePeer is invoked when setting the service for a stream is blocked at the per service peer scope
	BlockServicePeer(svc string, p peer.ID)

	// AllowMemory is invoked when a memory reservation is allowed
	AllowMemory(size int)
	// BlockMemory is invoked when a memory reservation is blocked
	BlockMemory(size int)
}
|
||||
|
||||
type metrics struct {
	reporter MetricsReporter
}

// WithMetrics is a resource manager option to enable metrics collection
func WithMetrics(reporter MetricsReporter) Option {
	return func(r *resourceManager) error {
		r.metrics = &metrics{reporter: reporter}
		return nil
	}
}
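// Illustrative sketch (not part of the upstream file): enabling metrics collection.
// myReporter is a hypothetical type implementing every MetricsReporter method, and
// limiter is assumed to be a previously constructed Limiter.
//
//	var reporter MetricsReporter = myReporter{}
//	rm, err := NewResourceManager(limiter, WithMetrics(reporter))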
|
||||
|
||||
func (m *metrics) AllowConn(dir network.Direction, usefd bool) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.AllowConn(dir, usefd)
|
||||
}
|
||||
|
||||
func (m *metrics) BlockConn(dir network.Direction, usefd bool) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.BlockConn(dir, usefd)
|
||||
}
|
||||
|
||||
func (m *metrics) AllowStream(p peer.ID, dir network.Direction) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.AllowStream(p, dir)
|
||||
}
|
||||
|
||||
func (m *metrics) BlockStream(p peer.ID, dir network.Direction) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.BlockStream(p, dir)
|
||||
}
|
||||
|
||||
func (m *metrics) AllowPeer(p peer.ID) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.AllowPeer(p)
|
||||
}
|
||||
|
||||
func (m *metrics) BlockPeer(p peer.ID) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.BlockPeer(p)
|
||||
}
|
||||
|
||||
func (m *metrics) AllowProtocol(proto protocol.ID) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.AllowProtocol(proto)
|
||||
}
|
||||
|
||||
func (m *metrics) BlockProtocol(proto protocol.ID) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.BlockProtocol(proto)
|
||||
}
|
||||
|
||||
func (m *metrics) BlockProtocolPeer(proto protocol.ID, p peer.ID) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.BlockProtocolPeer(proto, p)
|
||||
}
|
||||
|
||||
func (m *metrics) AllowService(svc string) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.AllowService(svc)
|
||||
}
|
||||
|
||||
func (m *metrics) BlockService(svc string) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.BlockService(svc)
|
||||
}
|
||||
|
||||
func (m *metrics) BlockServicePeer(svc string, p peer.ID) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.BlockServicePeer(svc, p)
|
||||
}
|
||||
|
||||
func (m *metrics) AllowMemory(size int) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.AllowMemory(size)
|
||||
}
|
||||
|
||||
func (m *metrics) BlockMemory(size int) {
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.reporter.BlockMemory(size)
|
||||
}
|
||||
878
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go
generated
vendored
Normal file
@@ -0,0 +1,878 @@
|
||||
package rcmgr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
var log = logging.Logger("rcmgr")
|
||||
|
||||
type resourceManager struct {
|
||||
limits Limiter
|
||||
|
||||
trace *trace
|
||||
metrics *metrics
|
||||
disableMetrics bool
|
||||
|
||||
allowlist *Allowlist
|
||||
|
||||
system *systemScope
|
||||
transient *transientScope
|
||||
|
||||
allowlistedSystem *systemScope
|
||||
allowlistedTransient *transientScope
|
||||
|
||||
cancelCtx context.Context
|
||||
cancel func()
|
||||
wg sync.WaitGroup
|
||||
|
||||
mx sync.Mutex
|
||||
svc map[string]*serviceScope
|
||||
proto map[protocol.ID]*protocolScope
|
||||
peer map[peer.ID]*peerScope
|
||||
|
||||
stickyProto map[protocol.ID]struct{}
|
||||
stickyPeer map[peer.ID]struct{}
|
||||
|
||||
connId, streamId int64
|
||||
}
|
||||
|
||||
var _ network.ResourceManager = (*resourceManager)(nil)
|
||||
|
||||
type systemScope struct {
|
||||
*resourceScope
|
||||
}
|
||||
|
||||
var _ network.ResourceScope = (*systemScope)(nil)
|
||||
|
||||
type transientScope struct {
|
||||
*resourceScope
|
||||
|
||||
system *systemScope
|
||||
}
|
||||
|
||||
var _ network.ResourceScope = (*transientScope)(nil)
|
||||
|
||||
type serviceScope struct {
|
||||
*resourceScope
|
||||
|
||||
service string
|
||||
rcmgr *resourceManager
|
||||
|
||||
peers map[peer.ID]*resourceScope
|
||||
}
|
||||
|
||||
var _ network.ServiceScope = (*serviceScope)(nil)
|
||||
|
||||
type protocolScope struct {
|
||||
*resourceScope
|
||||
|
||||
proto protocol.ID
|
||||
rcmgr *resourceManager
|
||||
|
||||
peers map[peer.ID]*resourceScope
|
||||
}
|
||||
|
||||
var _ network.ProtocolScope = (*protocolScope)(nil)
|
||||
|
||||
type peerScope struct {
|
||||
*resourceScope
|
||||
|
||||
peer peer.ID
|
||||
rcmgr *resourceManager
|
||||
}
|
||||
|
||||
var _ network.PeerScope = (*peerScope)(nil)
|
||||
|
||||
type connectionScope struct {
|
||||
*resourceScope
|
||||
|
||||
dir network.Direction
|
||||
usefd bool
|
||||
isAllowlisted bool
|
||||
rcmgr *resourceManager
|
||||
peer *peerScope
|
||||
endpoint multiaddr.Multiaddr
|
||||
}
|
||||
|
||||
var _ network.ConnScope = (*connectionScope)(nil)
|
||||
var _ network.ConnManagementScope = (*connectionScope)(nil)
|
||||
|
||||
type streamScope struct {
|
||||
*resourceScope
|
||||
|
||||
dir network.Direction
|
||||
rcmgr *resourceManager
|
||||
peer *peerScope
|
||||
svc *serviceScope
|
||||
proto *protocolScope
|
||||
|
||||
peerProtoScope *resourceScope
|
||||
peerSvcScope *resourceScope
|
||||
}
|
||||
|
||||
var _ network.StreamScope = (*streamScope)(nil)
|
||||
var _ network.StreamManagementScope = (*streamScope)(nil)
|
||||
|
||||
type Option func(*resourceManager) error

func NewResourceManager(limits Limiter, opts ...Option) (network.ResourceManager, error) {
	allowlist := newAllowlist()
	r := &resourceManager{
		limits:    limits,
		allowlist: &allowlist,
		svc:       make(map[string]*serviceScope),
		proto:     make(map[protocol.ID]*protocolScope),
		peer:      make(map[peer.ID]*peerScope),
	}

	for _, opt := range opts {
		if err := opt(r); err != nil {
			return nil, err
		}
	}

	if !r.disableMetrics {
		var sr TraceReporter
		sr, err := NewStatsTraceReporter()
		if err != nil {
			log.Errorf("failed to initialise StatsTraceReporter %s", err)
		} else {
			if r.trace == nil {
				r.trace = &trace{}
			}
			found := false
			for _, rep := range r.trace.reporters {
				if rep == sr {
					found = true
					break
				}
			}
			if !found {
				r.trace.reporters = append(r.trace.reporters, sr)
			}
		}
	}

	if err := r.trace.Start(limits); err != nil {
		return nil, err
	}

	r.system = newSystemScope(limits.GetSystemLimits(), r, "system")
	r.system.IncRef()
	r.transient = newTransientScope(limits.GetTransientLimits(), r, "transient", r.system.resourceScope)
	r.transient.IncRef()

	r.allowlistedSystem = newSystemScope(limits.GetAllowlistedSystemLimits(), r, "allowlistedSystem")
	r.allowlistedSystem.IncRef()
	r.allowlistedTransient = newTransientScope(limits.GetAllowlistedTransientLimits(), r, "allowlistedTransient", r.allowlistedSystem.resourceScope)
	r.allowlistedTransient.IncRef()

	r.cancelCtx, r.cancel = context.WithCancel(context.Background())

	r.wg.Add(1)
	go r.background()

	return r, nil
}
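// Illustrative sketch (not part of the upstream file): the common construction
// path, pairing the scaled default limits with a fixed limiter. NewFixedLimiter is
// assumed from this package's limiter API.
//
//	limiter := NewFixedLimiter(DefaultLimits.AutoScale())
//	rm, err := NewResourceManager(limiter)
//	if err != nil {
//		panic(err) // hypothetical error handling
//	}
//	defer rm.Close()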
|
||||
|
||||
func (r *resourceManager) GetAllowlist() *Allowlist {
	return r.allowlist
}

// GetAllowlist tries to get the allowlist from the given resourcemanager
// interface by checking to see if its concrete type is a resourceManager.
// Returns nil if it fails to get the allowlist.
func GetAllowlist(rcmgr network.ResourceManager) *Allowlist {
	r, ok := rcmgr.(*resourceManager)
	if !ok {
		return nil
	}

	return r.allowlist
}
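// Illustrative sketch (not part of the upstream file): retrieving the allowlist
// from an opaque network.ResourceManager and registering a trusted address. The
// Allowlist.Add method and the example multiaddr are assumptions.
//
//	if al := GetAllowlist(rm); al != nil {
//		_ = al.Add(multiaddr.StringCast("/ip4/192.0.2.1"))
//	}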
|
||||
|
||||
func (r *resourceManager) ViewSystem(f func(network.ResourceScope) error) error {
|
||||
return f(r.system)
|
||||
}
|
||||
|
||||
func (r *resourceManager) ViewTransient(f func(network.ResourceScope) error) error {
|
||||
return f(r.transient)
|
||||
}
|
||||
|
||||
func (r *resourceManager) ViewService(srv string, f func(network.ServiceScope) error) error {
|
||||
s := r.getServiceScope(srv)
|
||||
defer s.DecRef()
|
||||
|
||||
return f(s)
|
||||
}
|
||||
|
||||
func (r *resourceManager) ViewProtocol(proto protocol.ID, f func(network.ProtocolScope) error) error {
|
||||
s := r.getProtocolScope(proto)
|
||||
defer s.DecRef()
|
||||
|
||||
return f(s)
|
||||
}
|
||||
|
||||
func (r *resourceManager) ViewPeer(p peer.ID, f func(network.PeerScope) error) error {
|
||||
s := r.getPeerScope(p)
|
||||
defer s.DecRef()
|
||||
|
||||
return f(s)
|
||||
}
|
||||
|
||||
func (r *resourceManager) getServiceScope(svc string) *serviceScope {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
s, ok := r.svc[svc]
|
||||
if !ok {
|
||||
s = newServiceScope(svc, r.limits.GetServiceLimits(svc), r)
|
||||
r.svc[svc] = s
|
||||
}
|
||||
|
||||
s.IncRef()
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *resourceManager) getProtocolScope(proto protocol.ID) *protocolScope {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
s, ok := r.proto[proto]
|
||||
if !ok {
|
||||
s = newProtocolScope(proto, r.limits.GetProtocolLimits(proto), r)
|
||||
r.proto[proto] = s
|
||||
}
|
||||
|
||||
s.IncRef()
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *resourceManager) setStickyProtocol(proto protocol.ID) {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
if r.stickyProto == nil {
|
||||
r.stickyProto = make(map[protocol.ID]struct{})
|
||||
}
|
||||
r.stickyProto[proto] = struct{}{}
|
||||
}
|
||||
|
||||
func (r *resourceManager) getPeerScope(p peer.ID) *peerScope {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
s, ok := r.peer[p]
|
||||
if !ok {
|
||||
s = newPeerScope(p, r.limits.GetPeerLimits(p), r)
|
||||
r.peer[p] = s
|
||||
}
|
||||
|
||||
s.IncRef()
|
||||
return s
|
||||
}
|
||||
|
||||
func (r *resourceManager) setStickyPeer(p peer.ID) {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
if r.stickyPeer == nil {
|
||||
r.stickyPeer = make(map[peer.ID]struct{})
|
||||
}
|
||||
|
||||
r.stickyPeer[p] = struct{}{}
|
||||
}
|
||||
|
||||
func (r *resourceManager) nextConnId() int64 {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
r.connId++
|
||||
return r.connId
|
||||
}
|
||||
|
||||
func (r *resourceManager) nextStreamId() int64 {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
r.streamId++
|
||||
return r.streamId
|
||||
}
|
||||
|
||||
func (r *resourceManager) OpenConnection(dir network.Direction, usefd bool, endpoint multiaddr.Multiaddr) (network.ConnManagementScope, error) {
|
||||
var conn *connectionScope
|
||||
conn = newConnectionScope(dir, usefd, r.limits.GetConnLimits(), r, endpoint)
|
||||
|
||||
err := conn.AddConn(dir, usefd)
|
||||
if err != nil {
|
||||
// Try again if this is an allowlisted connection
|
||||
// Failed to open connection, let's see if this was allowlisted and try again
|
||||
allowed := r.allowlist.Allowed(endpoint)
|
||||
if allowed {
|
||||
conn.Done()
|
||||
conn = newAllowListedConnectionScope(dir, usefd, r.limits.GetConnLimits(), r, endpoint)
|
||||
err = conn.AddConn(dir, usefd)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
conn.Done()
|
||||
r.metrics.BlockConn(dir, usefd)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.metrics.AllowConn(dir, usefd)
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func (r *resourceManager) OpenStream(p peer.ID, dir network.Direction) (network.StreamManagementScope, error) {
|
||||
peer := r.getPeerScope(p)
|
||||
stream := newStreamScope(dir, r.limits.GetStreamLimits(p), peer, r)
|
||||
peer.DecRef() // we have the reference in edges
|
||||
|
||||
err := stream.AddStream(dir)
|
||||
if err != nil {
|
||||
stream.Done()
|
||||
r.metrics.BlockStream(p, dir)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.metrics.AllowStream(p, dir)
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
func (r *resourceManager) Close() error {
|
||||
r.cancel()
|
||||
r.wg.Wait()
|
||||
r.trace.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *resourceManager) background() {
|
||||
defer r.wg.Done()
|
||||
|
||||
// periodically garbage collects unused peer and protocol scopes
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
r.gc()
|
||||
case <-r.cancelCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *resourceManager) gc() {
|
||||
r.mx.Lock()
|
||||
defer r.mx.Unlock()
|
||||
|
||||
for proto, s := range r.proto {
|
||||
_, sticky := r.stickyProto[proto]
|
||||
if sticky {
|
||||
continue
|
||||
}
|
||||
if s.IsUnused() {
|
||||
s.Done()
|
||||
delete(r.proto, proto)
|
||||
}
|
||||
}
|
||||
|
||||
var deadPeers []peer.ID
|
||||
for p, s := range r.peer {
|
||||
_, sticky := r.stickyPeer[p]
|
||||
if sticky {
|
||||
continue
|
||||
}
|
||||
|
||||
if s.IsUnused() {
|
||||
s.Done()
|
||||
delete(r.peer, p)
|
||||
deadPeers = append(deadPeers, p)
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range r.svc {
|
||||
s.Lock()
|
||||
for _, p := range deadPeers {
|
||||
ps, ok := s.peers[p]
|
||||
if ok {
|
||||
ps.Done()
|
||||
delete(s.peers, p)
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
for _, s := range r.proto {
|
||||
s.Lock()
|
||||
for _, p := range deadPeers {
|
||||
ps, ok := s.peers[p]
|
||||
if ok {
|
||||
ps.Done()
|
||||
delete(s.peers, p)
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func newSystemScope(limit Limit, rcmgr *resourceManager, name string) *systemScope {
|
||||
return &systemScope{
|
||||
resourceScope: newResourceScope(limit, nil, name, rcmgr.trace, rcmgr.metrics),
|
||||
}
|
||||
}
|
||||
|
||||
func newTransientScope(limit Limit, rcmgr *resourceManager, name string, systemScope *resourceScope) *transientScope {
|
||||
return &transientScope{
|
||||
resourceScope: newResourceScope(limit,
|
||||
[]*resourceScope{systemScope},
|
||||
name, rcmgr.trace, rcmgr.metrics),
|
||||
system: rcmgr.system,
|
||||
}
|
||||
}
|
||||
|
||||
func newServiceScope(service string, limit Limit, rcmgr *resourceManager) *serviceScope {
|
||||
return &serviceScope{
|
||||
resourceScope: newResourceScope(limit,
|
||||
[]*resourceScope{rcmgr.system.resourceScope},
|
||||
fmt.Sprintf("service:%s", service), rcmgr.trace, rcmgr.metrics),
|
||||
service: service,
|
||||
rcmgr: rcmgr,
|
||||
}
|
||||
}
|
||||
|
||||
func newProtocolScope(proto protocol.ID, limit Limit, rcmgr *resourceManager) *protocolScope {
|
||||
return &protocolScope{
|
||||
resourceScope: newResourceScope(limit,
|
||||
[]*resourceScope{rcmgr.system.resourceScope},
|
||||
fmt.Sprintf("protocol:%s", proto), rcmgr.trace, rcmgr.metrics),
|
||||
proto: proto,
|
||||
rcmgr: rcmgr,
|
||||
}
|
||||
}
|
||||
|
||||
func newPeerScope(p peer.ID, limit Limit, rcmgr *resourceManager) *peerScope {
|
||||
return &peerScope{
|
||||
resourceScope: newResourceScope(limit,
|
||||
[]*resourceScope{rcmgr.system.resourceScope},
|
||||
peerScopeName(p), rcmgr.trace, rcmgr.metrics),
|
||||
peer: p,
|
||||
rcmgr: rcmgr,
|
||||
}
|
||||
}
|
||||
|
||||
func newConnectionScope(dir network.Direction, usefd bool, limit Limit, rcmgr *resourceManager, endpoint multiaddr.Multiaddr) *connectionScope {
|
||||
return &connectionScope{
|
||||
resourceScope: newResourceScope(limit,
|
||||
[]*resourceScope{rcmgr.transient.resourceScope, rcmgr.system.resourceScope},
|
||||
connScopeName(rcmgr.nextConnId()), rcmgr.trace, rcmgr.metrics),
|
||||
dir: dir,
|
||||
usefd: usefd,
|
||||
rcmgr: rcmgr,
|
||||
endpoint: endpoint,
|
||||
}
|
||||
}
|
||||
|
||||
func newAllowListedConnectionScope(dir network.Direction, usefd bool, limit Limit, rcmgr *resourceManager, endpoint multiaddr.Multiaddr) *connectionScope {
|
||||
return &connectionScope{
|
||||
resourceScope: newResourceScope(limit,
|
||||
[]*resourceScope{rcmgr.allowlistedTransient.resourceScope, rcmgr.allowlistedSystem.resourceScope},
|
||||
connScopeName(rcmgr.nextConnId()), rcmgr.trace, rcmgr.metrics),
|
||||
dir: dir,
|
||||
usefd: usefd,
|
||||
rcmgr: rcmgr,
|
||||
endpoint: endpoint,
|
||||
isAllowlisted: true,
|
||||
}
|
||||
}
|
||||
|
||||
func newStreamScope(dir network.Direction, limit Limit, peer *peerScope, rcmgr *resourceManager) *streamScope {
|
||||
return &streamScope{
|
||||
resourceScope: newResourceScope(limit,
|
||||
[]*resourceScope{peer.resourceScope, rcmgr.transient.resourceScope, rcmgr.system.resourceScope},
|
||||
streamScopeName(rcmgr.nextStreamId()), rcmgr.trace, rcmgr.metrics),
|
||||
dir: dir,
|
||||
rcmgr: peer.rcmgr,
|
||||
peer: peer,
|
||||
}
|
||||
}
|
||||
|
||||
func IsSystemScope(name string) bool {
|
||||
return name == "system"
|
||||
}
|
||||
|
||||
func IsTransientScope(name string) bool {
|
||||
return name == "transient"
|
||||
}
|
||||
|
||||
func streamScopeName(streamId int64) string {
|
||||
return fmt.Sprintf("stream-%d", streamId)
|
||||
}
|
||||
|
||||
func IsStreamScope(name string) bool {
|
||||
return strings.HasPrefix(name, "stream-") && !IsSpan(name)
|
||||
}
|
||||
|
||||
func connScopeName(streamId int64) string {
|
||||
return fmt.Sprintf("conn-%d", streamId)
|
||||
}
|
||||
|
||||
func IsConnScope(name string) bool {
|
||||
return strings.HasPrefix(name, "conn-") && !IsSpan(name)
|
||||
}
|
||||
|
||||
func peerScopeName(p peer.ID) string {
|
||||
return fmt.Sprintf("peer:%s", p)
|
||||
}
|
||||
|
||||
// PeerStrInScopeName returns "" if name is not a peerScopeName. Returns a string to avoid allocating a peer ID object
|
||||
func PeerStrInScopeName(name string) string {
|
||||
if !strings.HasPrefix(name, "peer:") || IsSpan(name) {
|
||||
return ""
|
||||
}
|
||||
// Index to avoid allocating a new string
|
||||
peerSplitIdx := strings.Index(name, "peer:")
|
||||
if peerSplitIdx == -1 {
|
||||
return ""
|
||||
}
|
||||
p := (name[peerSplitIdx+len("peer:"):])
|
||||
return p
|
||||
}
|
||||
|
||||
// ParseProtocolScopeName returns the service name if name is a serviceScopeName.
|
||||
// Otherwise returns ""
|
||||
func ParseProtocolScopeName(name string) string {
|
||||
if strings.HasPrefix(name, "protocol:") && !IsSpan(name) {
|
||||
if strings.Contains(name, "peer:") {
|
||||
// This is a protocol peer scope
|
||||
return ""
|
||||
}
|
||||
|
||||
// Index to avoid allocating a new string
|
||||
separatorIdx := strings.Index(name, ":")
|
||||
if separatorIdx == -1 {
|
||||
return ""
|
||||
}
|
||||
return name[separatorIdx+1:]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *serviceScope) Name() string {
|
||||
return s.service
|
||||
}
|
||||
|
||||
func (s *serviceScope) getPeerScope(p peer.ID) *resourceScope {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
ps, ok := s.peers[p]
|
||||
if ok {
|
||||
ps.IncRef()
|
||||
return ps
|
||||
}
|
||||
|
||||
l := s.rcmgr.limits.GetServicePeerLimits(s.service)
|
||||
|
||||
if s.peers == nil {
|
||||
s.peers = make(map[peer.ID]*resourceScope)
|
||||
}
|
||||
|
||||
ps = newResourceScope(l, nil, fmt.Sprintf("%s.peer:%s", s.name, p), s.rcmgr.trace, s.rcmgr.metrics)
|
||||
s.peers[p] = ps
|
||||
|
||||
ps.IncRef()
|
||||
return ps
|
||||
}
|
||||
|
||||
func (s *protocolScope) Protocol() protocol.ID {
|
||||
return s.proto
|
||||
}
|
||||
|
||||
func (s *protocolScope) getPeerScope(p peer.ID) *resourceScope {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
ps, ok := s.peers[p]
|
||||
if ok {
|
||||
ps.IncRef()
|
||||
return ps
|
||||
}
|
||||
|
||||
l := s.rcmgr.limits.GetProtocolPeerLimits(s.proto)
|
||||
|
||||
if s.peers == nil {
|
||||
s.peers = make(map[peer.ID]*resourceScope)
|
||||
}
|
||||
|
||||
ps = newResourceScope(l, nil, fmt.Sprintf("%s.peer:%s", s.name, p), s.rcmgr.trace, s.rcmgr.metrics)
|
||||
s.peers[p] = ps
|
||||
|
||||
ps.IncRef()
|
||||
return ps
|
||||
}
|
||||
|
||||
func (s *peerScope) Peer() peer.ID {
|
||||
return s.peer
|
||||
}
|
||||
|
||||
func (s *connectionScope) PeerScope() network.PeerScope {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
// avoid nil is not nil footgun; go....
|
||||
if s.peer == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.peer
|
||||
}
|
||||
|
||||
// transferAllowedToStandard transfers this connection scope from being part of
// the allowlist set of scopes to being part of the standard set of scopes.
// Happens when we first allowlisted this connection due to its IP, but later
// discovered that the peer ID is not what we expected.
|
||||
func (s *connectionScope) transferAllowedToStandard() (err error) {
|
||||
|
||||
systemScope := s.rcmgr.system.resourceScope
|
||||
transientScope := s.rcmgr.transient.resourceScope
|
||||
|
||||
stat := s.resourceScope.rc.stat()
|
||||
|
||||
for _, scope := range s.edges {
|
||||
scope.ReleaseForChild(stat)
|
||||
scope.DecRef() // removed from edges
|
||||
}
|
||||
s.edges = nil
|
||||
|
||||
if err := systemScope.ReserveForChild(stat); err != nil {
|
||||
return err
|
||||
}
|
||||
systemScope.IncRef()
|
||||
|
||||
// Undo this if we fail later
|
||||
defer func() {
|
||||
if err != nil {
|
||||
systemScope.ReleaseForChild(stat)
|
||||
systemScope.DecRef()
|
||||
}
|
||||
}()
|
||||
|
||||
if err := transientScope.ReserveForChild(stat); err != nil {
|
||||
return err
|
||||
}
|
||||
transientScope.IncRef()
|
||||
|
||||
// Update edges
|
||||
s.edges = []*resourceScope{
|
||||
systemScope,
|
||||
transientScope,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *connectionScope) SetPeer(p peer.ID) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.peer != nil {
|
||||
return fmt.Errorf("connection scope already attached to a peer")
|
||||
}
|
||||
|
||||
system := s.rcmgr.system
|
||||
transient := s.rcmgr.transient
|
||||
|
||||
if s.isAllowlisted {
|
||||
system = s.rcmgr.allowlistedSystem
|
||||
transient = s.rcmgr.allowlistedTransient
|
||||
|
||||
if !s.rcmgr.allowlist.AllowedPeerAndMultiaddr(p, s.endpoint) {
|
||||
s.isAllowlisted = false
|
||||
|
||||
// This is not an allowed peer + multiaddr combination. We need to
|
||||
// transfer this connection to the general scope. We'll do this first by
|
||||
// transferring the connection to the system and transient scopes, then
|
||||
// continue on with this function. The idea is that a connection
|
||||
// shouldn't get the benefit of evading the transient scope because it
|
||||
// was _almost_ an allowlisted connection.
|
||||
if err := s.transferAllowedToStandard(); err != nil {
|
||||
// Failed to transfer this connection to the standard scopes
|
||||
return err
|
||||
}
|
||||
|
||||
// set the system and transient scopes to the non-allowlisted ones
|
||||
system = s.rcmgr.system
|
||||
transient = s.rcmgr.transient
|
||||
}
|
||||
}
|
||||
|
||||
s.peer = s.rcmgr.getPeerScope(p)
|
||||
|
||||
// juggle resources from transient scope to peer scope
|
||||
stat := s.resourceScope.rc.stat()
|
||||
if err := s.peer.ReserveForChild(stat); err != nil {
|
||||
s.peer.DecRef()
|
||||
s.peer = nil
|
||||
s.rcmgr.metrics.BlockPeer(p)
|
||||
return err
|
||||
}
|
||||
|
||||
transient.ReleaseForChild(stat)
|
||||
transient.DecRef() // removed from edges
|
||||
|
||||
// update edges
|
||||
edges := []*resourceScope{
|
||||
s.peer.resourceScope,
|
||||
system.resourceScope,
|
||||
}
|
||||
s.resourceScope.edges = edges
|
||||
|
||||
s.rcmgr.metrics.AllowPeer(p)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *streamScope) ProtocolScope() network.ProtocolScope {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
// avoid nil is not nil footgun; go....
|
||||
if s.proto == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.proto
|
||||
}
|
||||
|
||||
func (s *streamScope) SetProtocol(proto protocol.ID) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.proto != nil {
|
||||
return fmt.Errorf("stream scope already attached to a protocol")
|
||||
}
|
||||
|
||||
s.proto = s.rcmgr.getProtocolScope(proto)
|
||||
|
||||
// juggle resources from transient scope to protocol scope
|
||||
stat := s.resourceScope.rc.stat()
|
||||
if err := s.proto.ReserveForChild(stat); err != nil {
|
||||
s.proto.DecRef()
|
||||
s.proto = nil
|
||||
s.rcmgr.metrics.BlockProtocol(proto)
|
||||
return err
|
||||
}
|
||||
|
||||
s.peerProtoScope = s.proto.getPeerScope(s.peer.peer)
|
||||
if err := s.peerProtoScope.ReserveForChild(stat); err != nil {
|
||||
s.proto.ReleaseForChild(stat)
|
||||
s.proto.DecRef()
|
||||
s.proto = nil
|
||||
s.peerProtoScope.DecRef()
|
||||
s.peerProtoScope = nil
|
||||
s.rcmgr.metrics.BlockProtocolPeer(proto, s.peer.peer)
|
||||
return err
|
||||
}
|
||||
|
||||
s.rcmgr.transient.ReleaseForChild(stat)
|
||||
s.rcmgr.transient.DecRef() // removed from edges
|
||||
|
||||
// update edges
|
||||
edges := []*resourceScope{
|
||||
s.peer.resourceScope,
|
||||
s.peerProtoScope,
|
||||
s.proto.resourceScope,
|
||||
s.rcmgr.system.resourceScope,
|
||||
}
|
||||
s.resourceScope.edges = edges
|
||||
|
||||
s.rcmgr.metrics.AllowProtocol(proto)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *streamScope) ServiceScope() network.ServiceScope {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
// avoid nil is not nil footgun; go....
|
||||
if s.svc == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.svc
|
||||
}
|
||||
|
||||
func (s *streamScope) SetService(svc string) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.svc != nil {
|
||||
return fmt.Errorf("stream scope already attached to a service")
|
||||
}
|
||||
if s.proto == nil {
|
||||
return fmt.Errorf("stream scope not attached to a protocol")
|
||||
}
|
||||
|
||||
s.svc = s.rcmgr.getServiceScope(svc)
|
||||
|
||||
// reserve resources in service
|
||||
stat := s.resourceScope.rc.stat()
|
||||
if err := s.svc.ReserveForChild(stat); err != nil {
|
||||
s.svc.DecRef()
|
||||
s.svc = nil
|
||||
s.rcmgr.metrics.BlockService(svc)
|
||||
return err
|
||||
}
|
||||
|
||||
// get the per peer service scope constraint, if any
|
||||
s.peerSvcScope = s.svc.getPeerScope(s.peer.peer)
|
||||
if err := s.peerSvcScope.ReserveForChild(stat); err != nil {
|
||||
s.svc.ReleaseForChild(stat)
|
||||
s.svc.DecRef()
|
||||
s.svc = nil
|
||||
s.peerSvcScope.DecRef()
|
||||
s.peerSvcScope = nil
|
||||
s.rcmgr.metrics.BlockServicePeer(svc, s.peer.peer)
|
||||
return err
|
||||
}
|
||||
|
||||
// update edges
|
||||
edges := []*resourceScope{
|
||||
s.peer.resourceScope,
|
||||
s.peerProtoScope,
|
||||
s.peerSvcScope,
|
||||
s.proto.resourceScope,
|
||||
s.svc.resourceScope,
|
||||
s.rcmgr.system.resourceScope,
|
||||
}
|
||||
s.resourceScope.edges = edges
|
||||
|
||||
s.rcmgr.metrics.AllowService(svc)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *streamScope) PeerScope() network.PeerScope {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
// avoid nil is not nil footgun; go....
|
||||
if s.peer == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.peer
|
||||
}
|
||||
814
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go
generated
vendored
Normal file
@@ -0,0 +1,814 @@
|
||||
package rcmgr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
// resources tracks the current state of resource consumption
|
||||
type resources struct {
|
||||
limit Limit
|
||||
|
||||
nconnsIn, nconnsOut int
|
||||
nstreamsIn, nstreamsOut int
|
||||
nfd int
|
||||
|
||||
memory int64
|
||||
}
|
||||
|
||||
// A resourceScope can be a DAG, where a downstream node is not allowed to outlive an upstream node
|
||||
// (ie cannot call Done in the upstream node before the downstream node) and account for resources
|
||||
// using a linearized parent set.
|
||||
// A resourceScope can be a span scope, where it has a specific owner; span scopes create a tree rooted
|
||||
// at the owner (which can be a DAG scope) and can outlive their parents -- this is important because
|
||||
// span scopes are the main *user* interface for memory management, and the user may call
|
||||
// Done in a span scope after the system has closed the root of the span tree in some background
|
||||
// goroutine.
|
||||
// If we didn't make this distinction we would have a double release problem in that case.
|
||||
type resourceScope struct {
|
||||
sync.Mutex
|
||||
done bool
|
||||
refCnt int
|
||||
|
||||
spanID int
|
||||
|
||||
rc resources
|
||||
owner *resourceScope // set in span scopes, which define trees
|
||||
edges []*resourceScope // set in DAG scopes, it's the linearized parent set
|
||||
|
||||
name string // for debugging purposes
|
||||
trace *trace // debug tracing
|
||||
metrics *metrics // metrics collection
|
||||
}
|
||||
|
||||
var _ network.ResourceScope = (*resourceScope)(nil)
|
||||
var _ network.ResourceScopeSpan = (*resourceScope)(nil)
|
||||
|
||||
func newResourceScope(limit Limit, edges []*resourceScope, name string, trace *trace, metrics *metrics) *resourceScope {
|
||||
for _, e := range edges {
|
||||
e.IncRef()
|
||||
}
|
||||
r := &resourceScope{
|
||||
rc: resources{limit: limit},
|
||||
edges: edges,
|
||||
name: name,
|
||||
trace: trace,
|
||||
metrics: metrics,
|
||||
}
|
||||
r.trace.CreateScope(name, limit)
|
||||
return r
|
||||
}
|
||||
|
||||
func newResourceScopeSpan(owner *resourceScope, id int) *resourceScope {
|
||||
r := &resourceScope{
|
||||
rc: resources{limit: owner.rc.limit},
|
||||
owner: owner,
|
||||
name: fmt.Sprintf("%s.span-%d", owner.name, id),
|
||||
trace: owner.trace,
|
||||
metrics: owner.metrics,
|
||||
}
|
||||
r.trace.CreateScope(r.name, r.rc.limit)
|
||||
return r
|
||||
}
|
||||
|
||||
// IsSpan will return true if this name was created by newResourceScopeSpan
|
||||
func IsSpan(name string) bool {
|
||||
return strings.Contains(name, ".span-")
|
||||
}
|
||||
|
||||
func addInt64WithOverflow(a int64, b int64) (c int64, ok bool) {
|
||||
c = a + b
|
||||
return c, (c > a) == (b > 0)
|
||||
}
|
||||
|
||||
// mulInt64WithOverflow checks for overflow in multiplying two int64s. See
|
||||
// https://groups.google.com/g/golang-nuts/c/h5oSN5t3Au4/m/KaNQREhZh0QJ
|
||||
func mulInt64WithOverflow(a, b int64) (c int64, ok bool) {
|
||||
const mostPositive = 1<<63 - 1
|
||||
const mostNegative = -(mostPositive + 1)
|
||||
c = a * b
|
||||
if a == 0 || b == 0 || a == 1 || b == 1 {
|
||||
return c, true
|
||||
}
|
||||
if a == mostNegative || b == mostNegative {
|
||||
return c, false
|
||||
}
|
||||
return c, c/b == a
|
||||
}
|
||||
|
||||
// Resources implementation
|
||||
func (rc *resources) checkMemory(rsvp int64, prio uint8) error {
|
||||
if rsvp < 0 {
|
||||
return fmt.Errorf("can't reserve negative memory. rsvp=%v", rsvp)
|
||||
}
|
||||
|
||||
limit := rc.limit.GetMemoryLimit()
|
||||
if limit == math.MaxInt64 {
|
||||
// Special case where we've set max limits.
|
||||
return nil
|
||||
}
|
||||
|
||||
newmem, addOk := addInt64WithOverflow(rc.memory, rsvp)
|
||||
|
||||
threshold, mulOk := mulInt64WithOverflow(1+int64(prio), limit)
|
||||
if !mulOk {
|
||||
thresholdBig := big.NewInt(limit)
|
||||
thresholdBig = thresholdBig.Mul(thresholdBig, big.NewInt(1+int64(prio)))
|
||||
thresholdBig.Rsh(thresholdBig, 8) // divide by 256
|
||||
if !thresholdBig.IsInt64() {
|
||||
// Shouldn't happen since the threshold can only be <= limit
|
||||
threshold = limit
|
||||
}
|
||||
threshold = thresholdBig.Int64()
|
||||
} else {
|
||||
threshold = threshold / 256
|
||||
}
|
||||
|
||||
if !addOk || newmem > threshold {
|
||||
return &ErrMemoryLimitExceeded{
|
||||
current: rc.memory,
|
||||
attempted: rsvp,
|
||||
limit: limit,
|
||||
priority: prio,
|
||||
err: network.ErrResourceLimitExceeded,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
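// Worked example of the priority arithmetic above (illustrative, not part of the
// upstream file): the effective threshold is limit*(1+prio)/256. With prio = 255
// (network.ReservationPriorityAlways, as used elsewhere in this package) the full
// limit is available; a low priority such as prio = 101 only allows roughly 40%
// (102/256) of the limit before further reservations are rejected.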
|
||||
|
||||
func (rc *resources) reserveMemory(size int64, prio uint8) error {
|
||||
if err := rc.checkMemory(size, prio); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rc.memory += size
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rc *resources) releaseMemory(size int64) {
|
||||
rc.memory -= size
|
||||
|
||||
// sanity check for bugs upstream
|
||||
if rc.memory < 0 {
|
||||
log.Warn("BUG: too much memory released")
|
||||
rc.memory = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *resources) addStream(dir network.Direction) error {
|
||||
if dir == network.DirInbound {
|
||||
return rc.addStreams(1, 0)
|
||||
}
|
||||
return rc.addStreams(0, 1)
|
||||
}
|
||||
|
||||
func (rc *resources) addStreams(incount, outcount int) error {
|
||||
if incount > 0 {
|
||||
limit := rc.limit.GetStreamLimit(network.DirInbound)
|
||||
if rc.nstreamsIn+incount > limit {
|
||||
return &ErrStreamOrConnLimitExceeded{
|
||||
current: rc.nstreamsIn,
|
||||
attempted: incount,
|
||||
limit: limit,
|
||||
err: fmt.Errorf("cannot reserve inbound stream: %w", network.ErrResourceLimitExceeded),
|
||||
}
|
||||
}
|
||||
}
|
||||
if outcount > 0 {
|
||||
limit := rc.limit.GetStreamLimit(network.DirOutbound)
|
||||
if rc.nstreamsOut+outcount > limit {
|
||||
return &ErrStreamOrConnLimitExceeded{
|
||||
current: rc.nstreamsOut,
|
||||
attempted: outcount,
|
||||
limit: limit,
|
||||
err: fmt.Errorf("cannot reserve outbound stream: %w", network.ErrResourceLimitExceeded),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if limit := rc.limit.GetStreamTotalLimit(); rc.nstreamsIn+incount+rc.nstreamsOut+outcount > limit {
|
||||
return &ErrStreamOrConnLimitExceeded{
|
||||
current: rc.nstreamsIn + rc.nstreamsOut,
|
||||
attempted: incount + outcount,
|
||||
limit: limit,
|
||||
err: fmt.Errorf("cannot reserve stream: %w", network.ErrResourceLimitExceeded),
|
||||
}
|
||||
}
|
||||
|
||||
rc.nstreamsIn += incount
|
||||
rc.nstreamsOut += outcount
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rc *resources) removeStream(dir network.Direction) {
|
||||
if dir == network.DirInbound {
|
||||
rc.removeStreams(1, 0)
|
||||
} else {
|
||||
rc.removeStreams(0, 1)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *resources) removeStreams(incount, outcount int) {
|
||||
rc.nstreamsIn -= incount
|
||||
rc.nstreamsOut -= outcount
|
||||
|
||||
if rc.nstreamsIn < 0 {
|
||||
log.Warn("BUG: too many inbound streams released")
|
||||
rc.nstreamsIn = 0
|
||||
}
|
||||
if rc.nstreamsOut < 0 {
|
||||
log.Warn("BUG: too many outbound streams released")
|
||||
rc.nstreamsOut = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *resources) addConn(dir network.Direction, usefd bool) error {
|
||||
var fd int
|
||||
if usefd {
|
||||
fd = 1
|
||||
}
|
||||
|
||||
if dir == network.DirInbound {
|
||||
return rc.addConns(1, 0, fd)
|
||||
}
|
||||
|
||||
return rc.addConns(0, 1, fd)
|
||||
}
|
||||
|
||||
func (rc *resources) addConns(incount, outcount, fdcount int) error {
|
||||
if incount > 0 {
|
||||
limit := rc.limit.GetConnLimit(network.DirInbound)
|
||||
if rc.nconnsIn+incount > limit {
|
||||
return &ErrStreamOrConnLimitExceeded{
|
||||
current: rc.nconnsIn,
|
||||
attempted: incount,
|
||||
limit: limit,
|
||||
err: fmt.Errorf("cannot reserve inbound connection: %w", network.ErrResourceLimitExceeded),
|
||||
}
|
||||
}
|
||||
}
|
||||
if outcount > 0 {
|
||||
limit := rc.limit.GetConnLimit(network.DirOutbound)
|
||||
if rc.nconnsOut+outcount > limit {
|
||||
return &ErrStreamOrConnLimitExceeded{
|
||||
current: rc.nconnsOut,
|
||||
attempted: outcount,
|
||||
limit: limit,
|
||||
err: fmt.Errorf("cannot reserve outbound connection: %w", network.ErrResourceLimitExceeded),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if connLimit := rc.limit.GetConnTotalLimit(); rc.nconnsIn+incount+rc.nconnsOut+outcount > connLimit {
|
||||
return &ErrStreamOrConnLimitExceeded{
|
||||
current: rc.nconnsIn + rc.nconnsOut,
|
||||
attempted: incount + outcount,
|
||||
limit: connLimit,
|
||||
err: fmt.Errorf("cannot reserve connection: %w", network.ErrResourceLimitExceeded),
|
||||
}
|
||||
}
|
||||
if fdcount > 0 {
|
||||
limit := rc.limit.GetFDLimit()
|
||||
if rc.nfd+fdcount > limit {
|
||||
return &ErrStreamOrConnLimitExceeded{
|
||||
current: rc.nfd,
|
||||
attempted: fdcount,
|
||||
limit: limit,
|
||||
err: fmt.Errorf("cannot reserve file descriptor: %w", network.ErrResourceLimitExceeded),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rc.nconnsIn += incount
|
||||
rc.nconnsOut += outcount
|
||||
rc.nfd += fdcount
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rc *resources) removeConn(dir network.Direction, usefd bool) {
|
||||
var fd int
|
||||
if usefd {
|
||||
fd = 1
|
||||
}
|
||||
|
||||
if dir == network.DirInbound {
|
||||
rc.removeConns(1, 0, fd)
|
||||
} else {
|
||||
rc.removeConns(0, 1, fd)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *resources) removeConns(incount, outcount, fdcount int) {
|
||||
rc.nconnsIn -= incount
|
||||
rc.nconnsOut -= outcount
|
||||
rc.nfd -= fdcount
|
||||
|
||||
if rc.nconnsIn < 0 {
|
||||
log.Warn("BUG: too many inbound connections released")
|
||||
rc.nconnsIn = 0
|
||||
}
|
||||
if rc.nconnsOut < 0 {
|
||||
log.Warn("BUG: too many outbound connections released")
|
||||
rc.nconnsOut = 0
|
||||
}
|
||||
if rc.nfd < 0 {
|
||||
log.Warn("BUG: too many file descriptors released")
|
||||
rc.nfd = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *resources) stat() network.ScopeStat {
|
||||
return network.ScopeStat{
|
||||
Memory: rc.memory,
|
||||
NumStreamsInbound: rc.nstreamsIn,
|
||||
NumStreamsOutbound: rc.nstreamsOut,
|
||||
NumConnsInbound: rc.nconnsIn,
|
||||
NumConnsOutbound: rc.nconnsOut,
|
||||
NumFD: rc.nfd,
|
||||
}
|
||||
}
|
||||
|
||||
// resourceScope implementation
|
||||
func (s *resourceScope) wrapError(err error) error {
|
||||
return fmt.Errorf("%s: %w", s.name, err)
|
||||
}
|
||||
|
||||
func (s *resourceScope) ReserveMemory(size int, prio uint8) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.done {
|
||||
return s.wrapError(network.ErrResourceScopeClosed)
|
||||
}
|
||||
|
||||
if err := s.rc.reserveMemory(int64(size), prio); err != nil {
|
||||
log.Debugw("blocked memory reservation", logValuesMemoryLimit(s.name, "", s.rc.stat(), err)...)
|
||||
s.trace.BlockReserveMemory(s.name, prio, int64(size), s.rc.memory)
|
||||
s.metrics.BlockMemory(size)
|
||||
return s.wrapError(err)
|
||||
}
|
||||
|
||||
if err := s.reserveMemoryForEdges(size, prio); err != nil {
|
||||
s.rc.releaseMemory(int64(size))
|
||||
s.metrics.BlockMemory(size)
|
||||
return s.wrapError(err)
|
||||
}
|
||||
|
||||
s.trace.ReserveMemory(s.name, prio, int64(size), s.rc.memory)
|
||||
s.metrics.AllowMemory(size)
|
||||
return nil
|
||||
}
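// Illustrative sketch (not part of the upstream file): how a caller typically
// reserves and releases memory against a scope handed out by the resource manager,
// e.g. a stream scope. The 1 MiB size is an example choice.
//
//	if err := scope.ReserveMemory(1<<20, network.ReservationPriorityAlways); err != nil {
//		return err
//	}
//	defer scope.ReleaseMemory(1 << 20)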
|
||||
|
||||
func (s *resourceScope) reserveMemoryForEdges(size int, prio uint8) error {
|
||||
if s.owner != nil {
|
||||
return s.owner.ReserveMemory(size, prio)
|
||||
}
|
||||
|
||||
var reserved int
|
||||
var err error
|
||||
for _, e := range s.edges {
|
||||
var stat network.ScopeStat
|
||||
stat, err = e.ReserveMemoryForChild(int64(size), prio)
|
||||
if err != nil {
|
||||
log.Debugw("blocked memory reservation from constraining edge", logValuesMemoryLimit(s.name, e.name, stat, err)...)
|
||||
break
|
||||
}
|
||||
|
||||
reserved++
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// we failed because of a constraint; undo memory reservations
|
||||
for _, e := range s.edges[:reserved] {
|
||||
e.ReleaseMemoryForChild(int64(size))
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *resourceScope) releaseMemoryForEdges(size int) {
|
||||
if s.owner != nil {
|
||||
s.owner.ReleaseMemory(size)
|
||||
return
|
||||
}
|
||||
|
||||
for _, e := range s.edges {
|
||||
e.ReleaseMemoryForChild(int64(size))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *resourceScope) ReserveMemoryForChild(size int64, prio uint8) (network.ScopeStat, error) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.done {
|
||||
return s.rc.stat(), s.wrapError(network.ErrResourceScopeClosed)
|
||||
}
|
||||
|
||||
if err := s.rc.reserveMemory(size, prio); err != nil {
|
||||
s.trace.BlockReserveMemory(s.name, prio, size, s.rc.memory)
|
||||
return s.rc.stat(), s.wrapError(err)
|
||||
}
|
||||
|
||||
s.trace.ReserveMemory(s.name, prio, size, s.rc.memory)
|
||||
return network.ScopeStat{}, nil
|
||||
}
|
||||
|
||||
func (s *resourceScope) ReleaseMemory(size int) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.done {
|
||||
return
|
||||
}
|
||||
|
||||
s.rc.releaseMemory(int64(size))
|
||||
s.releaseMemoryForEdges(size)
|
||||
s.trace.ReleaseMemory(s.name, int64(size), s.rc.memory)
|
||||
}
|
||||
|
||||
func (s *resourceScope) ReleaseMemoryForChild(size int64) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return
	}

	s.rc.releaseMemory(size)
	s.trace.ReleaseMemory(s.name, size, s.rc.memory)
}

func (s *resourceScope) AddStream(dir network.Direction) error {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return s.wrapError(network.ErrResourceScopeClosed)
	}

	if err := s.rc.addStream(dir); err != nil {
		log.Debugw("blocked stream", logValuesStreamLimit(s.name, "", dir, s.rc.stat(), err)...)
		s.trace.BlockAddStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
		return s.wrapError(err)
	}

	if err := s.addStreamForEdges(dir); err != nil {
		s.rc.removeStream(dir)
		return s.wrapError(err)
	}

	s.trace.AddStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
	return nil
}

func (s *resourceScope) addStreamForEdges(dir network.Direction) error {
	if s.owner != nil {
		return s.owner.AddStream(dir)
	}

	var err error
	var reserved int
	for _, e := range s.edges {
		var stat network.ScopeStat
		stat, err = e.AddStreamForChild(dir)
		if err != nil {
			log.Debugw("blocked stream from constraining edge", logValuesStreamLimit(s.name, e.name, dir, stat, err)...)
			break
		}
		reserved++
	}

	if err != nil {
		for _, e := range s.edges[:reserved] {
			e.RemoveStreamForChild(dir)
		}
	}

	return err
}

func (s *resourceScope) AddStreamForChild(dir network.Direction) (network.ScopeStat, error) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return s.rc.stat(), s.wrapError(network.ErrResourceScopeClosed)
	}

	if err := s.rc.addStream(dir); err != nil {
		s.trace.BlockAddStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
		return s.rc.stat(), s.wrapError(err)
	}

	s.trace.AddStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
	return network.ScopeStat{}, nil
}

func (s *resourceScope) RemoveStream(dir network.Direction) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return
	}

	s.rc.removeStream(dir)
	s.removeStreamForEdges(dir)
	s.trace.RemoveStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
}

func (s *resourceScope) removeStreamForEdges(dir network.Direction) {
	if s.owner != nil {
		s.owner.RemoveStream(dir)
		return
	}

	for _, e := range s.edges {
		e.RemoveStreamForChild(dir)
	}
}

func (s *resourceScope) RemoveStreamForChild(dir network.Direction) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return
	}

	s.rc.removeStream(dir)
	s.trace.RemoveStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
}

func (s *resourceScope) AddConn(dir network.Direction, usefd bool) error {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return s.wrapError(network.ErrResourceScopeClosed)
	}

	if err := s.rc.addConn(dir, usefd); err != nil {
		log.Debugw("blocked connection", logValuesConnLimit(s.name, "", dir, usefd, s.rc.stat(), err)...)
		s.trace.BlockAddConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
		return s.wrapError(err)
	}

	if err := s.addConnForEdges(dir, usefd); err != nil {
		s.rc.removeConn(dir, usefd)
		return s.wrapError(err)
	}

	s.trace.AddConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
	return nil
}

func (s *resourceScope) addConnForEdges(dir network.Direction, usefd bool) error {
	if s.owner != nil {
		return s.owner.AddConn(dir, usefd)
	}

	var err error
	var reserved int
	for _, e := range s.edges {
		var stat network.ScopeStat
		stat, err = e.AddConnForChild(dir, usefd)
		if err != nil {
			log.Debugw("blocked connection from constraining edge", logValuesConnLimit(s.name, e.name, dir, usefd, stat, err)...)
			break
		}
		reserved++
	}

	if err != nil {
		for _, e := range s.edges[:reserved] {
			e.RemoveConnForChild(dir, usefd)
		}
	}

	return err
}

func (s *resourceScope) AddConnForChild(dir network.Direction, usefd bool) (network.ScopeStat, error) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return s.rc.stat(), s.wrapError(network.ErrResourceScopeClosed)
	}

	if err := s.rc.addConn(dir, usefd); err != nil {
		s.trace.BlockAddConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
		return s.rc.stat(), s.wrapError(err)
	}

	s.trace.AddConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
	return network.ScopeStat{}, nil
}

func (s *resourceScope) RemoveConn(dir network.Direction, usefd bool) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return
	}

	s.rc.removeConn(dir, usefd)
	s.removeConnForEdges(dir, usefd)
	s.trace.RemoveConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
}

func (s *resourceScope) removeConnForEdges(dir network.Direction, usefd bool) {
	if s.owner != nil {
		s.owner.RemoveConn(dir, usefd)
	}

	for _, e := range s.edges {
		e.RemoveConnForChild(dir, usefd)
	}
}

func (s *resourceScope) RemoveConnForChild(dir network.Direction, usefd bool) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return
	}

	s.rc.removeConn(dir, usefd)
	s.trace.RemoveConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
}

func (s *resourceScope) ReserveForChild(st network.ScopeStat) error {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return s.wrapError(network.ErrResourceScopeClosed)
	}

	if err := s.rc.reserveMemory(st.Memory, network.ReservationPriorityAlways); err != nil {
		s.trace.BlockReserveMemory(s.name, 255, st.Memory, s.rc.memory)
		return s.wrapError(err)
	}

	if err := s.rc.addStreams(st.NumStreamsInbound, st.NumStreamsOutbound); err != nil {
		s.trace.BlockAddStreams(s.name, st.NumStreamsInbound, st.NumStreamsOutbound, s.rc.nstreamsIn, s.rc.nstreamsOut)
		s.rc.releaseMemory(st.Memory)
		return s.wrapError(err)
	}

	if err := s.rc.addConns(st.NumConnsInbound, st.NumConnsOutbound, st.NumFD); err != nil {
		s.trace.BlockAddConns(s.name, st.NumConnsInbound, st.NumConnsOutbound, st.NumFD, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)

		s.rc.releaseMemory(st.Memory)
		s.rc.removeStreams(st.NumStreamsInbound, st.NumStreamsOutbound)
		return s.wrapError(err)
	}

	s.trace.ReserveMemory(s.name, 255, st.Memory, s.rc.memory)
	s.trace.AddStreams(s.name, st.NumStreamsInbound, st.NumStreamsOutbound, s.rc.nstreamsIn, s.rc.nstreamsOut)
	s.trace.AddConns(s.name, st.NumConnsInbound, st.NumConnsOutbound, st.NumFD, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)

	return nil
}

func (s *resourceScope) ReleaseForChild(st network.ScopeStat) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return
	}

	s.rc.releaseMemory(st.Memory)
	s.rc.removeStreams(st.NumStreamsInbound, st.NumStreamsOutbound)
	s.rc.removeConns(st.NumConnsInbound, st.NumConnsOutbound, st.NumFD)

	s.trace.ReleaseMemory(s.name, st.Memory, s.rc.memory)
	s.trace.RemoveStreams(s.name, st.NumStreamsInbound, st.NumStreamsOutbound, s.rc.nstreamsIn, s.rc.nstreamsOut)
	s.trace.RemoveConns(s.name, st.NumConnsInbound, st.NumConnsOutbound, st.NumFD, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
}

func (s *resourceScope) ReleaseResources(st network.ScopeStat) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return
	}

	s.rc.releaseMemory(st.Memory)
	s.rc.removeStreams(st.NumStreamsInbound, st.NumStreamsOutbound)
	s.rc.removeConns(st.NumConnsInbound, st.NumConnsOutbound, st.NumFD)

	if s.owner != nil {
		s.owner.ReleaseResources(st)
	} else {
		for _, e := range s.edges {
			e.ReleaseForChild(st)
		}
	}

	s.trace.ReleaseMemory(s.name, st.Memory, s.rc.memory)
	s.trace.RemoveStreams(s.name, st.NumStreamsInbound, st.NumStreamsOutbound, s.rc.nstreamsIn, s.rc.nstreamsOut)
	s.trace.RemoveConns(s.name, st.NumConnsInbound, st.NumConnsOutbound, st.NumFD, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
}

func (s *resourceScope) nextSpanID() int {
	s.spanID++
	return s.spanID
}

func (s *resourceScope) BeginSpan() (network.ResourceScopeSpan, error) {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return nil, s.wrapError(network.ErrResourceScopeClosed)
	}

	s.refCnt++
	return newResourceScopeSpan(s, s.nextSpanID()), nil
}

func (s *resourceScope) Done() {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return
	}

	stat := s.rc.stat()
	if s.owner != nil {
		s.owner.ReleaseResources(stat)
		s.owner.DecRef()
	} else {
		for _, e := range s.edges {
			e.ReleaseForChild(stat)
			e.DecRef()
		}
	}

	s.rc.nstreamsIn = 0
	s.rc.nstreamsOut = 0
	s.rc.nconnsIn = 0
	s.rc.nconnsOut = 0
	s.rc.nfd = 0
	s.rc.memory = 0

	s.done = true

	s.trace.DestroyScope(s.name)
}

func (s *resourceScope) Stat() network.ScopeStat {
	s.Lock()
	defer s.Unlock()

	return s.rc.stat()
}

func (s *resourceScope) IncRef() {
	s.Lock()
	defer s.Unlock()

	s.refCnt++
}

func (s *resourceScope) DecRef() {
	s.Lock()
	defer s.Unlock()

	s.refCnt--
}

func (s *resourceScope) IsUnused() bool {
	s.Lock()
	defer s.Unlock()

	if s.done {
		return true
	}

	if s.refCnt > 0 {
		return false
	}

	st := s.rc.stat()
	return st.NumStreamsInbound == 0 &&
		st.NumStreamsOutbound == 0 &&
		st.NumConnsInbound == 0 &&
		st.NumConnsOutbound == 0 &&
		st.NumFD == 0
}
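
Note (not part of the vendored file): the scope methods above propagate each reservation to the owner or constraining edges and roll it back on failure; BeginSpan/Done give callers the same transactional behavior. A minimal sketch of how an application might use that span API through the public resource-manager surface follows; limiter construction may differ across go-libp2p versions.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
)

func main() {
	// Build a resource manager with the library's default, auto-scaled limits.
	limiter := rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale())
	rm, err := rcmgr.NewResourceManager(limiter)
	if err != nil {
		panic(err)
	}
	defer rm.Close()

	// ViewSystem exposes the system scope; BeginSpan creates a child scope
	// whose reservations are released when Done is called (see Done above).
	err = rm.ViewSystem(func(scope network.ResourceScope) error {
		span, err := scope.BeginSpan()
		if err != nil {
			return err
		}
		defer span.Done() // releases everything reserved on the span

		if err := span.ReserveMemory(1<<20, network.ReservationPriorityLow); err != nil {
			return fmt.Errorf("over memory limit: %w", err)
		}
		// ... use the reserved memory while the span is open ...
		return nil
	})
	if err != nil {
		fmt.Println("reservation failed:", err)
	}
}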
390
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/stats.go
generated
vendored
Normal file
@@ -0,0 +1,390 @@
package rcmgr

import (
	"strings"

	"github.com/libp2p/go-libp2p/p2p/metricshelper"
	"github.com/prometheus/client_golang/prometheus"
)

const metricNamespace = "libp2p_rcmgr"

var (
	// Conns
	conns = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name: "connections",
		Help: "Number of Connections",
	}, []string{"dir", "scope"})

	connsInboundSystem = conns.With(prometheus.Labels{"dir": "inbound", "scope": "system"})
	connsInboundTransient = conns.With(prometheus.Labels{"dir": "inbound", "scope": "transient"})
	connsOutboundSystem = conns.With(prometheus.Labels{"dir": "outbound", "scope": "system"})
	connsOutboundTransient = conns.With(prometheus.Labels{"dir": "outbound", "scope": "transient"})

	oneTenThenExpDistributionBuckets = []float64{
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16, 32, 64, 128, 256,
	}

	// PeerConns
	peerConns = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name: "peer_connections",
		Buckets: oneTenThenExpDistributionBuckets,
		Help: "Number of connections this peer has",
	}, []string{"dir"})
	peerConnsInbound = peerConns.With(prometheus.Labels{"dir": "inbound"})
	peerConnsOutbound = peerConns.With(prometheus.Labels{"dir": "outbound"})

	// Lets us build a histogram of our current state. See https://github.com/libp2p/go-libp2p-resource-manager/pull/54#discussion_r911244757 for more information.
	previousPeerConns = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name: "previous_peer_connections",
		Buckets: oneTenThenExpDistributionBuckets,
		Help: "Number of connections this peer previously had. This is used to get the current connection number per peer histogram by subtracting this from the peer_connections histogram",
	}, []string{"dir"})
	previousPeerConnsInbound = previousPeerConns.With(prometheus.Labels{"dir": "inbound"})
	previousPeerConnsOutbound = previousPeerConns.With(prometheus.Labels{"dir": "outbound"})

	// Streams
	streams = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name: "streams",
		Help: "Number of Streams",
	}, []string{"dir", "scope", "protocol"})

	peerStreams = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name: "peer_streams",
		Buckets: oneTenThenExpDistributionBuckets,
		Help: "Number of streams this peer has",
	}, []string{"dir"})
	peerStreamsInbound = peerStreams.With(prometheus.Labels{"dir": "inbound"})
	peerStreamsOutbound = peerStreams.With(prometheus.Labels{"dir": "outbound"})

	previousPeerStreams = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name: "previous_peer_streams",
		Buckets: oneTenThenExpDistributionBuckets,
		Help: "Number of streams this peer has",
	}, []string{"dir"})
	previousPeerStreamsInbound = previousPeerStreams.With(prometheus.Labels{"dir": "inbound"})
	previousPeerStreamsOutbound = previousPeerStreams.With(prometheus.Labels{"dir": "outbound"})

	// Memory
	memoryTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name: "memory",
		Help: "Amount of memory reserved as reported to the Resource Manager",
	}, []string{"scope", "protocol"})

	// PeerMemory
	peerMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name: "peer_memory",
		Buckets: memDistribution,
		Help: "How many peers have reserved this bucket of memory, as reported to the Resource Manager",
	})
	previousPeerMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name: "previous_peer_memory",
		Buckets: memDistribution,
		Help: "How many peers have previously reserved this bucket of memory, as reported to the Resource Manager",
	})

	// ConnMemory
	connMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name: "conn_memory",
		Buckets: memDistribution,
		Help: "How many conns have reserved this bucket of memory, as reported to the Resource Manager",
	})
	previousConnMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: metricNamespace,
		Name: "previous_conn_memory",
		Buckets: memDistribution,
		Help: "How many conns have previously reserved this bucket of memory, as reported to the Resource Manager",
	})

	// FDs
	fds = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name: "fds",
		Help: "Number of file descriptors reserved as reported to the Resource Manager",
	}, []string{"scope"})

	fdsSystem = fds.With(prometheus.Labels{"scope": "system"})
	fdsTransient = fds.With(prometheus.Labels{"scope": "transient"})

	// Blocked resources
	blockedResources = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: metricNamespace,
		Name: "blocked_resources",
		Help: "Number of blocked resources",
	}, []string{"dir", "scope", "resource"})
)

var (
	memDistribution = []float64{
		1 << 10,   // 1KB
		4 << 10,   // 4KB
		32 << 10,  // 32KB
		1 << 20,   // 1MB
		32 << 20,  // 32MB
		256 << 20, // 256MB
		512 << 20, // 512MB
		1 << 30,   // 1GB
		2 << 30,   // 2GB
		4 << 30,   // 4GB
	}
)

func MustRegisterWith(reg prometheus.Registerer) {
	metricshelper.RegisterCollectors(reg,
		conns,
		peerConns,
		previousPeerConns,
		streams,
		peerStreams,
		previousPeerStreams,
		memoryTotal,
		peerMemory,
		previousPeerMemory,
		connMemory,
		previousConnMemory,
		fds,
		blockedResources,
	)
}

func WithMetricsDisabled() Option {
	return func(r *resourceManager) error {
		r.disableMetrics = true
		return nil
	}
}

// StatsTraceReporter reports stats on the resource manager using its traces.
type StatsTraceReporter struct{}

func NewStatsTraceReporter() (StatsTraceReporter, error) {
	// TODO tell prometheus the system limits
	return StatsTraceReporter{}, nil
}

func (r StatsTraceReporter) ConsumeEvent(evt TraceEvt) {
	tags := metricshelper.GetStringSlice()
	defer metricshelper.PutStringSlice(tags)

	r.consumeEventWithLabelSlice(evt, tags)
}

// Separate func so that we can test that this function does not allocate. The syncPool may allocate.
func (r StatsTraceReporter) consumeEventWithLabelSlice(evt TraceEvt, tags *[]string) {
	switch evt.Type {
	case TraceAddStreamEvt, TraceRemoveStreamEvt:
		if p := PeerStrInScopeName(evt.Name); p != "" {
			// Aggregated peer stats. Counts how many peers have N number of streams open.
			// Uses two buckets aggregations. One to count how many streams the
			// peer has now. The other to count the negative value, or how many
			// streams did the peer use to have. When looking at the data you
			// take the difference from the two.

			oldStreamsOut := int64(evt.StreamsOut - evt.DeltaOut)
			peerStreamsOut := int64(evt.StreamsOut)
			if oldStreamsOut != peerStreamsOut {
				if oldStreamsOut != 0 {
					previousPeerStreamsOutbound.Observe(float64(oldStreamsOut))
				}
				if peerStreamsOut != 0 {
					peerStreamsOutbound.Observe(float64(peerStreamsOut))
				}
			}

			oldStreamsIn := int64(evt.StreamsIn - evt.DeltaIn)
			peerStreamsIn := int64(evt.StreamsIn)
			if oldStreamsIn != peerStreamsIn {
				if oldStreamsIn != 0 {
					previousPeerStreamsInbound.Observe(float64(oldStreamsIn))
				}
				if peerStreamsIn != 0 {
					peerStreamsInbound.Observe(float64(peerStreamsIn))
				}
			}
		} else {
			if evt.DeltaOut != 0 {
				if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
					*tags = (*tags)[:0]
					*tags = append(*tags, "outbound", evt.Name, "")
					streams.WithLabelValues(*tags...).Set(float64(evt.StreamsOut))
				} else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
					*tags = (*tags)[:0]
					*tags = append(*tags, "outbound", "protocol", proto)
					streams.WithLabelValues(*tags...).Set(float64(evt.StreamsOut))
				} else {
					// Not measuring service scope, connscope, servicepeer and protocolpeer. Lots of data, and
					// you can use aggregated peer stats + service stats to infer
					// this.
					break
				}
			}

			if evt.DeltaIn != 0 {
				if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
					*tags = (*tags)[:0]
					*tags = append(*tags, "inbound", evt.Name, "")
					streams.WithLabelValues(*tags...).Set(float64(evt.StreamsIn))
				} else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
					*tags = (*tags)[:0]
					*tags = append(*tags, "inbound", "protocol", proto)
					streams.WithLabelValues(*tags...).Set(float64(evt.StreamsIn))
				} else {
					// Not measuring service scope, connscope, servicepeer and protocolpeer. Lots of data, and
					// you can use aggregated peer stats + service stats to infer
					// this.
					break
				}
			}
		}

	case TraceAddConnEvt, TraceRemoveConnEvt:
		if p := PeerStrInScopeName(evt.Name); p != "" {
			// Aggregated peer stats. Counts how many peers have N number of connections.
			// Uses two buckets aggregations. One to count how many streams the
			// peer has now. The other to count the negative value, or how many
			// conns did the peer use to have. When looking at the data you
			// take the difference from the two.

			oldConnsOut := int64(evt.ConnsOut - evt.DeltaOut)
			connsOut := int64(evt.ConnsOut)
			if oldConnsOut != connsOut {
				if oldConnsOut != 0 {
					previousPeerConnsOutbound.Observe(float64(oldConnsOut))
				}
				if connsOut != 0 {
					peerConnsOutbound.Observe(float64(connsOut))
				}
			}

			oldConnsIn := int64(evt.ConnsIn - evt.DeltaIn)
			connsIn := int64(evt.ConnsIn)
			if oldConnsIn != connsIn {
				if oldConnsIn != 0 {
					previousPeerConnsInbound.Observe(float64(oldConnsIn))
				}
				if connsIn != 0 {
					peerConnsInbound.Observe(float64(connsIn))
				}
			}
		} else {
			if IsConnScope(evt.Name) {
				// Not measuring this. I don't think it's useful.
				break
			}

			if IsSystemScope(evt.Name) {
				connsInboundSystem.Set(float64(evt.ConnsIn))
				connsOutboundSystem.Set(float64(evt.ConnsOut))
			} else if IsTransientScope(evt.Name) {
				connsInboundTransient.Set(float64(evt.ConnsIn))
				connsOutboundTransient.Set(float64(evt.ConnsOut))
			}

			// Represents the delta in fds
			if evt.Delta != 0 {
				if IsSystemScope(evt.Name) {
					fdsSystem.Set(float64(evt.FD))
				} else if IsTransientScope(evt.Name) {
					fdsTransient.Set(float64(evt.FD))
				}
			}
		}

	case TraceReserveMemoryEvt, TraceReleaseMemoryEvt:
		if p := PeerStrInScopeName(evt.Name); p != "" {
			oldMem := evt.Memory - evt.Delta
			if oldMem != evt.Memory {
				if oldMem != 0 {
					previousPeerMemory.Observe(float64(oldMem))
				}
				if evt.Memory != 0 {
					peerMemory.Observe(float64(evt.Memory))
				}
			}
		} else if IsConnScope(evt.Name) {
			oldMem := evt.Memory - evt.Delta
			if oldMem != evt.Memory {
				if oldMem != 0 {
					previousConnMemory.Observe(float64(oldMem))
				}
				if evt.Memory != 0 {
					connMemory.Observe(float64(evt.Memory))
				}
			}
		} else {
			if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
				*tags = (*tags)[:0]
				*tags = append(*tags, evt.Name, "")
				memoryTotal.WithLabelValues(*tags...).Set(float64(evt.Memory))
			} else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
				*tags = (*tags)[:0]
				*tags = append(*tags, "protocol", proto)
				memoryTotal.WithLabelValues(*tags...).Set(float64(evt.Memory))
			} else {
				// Not measuring connscope, servicepeer and protocolpeer. Lots of data, and
				// you can use aggregated peer stats + service stats to infer
				// this.
				break
			}
		}

	case TraceBlockAddConnEvt, TraceBlockAddStreamEvt, TraceBlockReserveMemoryEvt:
		var resource string
		if evt.Type == TraceBlockAddConnEvt {
			resource = "connection"
		} else if evt.Type == TraceBlockAddStreamEvt {
			resource = "stream"
		} else {
			resource = "memory"
		}

		scopeName := evt.Name
		// Only the top scopeName. We don't want to get the peerid here.
		// Using indexes and slices to avoid allocating.
		scopeSplitIdx := strings.IndexByte(scopeName, ':')
		if scopeSplitIdx != -1 {
			scopeName = evt.Name[0:scopeSplitIdx]
		}
		// Drop the connection or stream id
		idSplitIdx := strings.IndexByte(scopeName, '-')
		if idSplitIdx != -1 {
			scopeName = scopeName[0:idSplitIdx]
		}

		if evt.DeltaIn != 0 {
			*tags = (*tags)[:0]
			*tags = append(*tags, "inbound", scopeName, resource)
			blockedResources.WithLabelValues(*tags...).Add(float64(evt.DeltaIn))
		}

		if evt.DeltaOut != 0 {
			*tags = (*tags)[:0]
			*tags = append(*tags, "outbound", scopeName, resource)
			blockedResources.WithLabelValues(*tags...).Add(float64(evt.DeltaOut))
		}

		if evt.Delta != 0 && resource == "connection" {
			// This represents fds blocked
			*tags = (*tags)[:0]
			*tags = append(*tags, "", scopeName, "fd")
			blockedResources.WithLabelValues(*tags...).Add(float64(evt.Delta))
		} else if evt.Delta != 0 {
			*tags = (*tags)[:0]
			*tags = append(*tags, "", scopeName, resource)
			blockedResources.WithLabelValues(*tags...).Add(float64(evt.Delta))
		}
	}
}
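
Note (not part of the vendored file): a short sketch of how a node might register these collectors and feed trace events into StatsTraceReporter. The rcmgr and libp2p option names used here exist in recent go-libp2p releases, but exact limiter construction can vary by version.

package main

import (
	"github.com/libp2p/go-libp2p"
	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	rcmgr.MustRegisterWith(reg) // registers the gauges/histograms defined above

	str, err := rcmgr.NewStatsTraceReporter()
	if err != nil {
		panic(err)
	}

	rm, err := rcmgr.NewResourceManager(
		rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()),
		rcmgr.WithTraceReporter(str), // feeds TraceEvts into ConsumeEvent above
	)
	if err != nil {
		panic(err)
	}

	h, err := libp2p.New(libp2p.ResourceManager(rm))
	if err != nil {
		panic(err)
	}
	defer h.Close()
	// Expose reg via promhttp to scrape the libp2p_rcmgr_* metrics.
}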
11
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_not_unix.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
//go:build !linux && !darwin && !windows

package rcmgr

import "runtime"

// TODO: figure out how to get the number of file descriptors on Windows and other systems
func getNumFDs() int {
	log.Warnf("cannot determine number of file descriptors on %s", runtime.GOOS)
	return 0
}
16
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
//go:build linux || darwin

package rcmgr

import (
	"golang.org/x/sys/unix"
)

func getNumFDs() int {
	var l unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &l); err != nil {
		log.Errorw("failed to get fd limit", "error", err)
		return 0
	}
	return int(l.Cur)
}
11
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_windows.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
//go:build windows

package rcmgr

import (
	"math"
)

func getNumFDs() int {
	return math.MaxInt
}
698
vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/trace.go
generated
vendored
Normal file
@@ -0,0 +1,698 @@
package rcmgr

import (
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/libp2p/go-libp2p/core/network"
)

type trace struct {
	path string

	ctx    context.Context
	cancel func()
	wg     sync.WaitGroup

	mx            sync.Mutex
	done          bool
	pendingWrites []interface{}
	reporters     []TraceReporter
}

type TraceReporter interface {
	// ConsumeEvent consumes a trace event. This is called synchronously,
	// implementations should process the event quickly.
	ConsumeEvent(TraceEvt)
}

func WithTrace(path string) Option {
	return func(r *resourceManager) error {
		if r.trace == nil {
			r.trace = &trace{path: path}
		} else {
			r.trace.path = path
		}
		return nil
	}
}

func WithTraceReporter(reporter TraceReporter) Option {
	return func(r *resourceManager) error {
		if r.trace == nil {
			r.trace = &trace{}
		}
		r.trace.reporters = append(r.trace.reporters, reporter)
		return nil
	}
}

type TraceEvtTyp string

const (
	TraceStartEvt              TraceEvtTyp = "start"
	TraceCreateScopeEvt        TraceEvtTyp = "create_scope"
	TraceDestroyScopeEvt       TraceEvtTyp = "destroy_scope"
	TraceReserveMemoryEvt      TraceEvtTyp = "reserve_memory"
	TraceBlockReserveMemoryEvt TraceEvtTyp = "block_reserve_memory"
	TraceReleaseMemoryEvt      TraceEvtTyp = "release_memory"
	TraceAddStreamEvt          TraceEvtTyp = "add_stream"
	TraceBlockAddStreamEvt     TraceEvtTyp = "block_add_stream"
	TraceRemoveStreamEvt       TraceEvtTyp = "remove_stream"
	TraceAddConnEvt            TraceEvtTyp = "add_conn"
	TraceBlockAddConnEvt       TraceEvtTyp = "block_add_conn"
	TraceRemoveConnEvt         TraceEvtTyp = "remove_conn"
)

type scopeClass struct {
	name string
}

func (s scopeClass) MarshalJSON() ([]byte, error) {
	name := s.name
	var span string
	if idx := strings.Index(name, "span:"); idx > -1 {
		name = name[:idx-1]
		span = name[idx+5:]
	}
	// System and Transient scope
	if name == "system" || name == "transient" || name == "allowlistedSystem" || name == "allowlistedTransient" {
		return json.Marshal(struct {
			Class string
			Span  string `json:",omitempty"`
		}{
			Class: name,
			Span:  span,
		})
	}
	// Connection scope
	if strings.HasPrefix(name, "conn-") {
		return json.Marshal(struct {
			Class string
			Conn  string
			Span  string `json:",omitempty"`
		}{
			Class: "conn",
			Conn:  name[5:],
			Span:  span,
		})
	}
	// Stream scope
	if strings.HasPrefix(name, "stream-") {
		return json.Marshal(struct {
			Class  string
			Stream string
			Span   string `json:",omitempty"`
		}{
			Class:  "stream",
			Stream: name[7:],
			Span:   span,
		})
	}
	// Peer scope
	if strings.HasPrefix(name, "peer:") {
		return json.Marshal(struct {
			Class string
			Peer  string
			Span  string `json:",omitempty"`
		}{
			Class: "peer",
			Peer:  name[5:],
			Span:  span,
		})
	}

	if strings.HasPrefix(name, "service:") {
		if idx := strings.Index(name, "peer:"); idx > 0 { // Peer-Service scope
			return json.Marshal(struct {
				Class   string
				Service string
				Peer    string
				Span    string `json:",omitempty"`
			}{
				Class:   "service-peer",
				Service: name[8 : idx-1],
				Peer:    name[idx+5:],
				Span:    span,
			})
		} else { // Service scope
			return json.Marshal(struct {
				Class   string
				Service string
				Span    string `json:",omitempty"`
			}{
				Class:   "service",
				Service: name[8:],
				Span:    span,
			})
		}
	}

	if strings.HasPrefix(name, "protocol:") {
		if idx := strings.Index(name, "peer:"); idx > -1 { // Peer-Protocol scope
			return json.Marshal(struct {
				Class    string
				Protocol string
				Peer     string
				Span     string `json:",omitempty"`
			}{
				Class:    "protocol-peer",
				Protocol: name[9 : idx-1],
				Peer:     name[idx+5:],
				Span:     span,
			})
		} else { // Protocol scope
			return json.Marshal(struct {
				Class    string
				Protocol string
				Span     string `json:",omitempty"`
			}{
				Class:    "protocol",
				Protocol: name[9:],
				Span:     span,
			})
		}
	}

	return nil, fmt.Errorf("unrecognized scope: %s", name)
}

type TraceEvt struct {
	Time string
	Type TraceEvtTyp

	Scope *scopeClass `json:",omitempty"`
	Name  string      `json:",omitempty"`

	Limit interface{} `json:",omitempty"`

	Priority uint8 `json:",omitempty"`

	Delta    int64 `json:",omitempty"`
	DeltaIn  int   `json:",omitempty"`
	DeltaOut int   `json:",omitempty"`

	Memory int64 `json:",omitempty"`

	StreamsIn  int `json:",omitempty"`
	StreamsOut int `json:",omitempty"`

	ConnsIn  int `json:",omitempty"`
	ConnsOut int `json:",omitempty"`

	FD int `json:",omitempty"`
}

func (t *trace) push(evt TraceEvt) {
	t.mx.Lock()
	defer t.mx.Unlock()

	if t.done {
		return
	}
	evt.Time = time.Now().Format(time.RFC3339Nano)
	if evt.Name != "" {
		evt.Scope = &scopeClass{name: evt.Name}
	}

	for _, reporter := range t.reporters {
		reporter.ConsumeEvent(evt)
	}

	if t.path != "" {
		t.pendingWrites = append(t.pendingWrites, evt)
	}
}

func (t *trace) backgroundWriter(out io.WriteCloser) {
	defer t.wg.Done()
	defer out.Close()

	gzOut := gzip.NewWriter(out)
	defer gzOut.Close()

	jsonOut := json.NewEncoder(gzOut)

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	var pend []interface{}

	getEvents := func() {
		t.mx.Lock()
		tmp := t.pendingWrites
		t.pendingWrites = pend[:0]
		pend = tmp
		t.mx.Unlock()
	}

	for {
		select {
		case <-ticker.C:
			getEvents()

			if len(pend) == 0 {
				continue
			}

			if err := t.writeEvents(pend, jsonOut); err != nil {
				log.Warnf("error writing rcmgr trace: %s", err)
				t.mx.Lock()
				t.done = true
				t.mx.Unlock()
				return
			}

			if err := gzOut.Flush(); err != nil {
				log.Warnf("error flushing rcmgr trace: %s", err)
				t.mx.Lock()
				t.done = true
				t.mx.Unlock()
				return
			}

		case <-t.ctx.Done():
			getEvents()

			if len(pend) == 0 {
				return
			}

			if err := t.writeEvents(pend, jsonOut); err != nil {
				log.Warnf("error writing rcmgr trace: %s", err)
				return
			}

			if err := gzOut.Flush(); err != nil {
				log.Warnf("error flushing rcmgr trace: %s", err)
			}

			return
		}
	}
}

func (t *trace) writeEvents(pend []interface{}, jout *json.Encoder) error {
	for _, e := range pend {
		if err := jout.Encode(e); err != nil {
			return err
		}
	}

	return nil
}

func (t *trace) Start(limits Limiter) error {
	if t == nil {
		return nil
	}

	t.ctx, t.cancel = context.WithCancel(context.Background())

	if t.path != "" {
		out, err := os.OpenFile(t.path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
		if err != nil {
			return nil
		}

		t.wg.Add(1)
		go t.backgroundWriter(out)
	}

	t.push(TraceEvt{
		Type:  TraceStartEvt,
		Limit: limits,
	})

	return nil
}

func (t *trace) Close() error {
	if t == nil {
		return nil
	}

	t.mx.Lock()

	if t.done {
		t.mx.Unlock()
		return nil
	}

	t.cancel()
	t.done = true
	t.mx.Unlock()

	t.wg.Wait()
	return nil
}

func (t *trace) CreateScope(scope string, limit Limit) {
	if t == nil {
		return
	}

	t.push(TraceEvt{
		Type:  TraceCreateScopeEvt,
		Name:  scope,
		Limit: limit,
	})
}

func (t *trace) DestroyScope(scope string) {
	if t == nil {
		return
	}

	t.push(TraceEvt{
		Type: TraceDestroyScopeEvt,
		Name: scope,
	})
}

func (t *trace) ReserveMemory(scope string, prio uint8, size, mem int64) {
	if t == nil {
		return
	}

	if size == 0 {
		return
	}

	t.push(TraceEvt{
		Type:     TraceReserveMemoryEvt,
		Name:     scope,
		Priority: prio,
		Delta:    size,
		Memory:   mem,
	})
}

func (t *trace) BlockReserveMemory(scope string, prio uint8, size, mem int64) {
	if t == nil {
		return
	}

	if size == 0 {
		return
	}

	t.push(TraceEvt{
		Type:     TraceBlockReserveMemoryEvt,
		Name:     scope,
		Priority: prio,
		Delta:    size,
		Memory:   mem,
	})
}

func (t *trace) ReleaseMemory(scope string, size, mem int64) {
	if t == nil {
		return
	}

	if size == 0 {
		return
	}

	t.push(TraceEvt{
		Type:   TraceReleaseMemoryEvt,
		Name:   scope,
		Delta:  -size,
		Memory: mem,
	})
}

func (t *trace) AddStream(scope string, dir network.Direction, nstreamsIn, nstreamsOut int) {
	if t == nil {
		return
	}

	var deltaIn, deltaOut int
	if dir == network.DirInbound {
		deltaIn = 1
	} else {
		deltaOut = 1
	}

	t.push(TraceEvt{
		Type:       TraceAddStreamEvt,
		Name:       scope,
		DeltaIn:    deltaIn,
		DeltaOut:   deltaOut,
		StreamsIn:  nstreamsIn,
		StreamsOut: nstreamsOut,
	})
}

func (t *trace) BlockAddStream(scope string, dir network.Direction, nstreamsIn, nstreamsOut int) {
	if t == nil {
		return
	}

	var deltaIn, deltaOut int
	if dir == network.DirInbound {
		deltaIn = 1
	} else {
		deltaOut = 1
	}

	t.push(TraceEvt{
		Type:       TraceBlockAddStreamEvt,
		Name:       scope,
		DeltaIn:    deltaIn,
		DeltaOut:   deltaOut,
		StreamsIn:  nstreamsIn,
		StreamsOut: nstreamsOut,
	})
}

func (t *trace) RemoveStream(scope string, dir network.Direction, nstreamsIn, nstreamsOut int) {
	if t == nil {
		return
	}

	var deltaIn, deltaOut int
	if dir == network.DirInbound {
		deltaIn = -1
	} else {
		deltaOut = -1
	}

	t.push(TraceEvt{
		Type:       TraceRemoveStreamEvt,
		Name:       scope,
		DeltaIn:    deltaIn,
		DeltaOut:   deltaOut,
		StreamsIn:  nstreamsIn,
		StreamsOut: nstreamsOut,
	})
}

func (t *trace) AddStreams(scope string, deltaIn, deltaOut, nstreamsIn, nstreamsOut int) {
	if t == nil {
		return
	}

	if deltaIn == 0 && deltaOut == 0 {
		return
	}

	t.push(TraceEvt{
		Type:       TraceAddStreamEvt,
		Name:       scope,
		DeltaIn:    deltaIn,
		DeltaOut:   deltaOut,
		StreamsIn:  nstreamsIn,
		StreamsOut: nstreamsOut,
	})
}

func (t *trace) BlockAddStreams(scope string, deltaIn, deltaOut, nstreamsIn, nstreamsOut int) {
	if t == nil {
		return
	}

	if deltaIn == 0 && deltaOut == 0 {
		return
	}

	t.push(TraceEvt{
		Type:       TraceBlockAddStreamEvt,
		Name:       scope,
		DeltaIn:    deltaIn,
		DeltaOut:   deltaOut,
		StreamsIn:  nstreamsIn,
		StreamsOut: nstreamsOut,
	})
}

func (t *trace) RemoveStreams(scope string, deltaIn, deltaOut, nstreamsIn, nstreamsOut int) {
	if t == nil {
		return
	}

	if deltaIn == 0 && deltaOut == 0 {
		return
	}

	t.push(TraceEvt{
		Type:       TraceRemoveStreamEvt,
		Name:       scope,
		DeltaIn:    -deltaIn,
		DeltaOut:   -deltaOut,
		StreamsIn:  nstreamsIn,
		StreamsOut: nstreamsOut,
	})
}

func (t *trace) AddConn(scope string, dir network.Direction, usefd bool, nconnsIn, nconnsOut, nfd int) {
	if t == nil {
		return
	}

	var deltaIn, deltaOut, deltafd int
	if dir == network.DirInbound {
		deltaIn = 1
	} else {
		deltaOut = 1
	}
	if usefd {
		deltafd = 1
	}

	t.push(TraceEvt{
		Type:     TraceAddConnEvt,
		Name:     scope,
		DeltaIn:  deltaIn,
		DeltaOut: deltaOut,
		Delta:    int64(deltafd),
		ConnsIn:  nconnsIn,
		ConnsOut: nconnsOut,
		FD:       nfd,
	})
}

func (t *trace) BlockAddConn(scope string, dir network.Direction, usefd bool, nconnsIn, nconnsOut, nfd int) {
	if t == nil {
		return
	}

	var deltaIn, deltaOut, deltafd int
	if dir == network.DirInbound {
		deltaIn = 1
	} else {
		deltaOut = 1
	}
	if usefd {
		deltafd = 1
	}

	t.push(TraceEvt{
		Type:     TraceBlockAddConnEvt,
		Name:     scope,
		DeltaIn:  deltaIn,
		DeltaOut: deltaOut,
		Delta:    int64(deltafd),
		ConnsIn:  nconnsIn,
		ConnsOut: nconnsOut,
		FD:       nfd,
	})
}

func (t *trace) RemoveConn(scope string, dir network.Direction, usefd bool, nconnsIn, nconnsOut, nfd int) {
	if t == nil {
		return
	}

	var deltaIn, deltaOut, deltafd int
	if dir == network.DirInbound {
		deltaIn = -1
	} else {
		deltaOut = -1
	}
	if usefd {
		deltafd = -1
	}

	t.push(TraceEvt{
		Type:     TraceRemoveConnEvt,
		Name:     scope,
		DeltaIn:  deltaIn,
		DeltaOut: deltaOut,
		Delta:    int64(deltafd),
		ConnsIn:  nconnsIn,
		ConnsOut: nconnsOut,
		FD:       nfd,
	})
}

func (t *trace) AddConns(scope string, deltaIn, deltaOut, deltafd, nconnsIn, nconnsOut, nfd int) {
	if t == nil {
		return
	}

	if deltaIn == 0 && deltaOut == 0 && deltafd == 0 {
		return
	}

	t.push(TraceEvt{
		Type:     TraceAddConnEvt,
		Name:     scope,
		DeltaIn:  deltaIn,
		DeltaOut: deltaOut,
		Delta:    int64(deltafd),
		ConnsIn:  nconnsIn,
		ConnsOut: nconnsOut,
		FD:       nfd,
	})
}

func (t *trace) BlockAddConns(scope string, deltaIn, deltaOut, deltafd, nconnsIn, nconnsOut, nfd int) {
	if t == nil {
		return
	}

	if deltaIn == 0 && deltaOut == 0 && deltafd == 0 {
		return
	}

	t.push(TraceEvt{
		Type:     TraceBlockAddConnEvt,
		Name:     scope,
		DeltaIn:  deltaIn,
		DeltaOut: deltaOut,
		Delta:    int64(deltafd),
		ConnsIn:  nconnsIn,
		ConnsOut: nconnsOut,
		FD:       nfd,
	})
}

func (t *trace) RemoveConns(scope string, deltaIn, deltaOut, deltafd, nconnsIn, nconnsOut, nfd int) {
	if t == nil {
		return
	}

	if deltaIn == 0 && deltaOut == 0 && deltafd == 0 {
		return
	}

	t.push(TraceEvt{
		Type:     TraceRemoveConnEvt,
		Name:     scope,
		DeltaIn:  -deltaIn,
		DeltaOut: -deltaOut,
		Delta:    -int64(deltafd),
		ConnsIn:  nconnsIn,
		ConnsOut: nconnsOut,
		FD:       nfd,
	})
}
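
Note (not part of the vendored file): traces written via WithTrace are gzip-compressed, newline-delimited JSON produced by backgroundWriter above. A minimal sketch of reading such a file back; the file name is whatever path was passed to rcmgr.WithTrace and is an assumption here.

package main

import (
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("rcmgr.json.gz") // path previously passed to rcmgr.WithTrace(...)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	defer gz.Close()

	dec := json.NewDecoder(gz)
	for {
		var evt map[string]interface{} // fields follow the TraceEvt JSON layout above
		if err := dec.Decode(&evt); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(evt["Time"], evt["Type"], evt["Name"])
	}
}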
222
vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go
generated
vendored
Normal file
@@ -0,0 +1,222 @@
package routedhost

import (
	"context"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/core/protocol"

	logging "github.com/ipfs/go-log/v2"

	ma "github.com/multiformats/go-multiaddr"
)

var log = logging.Logger("routedhost")

// AddressTTL is the expiry time for our addresses.
// We expire them quickly.
const AddressTTL = time.Second * 10

// RoutedHost is a p2p Host that includes a routing system.
// This allows the Host to find the addresses for peers when
// it does not have them.
type RoutedHost struct {
	host  host.Host // embedded other host.
	route Routing
}

type Routing interface {
	FindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
}

func Wrap(h host.Host, r Routing) *RoutedHost {
	return &RoutedHost{h, r}
}

// Connect ensures there is a connection between this host and the peer with
// given peer.ID. See (host.Host).Connect for more information.
//
// RoutedHost's Connect differs in that if the host has no addresses for a
// given peer, it will use its routing system to try to find some.
func (rh *RoutedHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
	// first, check if we're already connected unless force direct dial.
	forceDirect, _ := network.GetForceDirectDial(ctx)
	if !forceDirect {
		if rh.Network().Connectedness(pi.ID) == network.Connected {
			return nil
		}
	}

	// if we were given some addresses, keep + use them.
	if len(pi.Addrs) > 0 {
		rh.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.TempAddrTTL)
	}

	// Check if we have some addresses in our recent memory.
	addrs := rh.Peerstore().Addrs(pi.ID)
	if len(addrs) < 1 {
		// no addrs? find some with the routing system.
		var err error
		addrs, err = rh.findPeerAddrs(ctx, pi.ID)
		if err != nil {
			return err
		}
	}

	// Issue 448: if our address set includes routed specific relay addrs,
	// we need to make sure the relay's addr itself is in the peerstore or else
	// we won't be able to dial it.
	for _, addr := range addrs {
		if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil {
			// not a relay address
			continue
		}

		if addr.Protocols()[0].Code != ma.P_P2P {
			// not a routed relay specific address
			continue
		}

		relay, _ := addr.ValueForProtocol(ma.P_P2P)
		relayID, err := peer.Decode(relay)
		if err != nil {
			log.Debugf("failed to parse relay ID in address %s: %s", relay, err)
			continue
		}

		if len(rh.Peerstore().Addrs(relayID)) > 0 {
			// we already have addrs for this relay
			continue
		}

		relayAddrs, err := rh.findPeerAddrs(ctx, relayID)
		if err != nil {
			log.Debugf("failed to find relay %s: %s", relay, err)
			continue
		}

		rh.Peerstore().AddAddrs(relayID, relayAddrs, peerstore.TempAddrTTL)
	}

	// if we're here, we got some addrs. let's use our wrapped host to connect.
	pi.Addrs = addrs
	if cerr := rh.host.Connect(ctx, pi); cerr != nil {
		// We couldn't connect. Let's check if we have the most
		// up-to-date addresses for the given peer. If there
		// are addresses we didn't know about previously, we
		// try to connect again.
		newAddrs, err := rh.findPeerAddrs(ctx, pi.ID)
		if err != nil {
			log.Debugf("failed to find more peer addresses %s: %s", pi.ID, err)
			return cerr
		}

		// Build lookup map
		lookup := make(map[string]struct{}, len(addrs))
		for _, addr := range addrs {
			lookup[string(addr.Bytes())] = struct{}{}
		}

		// if there's any address that's not in the previous set
		// of addresses, try to connect again. If all addresses
		// were known previously we return the original error.
		for _, newAddr := range newAddrs {
			if _, found := lookup[string(newAddr.Bytes())]; found {
				continue
			}

			pi.Addrs = newAddrs
			return rh.host.Connect(ctx, pi)
		}
		// No appropriate new address found.
		// Return the original dial error.
		return cerr
	}
	return nil
}

func (rh *RoutedHost) findPeerAddrs(ctx context.Context, id peer.ID) ([]ma.Multiaddr, error) {
	pi, err := rh.route.FindPeer(ctx, id)
	if err != nil {
		return nil, err // couldn't find any :(
	}

	if pi.ID != id {
		err = fmt.Errorf("routing failure: provided addrs for different peer")
		log.Errorw("got wrong peer",
			"error", err,
			"wantedPeer", id,
			"gotPeer", pi.ID,
		)
		return nil, err
	}

	return pi.Addrs, nil
}

func (rh *RoutedHost) ID() peer.ID {
	return rh.host.ID()
}

func (rh *RoutedHost) Peerstore() peerstore.Peerstore {
	return rh.host.Peerstore()
}

func (rh *RoutedHost) Addrs() []ma.Multiaddr {
	return rh.host.Addrs()
}

func (rh *RoutedHost) Network() network.Network {
	return rh.host.Network()
}

func (rh *RoutedHost) Mux() protocol.Switch {
	return rh.host.Mux()
}

func (rh *RoutedHost) EventBus() event.Bus {
	return rh.host.EventBus()
}

func (rh *RoutedHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
	rh.host.SetStreamHandler(pid, handler)
}

func (rh *RoutedHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
	rh.host.SetStreamHandlerMatch(pid, m, handler)
}

func (rh *RoutedHost) RemoveStreamHandler(pid protocol.ID) {
	rh.host.RemoveStreamHandler(pid)
}

func (rh *RoutedHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) {
	// Ensure we have a connection, with peer addresses resolved by the routing system (#207)
	// It is not sufficient to let the underlying host connect, it will most likely not have
	// any addresses for the peer without any prior connections.
	// If the caller wants to prevent the host from dialing, it should use the NoDial option.
	if nodial, _ := network.GetNoDial(ctx); !nodial {
		err := rh.Connect(ctx, peer.AddrInfo{ID: p})
		if err != nil {
			return nil, err
		}
	}

	return rh.host.NewStream(ctx, p, pids...)
}

func (rh *RoutedHost) Close() error {
	// no need to close IpfsRouting. we don't own it.
	return rh.host.Close()
}

func (rh *RoutedHost) ConnManager() connmgr.ConnManager {
	return rh.host.ConnManager()
}

var _ (host.Host) = (*RoutedHost)(nil)
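
Note (not part of the vendored file): a minimal sketch of wrapping a basic host with a routing system so Connect and NewStream can resolve addresses for unknown peers; the Kademlia DHT import is an assumption about the consuming project, any type implementing the Routing interface above works.

package main

import (
	"context"

	"github.com/libp2p/go-libp2p"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"
)

func main() {
	ctx := context.Background()

	base, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer base.Close()

	// *dht.IpfsDHT satisfies the Routing interface via its FindPeer method.
	kad, err := dht.New(ctx, base)
	if err != nil {
		panic(err)
	}

	h := routedhost.Wrap(base, kad)
	_ = h // h.Connect / h.NewStream now fall back to kad.FindPeer for unknown peers
}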