Integrate BACKBEAT SDK and resolve KACHING license validation

Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets (a sketch of this pattern follows below)
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks
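
The file-based secrets item above usually boils down to preferring a *_FILE path (a Docker secret mounted under /run/secrets) over a plain environment variable. A minimal sketch of that pattern, assuming a hypothetical helper name (loadSecret) that is not taken from this repository:

package config

import (
	"os"
	"strings"
)

// loadSecret prefers KEY_FILE (a path to a mounted Docker secret) and falls
// back to the plain KEY environment variable. Hypothetical helper, for
// illustration only.
func loadSecret(key string) (string, error) {
	if path := os.Getenv(key + "_FILE"); path != "" {
		b, err := os.ReadFile(path)
		if err != nil {
			return "", err
		}
		return strings.TrimSpace(string(b)), nil
	}
	return os.Getenv(key), nil
}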

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: anthonyrawlins
Date: 2025-09-06 07:56:26 +10:00
Parent: 543ab216f9
Commit: 9bdcbe0447
4730 changed files with 1480093 additions and 1916 deletions

vendor/golang.org/x/tools/internal/aliases/aliases.go generated vendored Normal file

@@ -0,0 +1,32 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package aliases
import (
"go/token"
"go/types"
)
// Package aliases defines backward compatible shims
// for the types.Alias type representation added in 1.22.
// This defines placeholders for x/tools until 1.26.
// NewAlias creates a new TypeName in Package pkg that
// is an alias for the type rhs.
//
// The enabled parameter determines whether the resulting [TypeName]'s
// type is an [types.Alias]. Its value must be the result of a call to
// [Enabled], which computes the effective value of
// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
// function is expensive and should be called once per task (e.g.
// package import), not once per call to NewAlias.
func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
if enabled {
tname := types.NewTypeName(pos, pkg, name, nil)
newAlias(tname, rhs)
return tname
}
return types.NewTypeName(pos, pkg, name, rhs)
}


@@ -0,0 +1,31 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.22
// +build !go1.22
package aliases
import (
"go/types"
)
// Alias is a placeholder for a go/types.Alias for <=1.21.
// It will never be created by go/types.
type Alias struct{}
func (*Alias) String() string { panic("unreachable") }
func (*Alias) Underlying() types.Type { panic("unreachable") }
func (*Alias) Obj() *types.TypeName { panic("unreachable") }
func Rhs(alias *Alias) types.Type { panic("unreachable") }
// Unalias returns the type t for go <=1.21.
func Unalias(t types.Type) types.Type { return t }
func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
// Enabled reports whether [NewAlias] should create [types.Alias] types.
//
// Before go1.22, this function always returns false.
func Enabled() bool { return false }


@@ -0,0 +1,63 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.22
// +build go1.22
package aliases
import (
"go/ast"
"go/parser"
"go/token"
"go/types"
)
// Alias is an alias of types.Alias.
type Alias = types.Alias
// Rhs returns the type on the right-hand side of the alias declaration.
func Rhs(alias *Alias) types.Type {
if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
return alias.Rhs() // go1.23+
}
// go1.22's Alias didn't have the Rhs method,
// so Unalias is the best we can do.
return Unalias(alias)
}
// Unalias is a wrapper of types.Unalias.
func Unalias(t types.Type) types.Type { return types.Unalias(t) }
// newAlias is an internal alias around types.NewAlias.
// Direct usage is discouraged at the moment.
// Try to use NewAlias instead.
func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
a := types.NewAlias(tname, rhs)
// TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
Unalias(a)
return a
}
// Enabled reports whether [NewAlias] should create [types.Alias] types.
//
// This function is expensive! Call it sparingly.
func Enabled() bool {
// The only reliable way to compute the answer is to invoke go/types.
// We don't parse the GODEBUG environment variable, because
// (a) it's tricky to do so in a manner that is consistent
// with the godebug package; in particular, a simple
// substring check is not good enough. The value is a
// rightmost-wins list of options. But more importantly:
// (b) it is impossible to detect changes to the effective
// setting caused by os.Setenv("GODEBUG"), as happens in
// many tests. Therefore any attempt to cache the result
// is just incorrect.
fset := token.NewFileSet()
f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
return enabled
}
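
As the comments above stress, Enabled invokes the type checker and is therefore expensive; the intended pattern is to compute it once per task (for example once per package import) and pass the result to every NewAlias call. A hedged sketch, assuming the caller already has positions, names, and right-hand-side types in hand (the aliasDecl struct is invented for illustration, and the aliases package is internal to x/tools):

package example

import (
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/aliases"
)

type aliasDecl struct { // hypothetical carrier of one alias declaration
	pos  token.Pos
	name string
	rhs  types.Type
}

func declareAliases(pkg *types.Package, decls []aliasDecl) {
	enabled := aliases.Enabled() // expensive: call once per import task
	for _, d := range decls {
		tname := aliases.NewAlias(enabled, d.pos, pkg, d.name, d.rhs)
		pkg.Scope().Insert(tname)
	}
}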

vendor/golang.org/x/tools/internal/event/core/event.go generated vendored Normal file

@@ -0,0 +1,85 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package core provides support for event based telemetry.
package core
import (
"fmt"
"time"
"golang.org/x/tools/internal/event/label"
)
// Event holds the information about an event of note that occurred.
type Event struct {
at time.Time
// As events are often on the stack, storing the first few labels directly
// in the event can avoid an allocation at all for the very common cases of
// simple events.
// The length needs to be large enough to cope with the majority of events
// but not so large as to cause undue stack pressure.
// A log message with two values will use 3 labels (one for each value and
// one for the message itself).
static [3]label.Label // inline storage for the first few labels
dynamic []label.Label // dynamically sized storage for remaining labels
}
// eventLabelMap implements label.Map for the labels of an Event.
type eventLabelMap struct {
event Event
}
func (ev Event) At() time.Time { return ev.at }
func (ev Event) Format(f fmt.State, r rune) {
if !ev.at.IsZero() {
fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
}
for index := 0; ev.Valid(index); index++ {
if l := ev.Label(index); l.Valid() {
fmt.Fprintf(f, "\n\t%v", l)
}
}
}
func (ev Event) Valid(index int) bool {
return index >= 0 && index < len(ev.static)+len(ev.dynamic)
}
func (ev Event) Label(index int) label.Label {
if index < len(ev.static) {
return ev.static[index]
}
return ev.dynamic[index-len(ev.static)]
}
func (ev Event) Find(key label.Key) label.Label {
for _, l := range ev.static {
if l.Key() == key {
return l
}
}
for _, l := range ev.dynamic {
if l.Key() == key {
return l
}
}
return label.Label{}
}
func MakeEvent(static [3]label.Label, labels []label.Label) Event {
return Event{
static: static,
dynamic: labels,
}
}
// CloneEvent returns a copy of the event with the time adjusted to at.
func CloneEvent(ev Event, at time.Time) Event {
ev.at = at
return ev
}


@@ -0,0 +1,70 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
"context"
"sync/atomic"
"time"
"unsafe"
"golang.org/x/tools/internal/event/label"
)
// Exporter is a function that handles events.
// It may return a modified context and event.
type Exporter func(context.Context, Event, label.Map) context.Context
var (
exporter unsafe.Pointer
)
// SetExporter sets the global exporter function that handles all events.
// The exporter is called synchronously from the event call site, so it should
// return quickly so as not to hold up user code.
func SetExporter(e Exporter) {
p := unsafe.Pointer(&e)
if e == nil {
// &e is always valid, and so p is always valid, but for the early abort
// of ProcessEvent to be efficient it needs to make the nil check on the
// pointer without having to dereference it, so we make the nil function
// also a nil pointer
p = nil
}
atomic.StorePointer(&exporter, p)
}
// deliver is called to deliver an event to the supplied exporter.
// It will fill in the time.
func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
// add the current time to the event
ev.at = time.Now()
// hand the event off to the current exporter
return exporter(ctx, ev, ev)
}
// Export is called to deliver an event to the global exporter if set.
func Export(ctx context.Context, ev Event) context.Context {
// get the global exporter and abort early if there is not one
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
if exporterPtr == nil {
return ctx
}
return deliver(ctx, *exporterPtr, ev)
}
// ExportPair is called to deliver a start event to the supplied exporter.
// It also returns a function that will deliver the end event to the same
// exporter.
// It will fill in the time.
func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
// get the global exporter and abort early if there is not one
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
if exporterPtr == nil {
return ctx, func() {}
}
ctx = deliver(ctx, *exporterPtr, begin)
return ctx, func() { deliver(ctx, *exporterPtr, end) }
}
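
Nothing is delivered until an exporter is registered. A minimal sketch of the expected shape (illustrative only; core is internal to x/tools, so real callers reach it through wrappers such as the event package further below):

package example

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/event/core"
	"golang.org/x/tools/internal/event/label"
)

func init() {
	core.SetExporter(func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
		// Runs synchronously at every event site, so keep it cheap.
		fmt.Printf("event: %v\n", ev)
		return ctx
	})
}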

vendor/golang.org/x/tools/internal/event/core/fast.go generated vendored Normal file

@@ -0,0 +1,77 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
"context"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
)
// Log1 takes a message and one label and delivers a log event to the exporter.
// It is a customized version of Print that is faster and does no allocation.
func Log1(ctx context.Context, message string, t1 label.Label) {
Export(ctx, MakeEvent([3]label.Label{
keys.Msg.Of(message),
t1,
}, nil))
}
// Log2 takes a message and two labels and delivers a log event to the exporter.
// It is a customized version of Print that is faster and does no allocation.
func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
Export(ctx, MakeEvent([3]label.Label{
keys.Msg.Of(message),
t1,
t2,
}, nil))
}
// Metric1 sends a label event to the exporter with the supplied labels.
func Metric1(ctx context.Context, t1 label.Label) context.Context {
return Export(ctx, MakeEvent([3]label.Label{
keys.Metric.New(),
t1,
}, nil))
}
// Metric2 sends a label event to the exporter with the supplied labels.
func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
return Export(ctx, MakeEvent([3]label.Label{
keys.Metric.New(),
t1,
t2,
}, nil))
}
// Start1 sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
return ExportPair(ctx,
MakeEvent([3]label.Label{
keys.Start.Of(name),
t1,
}, nil),
MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}
// Start2 sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
return ExportPair(ctx,
MakeEvent([3]label.Label{
keys.Start.Of(name),
t1,
t2,
}, nil),
MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}
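
A hedged sketch of the allocation-free fast path: Start1 opens a span with a single label and the returned function, normally deferred, closes it. The pkgKey key and traceParse wrapper are invented for illustration:

package example

import (
	"context"

	"golang.org/x/tools/internal/event/core"
	"golang.org/x/tools/internal/event/keys"
)

var pkgKey = keys.NewString("package", "the package being processed") // hypothetical key

func traceParse(ctx context.Context, pkgPath string) {
	ctx, done := core.Start1(ctx, "parse", pkgKey.Of(pkgPath))
	defer done()

	core.Log1(ctx, "parsing started", pkgKey.Of(pkgPath))
}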

vendor/golang.org/x/tools/internal/event/doc.go generated vendored Normal file

@@ -0,0 +1,7 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package event provides a set of packages that cover the main
// concepts of telemetry in an implementation agnostic way.
package event

vendor/golang.org/x/tools/internal/event/event.go generated vendored Normal file

@@ -0,0 +1,127 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package event
import (
"context"
"golang.org/x/tools/internal/event/core"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
)
// Exporter is a function that handles events.
// It may return a modified context and event.
type Exporter func(context.Context, core.Event, label.Map) context.Context
// SetExporter sets the global exporter function that handles all events.
// The exporter is called synchronously from the event call site, so it should
// return quickly so as not to hold up user code.
func SetExporter(e Exporter) {
core.SetExporter(core.Exporter(e))
}
// Log takes a message and a label list and combines them into a single event
// before delivering them to the exporter.
func Log(ctx context.Context, message string, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Msg.Of(message),
}, labels))
}
// IsLog returns true if the event was built by the Log function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsLog(ev core.Event) bool {
return ev.Label(0).Key() == keys.Msg
}
// Error takes a message and a label list and combines them into a single event
// before delivering them to the exporter. It captures the error in the
// delivered event.
func Error(ctx context.Context, message string, err error, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Msg.Of(message),
keys.Err.Of(err),
}, labels))
}
// IsError returns true if the event was built by the Error function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsError(ev core.Event) bool {
return ev.Label(0).Key() == keys.Msg &&
ev.Label(1).Key() == keys.Err
}
// Metric sends a label event to the exporter with the supplied labels.
func Metric(ctx context.Context, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Metric.New(),
}, labels))
}
// IsMetric returns true if the event was built by the Metric function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsMetric(ev core.Event) bool {
return ev.Label(0).Key() == keys.Metric
}
// Label sends a label event to the exporter with the supplied labels.
func Label(ctx context.Context, labels ...label.Label) context.Context {
return core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Label.New(),
}, labels))
}
// IsLabel returns true if the event was built by the Label function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsLabel(ev core.Event) bool {
return ev.Label(0).Key() == keys.Label
}
// Start sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
return core.ExportPair(ctx,
core.MakeEvent([3]label.Label{
keys.Start.Of(name),
}, labels),
core.MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}
// IsStart returns true if the event was built by the Start function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsStart(ev core.Event) bool {
return ev.Label(0).Key() == keys.Start
}
// IsEnd returns true if the event was built by the End function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsEnd(ev core.Event) bool {
return ev.Label(0).Key() == keys.End
}
// Detach returns a context without an associated span.
// This allows the creation of spans that are not children of the current span.
func Detach(ctx context.Context) context.Context {
return core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Detach.New(),
}, nil))
}
// IsDetach returns true if the event was built by the Detach function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsDetach(ev core.Event) bool {
return ev.Label(0).Key() == keys.Detach
}
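
Putting the helpers together, a typical call site inside x/tools produces a span, a log line, and an error event roughly like this (a hedged sketch; step is a stand-in for real work):

package example

import (
	"context"

	"golang.org/x/tools/internal/event"
)

func doWork(ctx context.Context, step func(context.Context) error) error {
	ctx, done := event.Start(ctx, "doWork")
	defer done()

	event.Log(ctx, "starting work")
	if err := step(ctx); err != nil {
		event.Error(ctx, "step failed", err)
		return err
	}
	return nil
}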

vendor/golang.org/x/tools/internal/event/keys/keys.go generated vendored Normal file

@@ -0,0 +1,564 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keys
import (
"fmt"
"io"
"math"
"strconv"
"golang.org/x/tools/internal/event/label"
)
// Value represents a key for untyped values.
type Value struct {
name string
description string
}
// New creates a new Key for untyped values.
func New(name, description string) *Value {
return &Value{name: name, description: description}
}
func (k *Value) Name() string { return k.name }
func (k *Value) Description() string { return k.description }
func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
fmt.Fprint(w, k.From(l))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Value) Get(lm label.Map) interface{} {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return nil
}
// From can be used to get a value from a Label.
func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
// Of creates a new Label with this key and the supplied value.
func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
// Tag represents a key for tagging labels that have no value.
// These are used when the existence of the label is the entire information it
// carries, such as marking events to be of a specific kind, or from a specific
// package.
type Tag struct {
name string
description string
}
// NewTag creates a new Key for tagging labels.
func NewTag(name, description string) *Tag {
return &Tag{name: name, description: description}
}
func (k *Tag) Name() string { return k.name }
func (k *Tag) Description() string { return k.description }
func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {}
// New creates a new Label with this key.
func (k *Tag) New() label.Label { return label.OfValue(k, nil) }
// Int represents a key
type Int struct {
name string
description string
}
// NewInt creates a new Key for int values.
func NewInt(name, description string) *Int {
return &Int{name: name, description: description}
}
func (k *Int) Name() string { return k.name }
func (k *Int) Description() string { return k.description }
func (k *Int) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int) Get(lm label.Map) int {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int) From(t label.Label) int { return int(t.Unpack64()) }
// Int8 represents a key
type Int8 struct {
name string
description string
}
// NewInt8 creates a new Key for int8 values.
func NewInt8(name, description string) *Int8 {
return &Int8{name: name, description: description}
}
func (k *Int8) Name() string { return k.name }
func (k *Int8) Description() string { return k.description }
func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int8) Get(lm label.Map) int8 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) }
// Int16 represents a key
type Int16 struct {
name string
description string
}
// NewInt16 creates a new Key for int16 values.
func NewInt16(name, description string) *Int16 {
return &Int16{name: name, description: description}
}
func (k *Int16) Name() string { return k.name }
func (k *Int16) Description() string { return k.description }
func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int16) Get(lm label.Map) int16 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) }
// Int32 represents a key
type Int32 struct {
name string
description string
}
// NewInt32 creates a new Key for int32 values.
func NewInt32(name, description string) *Int32 {
return &Int32{name: name, description: description}
}
func (k *Int32) Name() string { return k.name }
func (k *Int32) Description() string { return k.description }
func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int32) Get(lm label.Map) int32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) }
// Int64 represents a key
type Int64 struct {
name string
description string
}
// NewInt64 creates a new Key for int64 values.
func NewInt64(name, description string) *Int64 {
return &Int64{name: name, description: description}
}
func (k *Int64) Name() string { return k.name }
func (k *Int64) Description() string { return k.description }
func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, k.From(l), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int64) Get(lm label.Map) int64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) }
// UInt represents a key
type UInt struct {
name string
description string
}
// NewUInt creates a new Key for uint values.
func NewUInt(name, description string) *UInt {
return &UInt{name: name, description: description}
}
func (k *UInt) Name() string { return k.name }
func (k *UInt) Description() string { return k.description }
func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt) Get(lm label.Map) uint {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) }
// UInt8 represents a key
type UInt8 struct {
name string
description string
}
// NewUInt8 creates a new Key for uint8 values.
func NewUInt8(name, description string) *UInt8 {
return &UInt8{name: name, description: description}
}
func (k *UInt8) Name() string { return k.name }
func (k *UInt8) Description() string { return k.description }
func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt8) Get(lm label.Map) uint8 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) }
// UInt16 represents a key
type UInt16 struct {
name string
description string
}
// NewUInt16 creates a new Key for uint16 values.
func NewUInt16(name, description string) *UInt16 {
return &UInt16{name: name, description: description}
}
func (k *UInt16) Name() string { return k.name }
func (k *UInt16) Description() string { return k.description }
func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt16) Get(lm label.Map) uint16 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
// UInt32 represents a key
type UInt32 struct {
name string
description string
}
// NewUInt32 creates a new Key for uint32 values.
func NewUInt32(name, description string) *UInt32 {
return &UInt32{name: name, description: description}
}
func (k *UInt32) Name() string { return k.name }
func (k *UInt32) Description() string { return k.description }
func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt32) Get(lm label.Map) uint32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
// UInt64 represents a key
type UInt64 struct {
name string
description string
}
// NewUInt64 creates a new Key for uint64 values.
func NewUInt64(name, description string) *UInt64 {
return &UInt64{name: name, description: description}
}
func (k *UInt64) Name() string { return k.name }
func (k *UInt64) Description() string { return k.description }
func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, k.From(l), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt64) Get(lm label.Map) uint64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
// Float32 represents a key
type Float32 struct {
name string
description string
}
// NewFloat32 creates a new Key for float32 values.
func NewFloat32(name, description string) *Float32 {
return &Float32{name: name, description: description}
}
func (k *Float32) Name() string { return k.name }
func (k *Float32) Description() string { return k.description }
func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
}
// Of creates a new Label with this key and the supplied value.
func (k *Float32) Of(v float32) label.Label {
return label.Of64(k, uint64(math.Float32bits(v)))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Float32) Get(lm label.Map) float32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Float32) From(t label.Label) float32 {
return math.Float32frombits(uint32(t.Unpack64()))
}
// Float64 represents a key
type Float64 struct {
name string
description string
}
// NewFloat64 creates a new Key for float64 values.
func NewFloat64(name, description string) *Float64 {
return &Float64{name: name, description: description}
}
func (k *Float64) Name() string { return k.name }
func (k *Float64) Description() string { return k.description }
func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
}
// Of creates a new Label with this key and the supplied value.
func (k *Float64) Of(v float64) label.Label {
return label.Of64(k, math.Float64bits(v))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Float64) Get(lm label.Map) float64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Float64) From(t label.Label) float64 {
return math.Float64frombits(t.Unpack64())
}
// String represents a key
type String struct {
name string
description string
}
// NewString creates a new Key for string values.
func NewString(name, description string) *String {
return &String{name: name, description: description}
}
func (k *String) Name() string { return k.name }
func (k *String) Description() string { return k.description }
func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendQuote(buf, k.From(l)))
}
// Of creates a new Label with this key and the supplied value.
func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *String) Get(lm label.Map) string {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return ""
}
// From can be used to get a value from a Label.
func (k *String) From(t label.Label) string { return t.UnpackString() }
// Boolean represents a key
type Boolean struct {
name string
description string
}
// NewBoolean creates a new Key for bool values.
func NewBoolean(name, description string) *Boolean {
return &Boolean{name: name, description: description}
}
func (k *Boolean) Name() string { return k.name }
func (k *Boolean) Description() string { return k.description }
func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendBool(buf, k.From(l)))
}
// Of creates a new Label with this key and the supplied value.
func (k *Boolean) Of(v bool) label.Label {
if v {
return label.Of64(k, 1)
}
return label.Of64(k, 0)
}
// Get can be used to get a label for the key from a label.Map.
func (k *Boolean) Get(lm label.Map) bool {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return false
}
// From can be used to get a value from a Label.
func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
// Error represents a key
type Error struct {
name string
description string
}
// NewError creates a new Key for error values.
func NewError(name, description string) *Error {
return &Error{name: name, description: description}
}
func (k *Error) Name() string { return k.name }
func (k *Error) Description() string { return k.description }
func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
io.WriteString(w, k.From(l).Error())
}
// Of creates a new Label with this key and the supplied value.
func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *Error) Get(lm label.Map) error {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return nil
}
// From can be used to get a value from a Label.
func (k *Error) From(t label.Label) error {
err, _ := t.UnpackValue().(error)
return err
}
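
In practice a consumer defines typed keys once and then uses Of to build labels and Get/From to read them back. A hedged sketch with invented key names:

package example

import (
	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

var (
	reqCount = keys.NewInt("requests", "number of requests handled") // hypothetical
	peerName = keys.NewString("peer", "remote peer identifier")      // hypothetical
)

func readBack(lm label.Map) (int, string) {
	l := reqCount.Of(42)     // build a label holding 42
	n := reqCount.From(l)    // unpack it again: 42
	peer := peerName.Get(lm) // look up in a label.Map; "" if absent
	return n, peer
}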


@@ -0,0 +1,22 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keys
var (
// Msg is a key used to add message strings to label lists.
Msg = NewString("message", "a readable message")
// Label is a key used to indicate an event adds labels to the context.
Label = NewTag("label", "a label context marker")
// Start is used for things like traces that have a name.
Start = NewString("start", "span start")
// End is a key used to mark span end events.
End = NewTag("end", "a span end marker")
// Detach is a key used to indicate an event detaches from the current span.
Detach = NewTag("detach", "a span detach marker")
// Err is a key used to add error values to label lists.
Err = NewError("error", "an error that occurred")
// Metric is a key used to indicate an event records metrics.
Metric = NewTag("metric", "a metric event marker")
)

vendor/golang.org/x/tools/internal/event/keys/util.go generated vendored Normal file

@@ -0,0 +1,21 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keys
import (
"sort"
"strings"
)
// Join returns a canonical join of the keys in S:
// a sorted comma-separated string list.
func Join[S ~[]T, T ~string](s S) string {
strs := make([]string, 0, len(s))
for _, v := range s {
strs = append(strs, string(v))
}
sort.Strings(strs)
return strings.Join(strs, ",")
}
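
Join accepts any slice whose element type has string as its underlying type, which makes it useful for canonicalizing sets of names. For example, both calls below return "alpha,beta,gamma":

package example

import "golang.org/x/tools/internal/event/keys"

type feature string // a defined string type also satisfies the constraint

func canonical() (string, string) {
	return keys.Join([]string{"gamma", "alpha", "beta"}),
		keys.Join([]feature{"beta", "gamma", "alpha"})
}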

vendor/golang.org/x/tools/internal/event/label/label.go generated vendored Normal file

@@ -0,0 +1,215 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package label
import (
"fmt"
"io"
"reflect"
"unsafe"
)
// Key is used as the identity of a Label.
// Keys are intended to be compared by pointer only, the name should be unique
// for communicating with external systems, but it is not required or enforced.
type Key interface {
// Name returns the key name.
Name() string
// Description returns a string that can be used to describe the value.
Description() string
// Format is used in formatting to append the value of the label to the
// supplied buffer.
// The formatter may use the supplied buf as a scratch area to avoid
// allocations.
Format(w io.Writer, buf []byte, l Label)
}
// Label holds a key and value pair.
// It is normally used when passing around lists of labels.
type Label struct {
key Key
packed uint64
untyped interface{}
}
// Map is the interface to a collection of Labels indexed by key.
type Map interface {
// Find returns the label that matches the supplied key.
Find(key Key) Label
}
// List is the interface to something that provides an iterable
// list of labels.
// Iteration should start from 0 and continue until Valid returns false.
type List interface {
// Valid returns true if the index is within range for the list.
// It does not imply the label at that index will itself be valid.
Valid(index int) bool
// Label returns the label at the given index.
Label(index int) Label
}
// list implements LabelList for a list of Labels.
type list struct {
labels []Label
}
// filter wraps a LabelList filtering out specific labels.
type filter struct {
keys []Key
underlying List
}
// listMap implements LabelMap for a simple list of labels.
type listMap struct {
labels []Label
}
// mapChain implements LabelMap for a list of underlying LabelMap.
type mapChain struct {
maps []Map
}
// OfValue creates a new label from the key and value.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
// UnpackValue assumes the label was built using LabelOfValue and returns the value
// that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) UnpackValue() interface{} { return t.untyped }
// Of64 creates a new label from a key and a uint64. This is often
// used for non uint64 values that can be packed into a uint64.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
// Unpack64 assumes the label was built using LabelOf64 and returns the value that
// was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) Unpack64() uint64 { return t.packed }
type stringptr unsafe.Pointer
// OfString creates a new label from a key and a string.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func OfString(k Key, v string) Label {
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
return Label{
key: k,
packed: uint64(hdr.Len),
untyped: stringptr(hdr.Data),
}
}
// UnpackString assumes the label was built using LabelOfString and returns the
// value that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) UnpackString() string {
var v string
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
hdr.Data = uintptr(t.untyped.(stringptr))
hdr.Len = int(t.packed)
return v
}
// Valid returns true if the Label is a valid one (it has a key).
func (t Label) Valid() bool { return t.key != nil }
// Key returns the key of this Label.
func (t Label) Key() Key { return t.key }
// Format is used for debug printing of labels.
func (t Label) Format(f fmt.State, r rune) {
if !t.Valid() {
io.WriteString(f, `nil`)
return
}
io.WriteString(f, t.Key().Name())
io.WriteString(f, "=")
var buf [128]byte
t.Key().Format(f, buf[:0], t)
}
func (l *list) Valid(index int) bool {
return index >= 0 && index < len(l.labels)
}
func (l *list) Label(index int) Label {
return l.labels[index]
}
func (f *filter) Valid(index int) bool {
return f.underlying.Valid(index)
}
func (f *filter) Label(index int) Label {
l := f.underlying.Label(index)
for _, f := range f.keys {
if l.Key() == f {
return Label{}
}
}
return l
}
func (lm listMap) Find(key Key) Label {
for _, l := range lm.labels {
if l.Key() == key {
return l
}
}
return Label{}
}
func (c mapChain) Find(key Key) Label {
for _, src := range c.maps {
l := src.Find(key)
if l.Valid() {
return l
}
}
return Label{}
}
var emptyList = &list{}
func NewList(labels ...Label) List {
if len(labels) == 0 {
return emptyList
}
return &list{labels: labels}
}
func Filter(l List, keys ...Key) List {
if len(keys) == 0 {
return l
}
return &filter{keys: keys, underlying: l}
}
func NewMap(labels ...Label) Map {
return listMap{labels: labels}
}
func MergeMaps(srcs ...Map) Map {
var nonNil []Map
for _, src := range srcs {
if src != nil {
nonNil = append(nonNil, src)
}
}
if len(nonNil) == 1 {
return nonNil[0]
}
return mapChain{maps: nonNil}
}
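
A hedged sketch of how the constructors compose: NewList builds an iterable list, NewMap supports lookup by key, and Filter hides selected keys from a list. The keys and labels passed in are assumed to come from the keys package:

package example

import (
	"fmt"

	"golang.org/x/tools/internal/event/label"
)

func compose(msgKey, errKey label.Key, msg, errLabel label.Label) {
	ls := label.NewList(msg, errLabel) // iterable List
	lm := label.NewMap(msg, errLabel)  // Map with Find

	if l := lm.Find(msgKey); l.Valid() {
		fmt.Println(l) // printed via Label.Format
	}

	_ = label.Filter(ls, errKey) // same list with errKey labels hidden
}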


@@ -0,0 +1,150 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the remaining vestiges of
// $GOROOT/src/go/internal/gcimporter/bimport.go.
package gcimporter
import (
"fmt"
"go/token"
"go/types"
"sync"
)
func errorf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
// Synthesize a token.Pos
type fakeFileSet struct {
fset *token.FileSet
files map[string]*fileInfo
}
type fileInfo struct {
file *token.File
lastline int
}
const maxlines = 64 * 1024
func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
// TODO(mdempsky): Make use of column.
// Since we don't know the set of needed file positions, we reserve maxlines
// positions per file. We delay calling token.File.SetLines until all
// positions have been calculated (by way of fakeFileSet.setLines), so that
// we can avoid setting unnecessary lines. See also golang/go#46586.
f := s.files[file]
if f == nil {
f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
s.files[file] = f
}
if line > maxlines {
line = 1
}
if line > f.lastline {
f.lastline = line
}
// Return a fake position assuming that f.file consists only of newlines.
return token.Pos(f.file.Base() + line - 1)
}
func (s *fakeFileSet) setLines() {
fakeLinesOnce.Do(func() {
fakeLines = make([]int, maxlines)
for i := range fakeLines {
fakeLines[i] = i
}
})
for _, f := range s.files {
f.file.SetLines(fakeLines[:f.lastline])
}
}
var (
fakeLines []int
fakeLinesOnce sync.Once
)
func chanDir(d int) types.ChanDir {
// tag values must match the constants in cmd/compile/internal/gc/go.go
switch d {
case 1 /* Crecv */ :
return types.RecvOnly
case 2 /* Csend */ :
return types.SendOnly
case 3 /* Cboth */ :
return types.SendRecv
default:
errorf("unexpected channel dir %d", d)
return 0
}
}
var predeclOnce sync.Once
var predecl []types.Type // initialized lazily
func predeclared() []types.Type {
predeclOnce.Do(func() {
// initialize lazily to be sure that all
// elements have been initialized before
predecl = []types.Type{ // basic types
types.Typ[types.Bool],
types.Typ[types.Int],
types.Typ[types.Int8],
types.Typ[types.Int16],
types.Typ[types.Int32],
types.Typ[types.Int64],
types.Typ[types.Uint],
types.Typ[types.Uint8],
types.Typ[types.Uint16],
types.Typ[types.Uint32],
types.Typ[types.Uint64],
types.Typ[types.Uintptr],
types.Typ[types.Float32],
types.Typ[types.Float64],
types.Typ[types.Complex64],
types.Typ[types.Complex128],
types.Typ[types.String],
// basic type aliases
types.Universe.Lookup("byte").Type(),
types.Universe.Lookup("rune").Type(),
// error
types.Universe.Lookup("error").Type(),
// untyped types
types.Typ[types.UntypedBool],
types.Typ[types.UntypedInt],
types.Typ[types.UntypedRune],
types.Typ[types.UntypedFloat],
types.Typ[types.UntypedComplex],
types.Typ[types.UntypedString],
types.Typ[types.UntypedNil],
// package unsafe
types.Typ[types.UnsafePointer],
// invalid type
types.Typ[types.Invalid], // only appears in packages with errors
// used internally by gc; never used by this package or in .a files
anyType{},
}
predecl = append(predecl, additionalPredeclared()...)
})
return predecl
}
type anyType struct{}
func (t anyType) Underlying() types.Type { return t }
func (t anyType) String() string { return "any" }


@@ -0,0 +1,99 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
// This file implements FindExportData.
package gcimporter
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
)
func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
// See $GOROOT/include/ar.h.
hdr := make([]byte, 16+12+6+6+8+10+2)
_, err = io.ReadFull(r, hdr)
if err != nil {
return
}
// leave for debugging
if false {
fmt.Printf("header: %s", hdr)
}
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
length, err := strconv.Atoi(s)
size = int64(length)
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
err = fmt.Errorf("invalid archive header")
return
}
name = strings.TrimSpace(string(hdr[:16]))
return
}
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
// The size result is the length of the export data in bytes, or -1 if not known.
func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
if string(line) == "!<arch>\n" {
// Archive file. Scan to __.PKGDEF.
var name string
if name, size, err = readGopackHeader(r); err != nil {
return
}
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
}
// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
err = fmt.Errorf("not a Go object file")
return
}
// Skip over object header to export data.
// Begins after first line starting with $$.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
}
hdr = string(line)
if size < 0 {
size = -1
}
return
}
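
A hedged sketch of driving FindExportData by hand; in normal use the Import function further below does this internally:

package example

import (
	"bufio"
	"fmt"
	"os"

	"golang.org/x/tools/internal/gcimporter"
)

func readExportHeader(path string) error {
	f, err := os.Open(path) // a compiled .a archive or object file
	if err != nil {
		return err
	}
	defer f.Close()

	buf := bufio.NewReader(f)
	hdr, size, err := gcimporter.FindExportData(buf)
	if err != nil {
		return err
	}
	fmt.Printf("header %q, export data length %d\n", hdr, size)
	return nil
}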


@@ -0,0 +1,266 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
// Package gcimporter provides various functions for reading
// gc-generated object files that can be used to implement the
// Importer interface defined by the Go 1.5 standard library package.
//
// The encoding is deterministic: if the encoder is applied twice to
// the same types.Package data structure, both encodings are equal.
// This property may be important to avoid spurious changes in
// applications such as build systems.
//
// However, the encoder is not necessarily idempotent. Importing an
// exported package may yield a types.Package that, while it
// represents the same set of Go types as the original, may differ in
// the details of its internal representation. Because of these
// differences, re-encoding the imported package may yield a
// different, but equally valid, encoding of the package.
package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import (
"bufio"
"bytes"
"fmt"
"go/build"
"go/token"
"go/types"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)
const (
// Enable debug during development: it adds some additional checks, and
// prevents errors from being recovered.
debug = false
// If trace is set, debugging output is printed to std out.
trace = false
)
var exportMap sync.Map // package dir → func() (string, bool)
// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
func lookupGorootExport(pkgDir string) (string, bool) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
listOnce.Do(func() {
cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
var output []byte
output, err := cmd.Output()
if err != nil {
return
}
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
return
}
exportPath = exports[0]
})
return exportPath, exportPath != ""
})
}
return f.(func() (string, bool))()
}
var pkgExts = [...]string{".a", ".o"}
// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
// If no file was found, an empty filename is returned.
func FindPkg(path, srcDir string) (filename, id string) {
if path == "" {
return
}
var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
var ok bool
if bp.Goroot && bp.Dir != "" {
filename, ok = lookupGorootExport(bp.Dir)
}
if !ok {
id = path // make sure we have an id to print in error message
return
}
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
id = bp.ImportPath
}
case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext
case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}
if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}
if filename != "" {
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}
// try extensions
for _, ext := range pkgExts {
filename = noext + ext
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}
filename = "" // not found
return
}
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var filename, id string
if lookup != nil {
// With custom lookup specified, assume that caller has
// converted path to a canonical import path for use in the map.
if path == "unsafe" {
return types.Unsafe, nil
}
id = path
// No need to re-import if the package was imported completely before.
if pkg = packages[id]; pkg != nil && pkg.Complete() {
return
}
f, err := lookup(path)
if err != nil {
return nil, err
}
rc = f
} else {
filename, id = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %q", id)
}
// no need to re-import if the package was imported completely before
if pkg = packages[id]; pkg != nil && pkg.Complete() {
return
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
// add file name to error
err = fmt.Errorf("%s: %v", filename, err)
}
}()
rc = f
}
defer rc.Close()
var hdr string
var size int64
buf := bufio.NewReader(rc)
if hdr, size, err = FindExportData(buf); err != nil {
return
}
switch hdr {
case "$$B\n":
var data []byte
data, err = io.ReadAll(buf)
if err != nil {
break
}
// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// Select appropriate importer.
if len(data) > 0 {
switch data[0] {
case 'v', 'c', 'd': // binary, till go1.10
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i': // indexed, till go1.19
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
case 'u': // unified, from go1.20
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
}
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
}
return
}
type byPath []*types.Package
func (a byPath) Len() int { return len(a) }
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
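
Typical use of Import from inside x/tools (a hedged sketch; the package is internal, so external modules cannot import it directly):

package example

import (
	"go/types"

	"golang.org/x/tools/internal/gcimporter"
)

func loadPackage(path, srcDir string) (*types.Package, error) {
	// packages accumulates every package imported so far (including
	// transitive dependencies), keyed by package id.
	packages := make(map[string]*types.Package)
	return gcimporter.Import(packages, path, srcDir, nil)
}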

vendor/golang.org/x/tools/internal/gcimporter/iexport.go generated vendored Normal file

File diff suppressed because it is too large

vendor/golang.org/x/tools/internal/gcimporter/iimport.go generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,22 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.11
// +build !go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
named := make([]*types.Named, len(embeddeds))
for i, e := range embeddeds {
var ok bool
named[i], ok = e.(*types.Named)
if !ok {
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
}
}
return types.NewInterface(methods, named)
}


@@ -0,0 +1,14 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.11
// +build go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
return types.NewInterfaceType(methods, embeddeds)
}


@@ -0,0 +1,34 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcimporter
import "go/types"
const iexportVersion = iexportVersionGenerics
// additionalPredeclared returns additional predeclared types in go.1.18.
func additionalPredeclared() []types.Type {
return []types.Type{
// comparable
types.Universe.Lookup("comparable").Type(),
// any
types.Universe.Lookup("any").Type(),
}
}
// See cmd/compile/internal/types.SplitVargenSuffix.
func splitVargenSuffix(name string) (base, suffix string) {
i := len(name)
for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
i--
}
const dot = "·"
if i >= len(dot) && name[i-len(dot):i] == dot {
i -= len(dot)
return name[:i], name[i:]
}
return name, ""
}
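
For example, within this package the vargen suffix splits off like this (expected results shown in comments; the wrapper function is illustrative only):

func exampleSplit() {
	base, suffix := splitVargenSuffix("T·3") // base == "T", suffix == "·3"
	plain, none := splitVargenSuffix("T")    // plain == "T", none == ""
	_, _, _, _ = base, suffix, plain, none
}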


@@ -0,0 +1,10 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.unified
// +build !goexperiment.unified
package gcimporter
const unifiedIR = false


@@ -0,0 +1,10 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.unified
// +build goexperiment.unified
package gcimporter
const unifiedIR = true


@@ -0,0 +1,728 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Derived from go/internal/gcimporter/ureader.go
package gcimporter
import (
"fmt"
"go/token"
"go/types"
"sort"
"strings"
"golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/pkgbits"
)
// A pkgReader holds the shared state for reading a unified IR package
// description.
type pkgReader struct {
pkgbits.PkgDecoder
fake fakeFileSet
ctxt *types.Context
imports map[string]*types.Package // previously imported packages, indexed by path
aliases bool // create types.Alias nodes
// lazily initialized arrays corresponding to the unified IR
// PosBase, Pkg, and Type sections, respectively.
posBases []string // position bases (i.e., file names)
pkgs []*types.Package
typs []types.Type
// laterFns holds functions that need to be invoked at the end of
// import reading.
laterFns []func()
// laterFors is used in case of 'type A B' to ensure that B is processed before A.
laterFors map[types.Type]int
// ifaces holds a list of constructed Interfaces, which need to have
// Complete called after importing is done.
ifaces []*types.Interface
}
// later adds a function to be invoked at the end of import reading.
func (pr *pkgReader) later(fn func()) {
pr.laterFns = append(pr.laterFns, fn)
}
// See cmd/compile/internal/noder.derivedInfo.
type derivedInfo struct {
idx pkgbits.Index
needed bool
}
// See cmd/compile/internal/noder.typeInfo.
type typeInfo struct {
idx pkgbits.Index
derived bool
}
func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
if !debug {
defer func() {
if x := recover(); x != nil {
err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
}
}()
}
s := string(data)
s = s[:strings.LastIndex(s, "\n$$\n")]
input := pkgbits.NewPkgDecoder(path, s)
pkg = readUnifiedPackage(fset, nil, imports, input)
return
}
// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
func (pr *pkgReader) laterFor(t types.Type, fn func()) {
if pr.laterFors == nil {
pr.laterFors = make(map[types.Type]int)
}
pr.laterFors[t] = len(pr.laterFns)
pr.laterFns = append(pr.laterFns, fn)
}
// readUnifiedPackage reads a package description from the given
// unified IR export data decoder.
func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
pr := pkgReader{
PkgDecoder: input,
fake: fakeFileSet{
fset: fset,
files: make(map[string]*fileInfo),
},
ctxt: ctxt,
imports: imports,
aliases: aliases.Enabled(),
posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
typs: make([]types.Type, input.NumElems(pkgbits.RelocType)),
}
defer pr.fake.setLines()
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
r.Bool() // has init
for i, n := 0, r.Len(); i < n; i++ {
// As if r.obj(), but avoiding the Scope.Lookup call,
// to avoid eager loading of imports.
r.Sync(pkgbits.SyncObject)
assert(!r.Bool())
r.p.objIdx(r.Reloc(pkgbits.RelocObj))
assert(r.Len() == 0)
}
r.Sync(pkgbits.SyncEOF)
for _, fn := range pr.laterFns {
fn()
}
for _, iface := range pr.ifaces {
iface.Complete()
}
// Imports() of pkg are all of the transitive packages that were loaded.
var imps []*types.Package
for _, imp := range pr.pkgs {
if imp != nil && imp != pkg {
imps = append(imps, imp)
}
}
sort.Sort(byPath(imps))
pkg.SetImports(imps)
pkg.MarkComplete()
return pkg
}
// A reader holds the state for reading a single unified IR element
// within a package.
type reader struct {
pkgbits.Decoder
p *pkgReader
dict *readerDict
}
// A readerDict holds the state for type parameters that parameterize
// the current unified IR element.
type readerDict struct {
// bounds is a slice of typeInfos corresponding to the underlying
// bounds of the element's type parameters.
bounds []typeInfo
// tparams is a slice of the constructed TypeParams for the element.
tparams []*types.TypeParam
// derived is a slice of types derived from tparams, which may be
// instantiated while reading the current element.
derived []derivedInfo
derivedTypes []types.Type // lazily instantiated from derived
}
func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
return &reader{
Decoder: pr.NewDecoder(k, idx, marker),
p: pr,
}
}
func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
return &reader{
Decoder: pr.TempDecoder(k, idx, marker),
p: pr,
}
}
func (pr *pkgReader) retireReader(r *reader) {
pr.RetireDecoder(&r.Decoder)
}
// @@@ Positions
func (r *reader) pos() token.Pos {
r.Sync(pkgbits.SyncPos)
if !r.Bool() {
return token.NoPos
}
// TODO(mdempsky): Delta encoding.
posBase := r.posBase()
line := r.Uint()
col := r.Uint()
return r.p.fake.pos(posBase, int(line), int(col))
}
func (r *reader) posBase() string {
return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
}
func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
if b := pr.posBases[idx]; b != "" {
return b
}
var filename string
{
r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
// Within types2, position bases have a lot more details (e.g.,
// keeping track of where //line directives appeared exactly).
//
// For go/types, we just track the file name.
filename = r.String()
if r.Bool() { // file base
// Was: "b = token.NewTrimmedFileBase(filename, true)"
} else { // line base
pos := r.pos()
line := r.Uint()
col := r.Uint()
// Was: "b = token.NewLineBase(pos, filename, true, line, col)"
_, _, _ = pos, line, col
}
pr.retireReader(r)
}
b := filename
pr.posBases[idx] = b
return b
}
// @@@ Packages
func (r *reader) pkg() *types.Package {
r.Sync(pkgbits.SyncPkg)
return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
}
func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
// TODO(mdempsky): Consider using some non-nil pointer to indicate
// the universe scope, so we don't need to keep re-reading it.
if pkg := pr.pkgs[idx]; pkg != nil {
return pkg
}
pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
pr.pkgs[idx] = pkg
return pkg
}
func (r *reader) doPkg() *types.Package {
path := r.String()
switch path {
case "":
path = r.p.PkgPath()
case "builtin":
return nil // universe
case "unsafe":
return types.Unsafe
}
if pkg := r.p.imports[path]; pkg != nil {
return pkg
}
name := r.String()
pkg := types.NewPackage(path, name)
r.p.imports[path] = pkg
return pkg
}
// @@@ Types
func (r *reader) typ() types.Type {
return r.p.typIdx(r.typInfo(), r.dict)
}
func (r *reader) typInfo() typeInfo {
r.Sync(pkgbits.SyncType)
if r.Bool() {
return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
}
return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
}
func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
idx := info.idx
var where *types.Type
if info.derived {
where = &dict.derivedTypes[idx]
idx = dict.derived[idx].idx
} else {
where = &pr.typs[idx]
}
if typ := *where; typ != nil {
return typ
}
var typ types.Type
{
r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
r.dict = dict
typ = r.doTyp()
assert(typ != nil)
pr.retireReader(r)
}
// See comment in pkgReader.typIdx explaining how this happens.
if prev := *where; prev != nil {
return prev
}
*where = typ
return typ
}
func (r *reader) doTyp() (res types.Type) {
switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
default:
errorf("unhandled type tag: %v", tag)
panic("unreachable")
case pkgbits.TypeBasic:
return types.Typ[r.Len()]
case pkgbits.TypeNamed:
obj, targs := r.obj()
name := obj.(*types.TypeName)
if len(targs) != 0 {
t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false)
return t
}
return name.Type()
case pkgbits.TypeTypeParam:
return r.dict.tparams[r.Len()]
case pkgbits.TypeArray:
len := int64(r.Uint64())
return types.NewArray(r.typ(), len)
case pkgbits.TypeChan:
dir := types.ChanDir(r.Len())
return types.NewChan(dir, r.typ())
case pkgbits.TypeMap:
return types.NewMap(r.typ(), r.typ())
case pkgbits.TypePointer:
return types.NewPointer(r.typ())
case pkgbits.TypeSignature:
return r.signature(nil, nil, nil)
case pkgbits.TypeSlice:
return types.NewSlice(r.typ())
case pkgbits.TypeStruct:
return r.structType()
case pkgbits.TypeInterface:
return r.interfaceType()
case pkgbits.TypeUnion:
return r.unionType()
}
}
func (r *reader) structType() *types.Struct {
fields := make([]*types.Var, r.Len())
var tags []string
for i := range fields {
pos := r.pos()
pkg, name := r.selector()
ftyp := r.typ()
tag := r.String()
embedded := r.Bool()
fields[i] = types.NewField(pos, pkg, name, ftyp, embedded)
if tag != "" {
for len(tags) < i {
tags = append(tags, "")
}
tags = append(tags, tag)
}
}
return types.NewStruct(fields, tags)
}
func (r *reader) unionType() *types.Union {
terms := make([]*types.Term, r.Len())
for i := range terms {
terms[i] = types.NewTerm(r.Bool(), r.typ())
}
return types.NewUnion(terms)
}
func (r *reader) interfaceType() *types.Interface {
methods := make([]*types.Func, r.Len())
embeddeds := make([]types.Type, r.Len())
implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
for i := range methods {
pos := r.pos()
pkg, name := r.selector()
mtyp := r.signature(nil, nil, nil)
methods[i] = types.NewFunc(pos, pkg, name, mtyp)
}
for i := range embeddeds {
embeddeds[i] = r.typ()
}
iface := types.NewInterfaceType(methods, embeddeds)
if implicit {
iface.MarkImplicit()
}
// We need to call iface.Complete(), but if there are any embedded
// defined types, then we may not have set their underlying
// interface type yet. So we need to defer calling Complete until
// after we've called SetUnderlying everywhere.
//
// TODO(mdempsky): After CL 424876 lands, it should be safe to call
// iface.Complete() immediately.
r.p.ifaces = append(r.p.ifaces, iface)
return iface
}
func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature {
r.Sync(pkgbits.SyncSignature)
params := r.params()
results := r.params()
variadic := r.Bool()
return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
}
func (r *reader) params() *types.Tuple {
r.Sync(pkgbits.SyncParams)
params := make([]*types.Var, r.Len())
for i := range params {
params[i] = r.param()
}
return types.NewTuple(params...)
}
func (r *reader) param() *types.Var {
r.Sync(pkgbits.SyncParam)
pos := r.pos()
pkg, name := r.localIdent()
typ := r.typ()
return types.NewParam(pos, pkg, name, typ)
}
// @@@ Objects
func (r *reader) obj() (types.Object, []types.Type) {
r.Sync(pkgbits.SyncObject)
assert(!r.Bool())
pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
obj := pkgScope(pkg).Lookup(name)
targs := make([]types.Type, r.Len())
for i := range targs {
targs[i] = r.typ()
}
return obj, targs
}
func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
var objPkg *types.Package
var objName string
var tag pkgbits.CodeObj
{
rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
objPkg, objName = rname.qualifiedIdent()
assert(objName != "")
tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
pr.retireReader(rname)
}
if tag == pkgbits.ObjStub {
assert(objPkg == nil || objPkg == types.Unsafe)
return objPkg, objName
}
// Ignore local types promoted to global scope (#55110).
if _, suffix := splitVargenSuffix(objName); suffix != "" {
return objPkg, objName
}
if objPkg.Scope().Lookup(objName) == nil {
dict := pr.objDictIdx(idx)
r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
r.dict = dict
declare := func(obj types.Object) {
objPkg.Scope().Insert(obj)
}
switch tag {
default:
panic("weird")
case pkgbits.ObjAlias:
pos := r.pos()
typ := r.typ()
declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ))
case pkgbits.ObjConst:
pos := r.pos()
typ := r.typ()
val := r.Value()
declare(types.NewConst(pos, objPkg, objName, typ, val))
case pkgbits.ObjFunc:
pos := r.pos()
tparams := r.typeParamNames()
sig := r.signature(nil, nil, tparams)
declare(types.NewFunc(pos, objPkg, objName, sig))
case pkgbits.ObjType:
pos := r.pos()
obj := types.NewTypeName(pos, objPkg, objName, nil)
named := types.NewNamed(obj, nil, nil)
declare(obj)
named.SetTypeParams(r.typeParamNames())
setUnderlying := func(underlying types.Type) {
// If the underlying type is an interface, we need to
// duplicate its methods so we can replace the receiver
// parameter's type (#49906).
if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
methods := make([]*types.Func, iface.NumExplicitMethods())
for i := range methods {
fn := iface.ExplicitMethod(i)
sig := fn.Type().(*types.Signature)
recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
}
embeds := make([]types.Type, iface.NumEmbeddeds())
for i := range embeds {
embeds[i] = iface.EmbeddedType(i)
}
newIface := types.NewInterfaceType(methods, embeds)
r.p.ifaces = append(r.p.ifaces, newIface)
underlying = newIface
}
named.SetUnderlying(underlying)
}
// Since go.dev/cl/455279, we can assume rhs.Underlying() will
// always be non-nil. However, to temporarily support users of
// older snapshot releases, we continue to fall back to the old
// behavior for now.
//
// TODO(mdempsky): Remove fallback code and simplify after
// allowing time for snapshot users to upgrade.
rhs := r.typ()
if underlying := rhs.Underlying(); underlying != nil {
setUnderlying(underlying)
} else {
pk := r.p
pk.laterFor(named, func() {
// First be sure that the rhs is initialized, if it needs to be initialized.
delete(pk.laterFors, named) // prevent cycles
if i, ok := pk.laterFors[rhs]; ok {
f := pk.laterFns[i]
pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
f() // initialize RHS
}
setUnderlying(rhs.Underlying())
})
}
for i, n := 0, r.Len(); i < n; i++ {
named.AddMethod(r.method())
}
case pkgbits.ObjVar:
pos := r.pos()
typ := r.typ()
declare(types.NewVar(pos, objPkg, objName, typ))
}
}
return objPkg, objName
}
func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
var dict readerDict
{
r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
if implicits := r.Len(); implicits != 0 {
errorf("unexpected object with %v implicit type parameter(s)", implicits)
}
dict.bounds = make([]typeInfo, r.Len())
for i := range dict.bounds {
dict.bounds[i] = r.typInfo()
}
dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]types.Type, len(dict.derived))
for i := range dict.derived {
dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
}
pr.retireReader(r)
}
// function references follow, but reader doesn't need those
return &dict
}
func (r *reader) typeParamNames() []*types.TypeParam {
r.Sync(pkgbits.SyncTypeParamNames)
// Note: This code assumes it only processes objects without
// implicit type parameters. This is currently fine, because
// reader is only used to read in exported declarations, which are
// always package scoped.
if len(r.dict.bounds) == 0 {
return nil
}
// Careful: Type parameter lists may have cycles. To allow for this,
// we construct the type parameter list in two passes: first we
// create all the TypeNames and TypeParams, then we construct and
// set the bound type.
r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
for i := range r.dict.bounds {
pos := r.pos()
pkg, name := r.localIdent()
tname := types.NewTypeName(pos, pkg, name, nil)
r.dict.tparams[i] = types.NewTypeParam(tname, nil)
}
typs := make([]types.Type, len(r.dict.bounds))
for i, bound := range r.dict.bounds {
typs[i] = r.p.typIdx(bound, r.dict)
}
// TODO(mdempsky): This is subtle, elaborate further.
//
// We have to save tparams outside of the closure, because
// typeParamNames() can be called multiple times with the same
// dictionary instance.
//
// Also, this needs to happen later to make sure SetUnderlying has
// been called.
//
// TODO(mdempsky): Is it safe to have a single "later" slice or do
// we need to have multiple passes? See comments on CL 386002 and
// go.dev/issue/52104.
tparams := r.dict.tparams
r.p.later(func() {
for i, typ := range typs {
tparams[i].SetConstraint(typ)
}
})
return r.dict.tparams
}
func (r *reader) method() *types.Func {
r.Sync(pkgbits.SyncMethod)
pos := r.pos()
pkg, name := r.selector()
rparams := r.typeParamNames()
sig := r.signature(r.param(), rparams, nil)
_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
return types.NewFunc(pos, pkg, name, sig)
}
func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) }
func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
r.Sync(marker)
return r.pkg(), r.String()
}
// pkgScope returns pkg.Scope().
// If pkg is nil, it returns types.Universe instead.
//
// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
func pkgScope(pkg *types.Package) *types.Scope {
if pkg != nil {
return pkg.Scope()
}
return types.Universe
}
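As a hedged usage sketch (not part of the vendored file): a caller supplies a FileSet, a shared import map, and raw unified export data. The package path and the exportData parameter below are hypothetical, and fmt, log, go/token, and go/types are assumed to be imported.

func exampleUImportData(exportData []byte) {
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)

	// exportData is assumed to hold unified IR export data for the package,
	// including the trailing "\n$$\n" marker that UImportData trims off.
	_, pkg, err := UImportData(fset, imports, exportData, "example.com/somepkg")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Complete())
}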

vendor/golang.org/x/tools/internal/gocommand/invoke.go generated vendored Normal file

@@ -0,0 +1,548 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gocommand is a helper for calling the go command.
package gocommand
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
)
// A Runner will run go command invocations and serialize
// them if it sees a concurrency error.
type Runner struct {
// once guards the runner initialization.
once sync.Once
// inFlight tracks available workers.
inFlight chan struct{}
// serialized guards the ability to run a go command serially,
// to avoid deadlocks when claiming workers.
serialized chan struct{}
}
const maxInFlight = 10
func (runner *Runner) initialize() {
runner.once.Do(func() {
runner.inFlight = make(chan struct{}, maxInFlight)
runner.serialized = make(chan struct{}, 1)
})
}
// 1.13: go: updates to go.mod needed, but contents have changed
// 1.14: go: updating go.mod: existing contents have changed since last read
var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
// event keys for go command invocations
var (
verb = keys.NewString("verb", "go command verb")
directory = keys.NewString("directory", "")
)
func invLabels(inv Invocation) []label.Label {
return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)}
}
// Run is a convenience wrapper around RunRaw.
// It returns only stdout and a "friendly" error.
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
defer done()
stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
return stdout, friendly
}
// RunPiped runs the invocation serially, always waiting for any concurrent
// invocations to complete first.
func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
defer done()
_, err := runner.runPiped(ctx, inv, stdout, stderr)
return err
}
// RunRaw runs the invocation, serializing requests only if they fight over
// go.mod changes.
// Postcondition: both error results have same nilness.
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
defer done()
// Make sure the runner is always initialized.
runner.initialize()
// First, try to run the go command concurrently.
stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
// If we encounter a load concurrency error, we need to retry serially.
if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) {
event.Error(ctx, "Load concurrency error, will retry serially", err)
// Run serially by calling runPiped.
stdout.Reset()
stderr.Reset()
friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
}
return stdout, stderr, friendlyErr, err
}
// Postcondition: both error results have same nilness.
func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
// Wait for 1 worker to become available.
select {
case <-ctx.Done():
return nil, nil, ctx.Err(), ctx.Err()
case runner.inFlight <- struct{}{}:
defer func() { <-runner.inFlight }()
}
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
return stdout, stderr, friendlyErr, err
}
// Postcondition: both error results have same nilness.
func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
// Make sure the runner is always initialized.
runner.initialize()
// Acquire the serialization lock. This avoids deadlocks between two
// runPiped commands.
select {
case <-ctx.Done():
return ctx.Err(), ctx.Err()
case runner.serialized <- struct{}{}:
defer func() { <-runner.serialized }()
}
// Wait for all in-progress go commands to return before proceeding,
// to avoid load concurrency errors.
for i := 0; i < maxInFlight; i++ {
select {
case <-ctx.Done():
return ctx.Err(), ctx.Err()
case runner.inFlight <- struct{}{}:
// Make sure we always "return" any workers we took.
defer func() { <-runner.inFlight }()
}
}
return inv.runWithFriendlyError(ctx, stdout, stderr)
}
// An Invocation represents a call to the go command.
type Invocation struct {
Verb string
Args []string
BuildFlags []string
// If ModFlag is set, the go command is invoked with -mod=ModFlag.
// TODO(rfindley): remove, in favor of Args.
ModFlag string
// If ModFile is set, the go command is invoked with -modfile=ModFile.
// TODO(rfindley): remove, in favor of Args.
ModFile string
// Overlay is the name of the JSON overlay file that describes
// unsaved editor buffers; see [WriteOverlays].
// If set, the go command is invoked with -overlay=Overlay.
// TODO(rfindley): remove, in favor of Args.
Overlay string
// If CleanEnv is set, the invocation will run only with the environment
// in Env, not starting with os.Environ.
CleanEnv bool
Env []string
WorkingDir string
Logf func(format string, args ...interface{})
}
// Postcondition: both error results have same nilness.
func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
rawError = i.run(ctx, stdout, stderr)
if rawError != nil {
friendlyError = rawError
// Check for 'go' executable not being found.
if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
friendlyError = fmt.Errorf("go command required, not found: %v", ee)
}
if ctx.Err() != nil {
friendlyError = ctx.Err()
}
friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
}
return
}
func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
log := i.Logf
if log == nil {
log = func(string, ...interface{}) {}
}
goArgs := []string{i.Verb}
appendModFile := func() {
if i.ModFile != "" {
goArgs = append(goArgs, "-modfile="+i.ModFile)
}
}
appendModFlag := func() {
if i.ModFlag != "" {
goArgs = append(goArgs, "-mod="+i.ModFlag)
}
}
appendOverlayFlag := func() {
if i.Overlay != "" {
goArgs = append(goArgs, "-overlay="+i.Overlay)
}
}
switch i.Verb {
case "env", "version":
goArgs = append(goArgs, i.Args...)
case "mod":
// mod needs the sub-verb before flags.
goArgs = append(goArgs, i.Args[0])
appendModFile()
goArgs = append(goArgs, i.Args[1:]...)
case "get":
goArgs = append(goArgs, i.BuildFlags...)
appendModFile()
goArgs = append(goArgs, i.Args...)
default: // notably list and build.
goArgs = append(goArgs, i.BuildFlags...)
appendModFile()
appendModFlag()
appendOverlayFlag()
goArgs = append(goArgs, i.Args...)
}
cmd := exec.Command("go", goArgs...)
cmd.Stdout = stdout
cmd.Stderr = stderr
// cmd.WaitDelay was added only in go1.20 (see #50436).
if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
// https://go.dev/issue/59541: don't wait forever copying stderr
// after the command has exited.
// After CL 484741 we copy stdout manually, so we'll stop reading that as
// soon as ctx is done. However, we also don't want to wait around forever
// for stderr. Give a much-longer-than-reasonable delay and then assume that
// something has wedged in the kernel or runtime.
waitDelay.Set(reflect.ValueOf(30 * time.Second))
}
// The cwd gets resolved to the real path. On Darwin, where
// /tmp is a symlink, this breaks anything that expects the
// working directory to keep the original path, including the
// go command when dealing with modules.
//
// os.Getwd has a special feature where if the cwd and the PWD
// are the same node then it trusts the PWD, so by setting it
// in the env for the child process we fix up all the paths
// returned by the go command.
if !i.CleanEnv {
cmd.Env = os.Environ()
}
cmd.Env = append(cmd.Env, i.Env...)
if i.WorkingDir != "" {
cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
cmd.Dir = i.WorkingDir
}
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
return runCmdContext(ctx, cmd)
}
// DebugHangingGoCommands may be set by tests to enable additional
// instrumentation (including panics) for debugging hanging Go commands.
//
// See golang/go#54461 for details.
var DebugHangingGoCommands = false
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
// before os.Kill.
func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
// copy it to the Writer in a goroutine until the process has finished and
// either the pipe reaches EOF or command's WaitDelay expires.
//
// However, the output from 'go list' can be quite large, and we don't want to
// keep reading (and allocating buffers) if we've already decided we don't
// care about the output. We don't want to wait for the process to finish, and
// we don't want to wait for the WaitDelay to expire either.
//
// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
// it with a pipe (which is an *os.File), which we can close in order to stop
// copying output as soon as we realize we don't care about it.
var stdoutW *os.File
if cmd.Stdout != nil {
if _, ok := cmd.Stdout.(*os.File); !ok {
var stdoutR *os.File
stdoutR, stdoutW, err = os.Pipe()
if err != nil {
return err
}
prevStdout := cmd.Stdout
cmd.Stdout = stdoutW
stdoutErr := make(chan error, 1)
go func() {
_, err := io.Copy(prevStdout, stdoutR)
if err != nil {
err = fmt.Errorf("copying stdout: %w", err)
}
stdoutErr <- err
}()
defer func() {
// We started a goroutine to copy a stdout pipe.
// Wait for it to finish, or terminate it if need be.
var err2 error
select {
case err2 = <-stdoutErr:
stdoutR.Close()
case <-ctx.Done():
stdoutR.Close()
// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
// should cause the Read call in io.Copy to unblock and return
// immediately, but we still need to receive from stdoutErr to confirm
// that it has happened.
<-stdoutErr
err2 = ctx.Err()
}
if err == nil {
err = err2
}
}()
// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
// same writer, and have a type that can be compared with ==, at most
// one goroutine at a time will call Write.”
//
// Since we're starting a goroutine that writes to cmd.Stdout, we must
// also update cmd.Stderr so that it still holds.
func() {
defer func() { recover() }()
if cmd.Stderr == prevStdout {
cmd.Stderr = cmd.Stdout
}
}()
}
}
startTime := time.Now()
err = cmd.Start()
if stdoutW != nil {
// The child process has inherited the pipe file,
// so close the copy held in this process.
stdoutW.Close()
stdoutW = nil
}
if err != nil {
return err
}
resChan := make(chan error, 1)
go func() {
resChan <- cmd.Wait()
}()
// If we're interested in debugging hanging Go commands, stop waiting after a
// minute and panic with interesting information.
debug := DebugHangingGoCommands
if debug {
timer := time.NewTimer(1 * time.Minute)
defer timer.Stop()
select {
case err := <-resChan:
return err
case <-timer.C:
HandleHangingGoCommand(startTime, cmd)
case <-ctx.Done():
}
} else {
select {
case err := <-resChan:
return err
case <-ctx.Done():
}
}
// Cancelled. Interrupt and see if it ends voluntarily.
if err := cmd.Process.Signal(os.Interrupt); err == nil {
// (We used to wait only 1s but this proved
// fragile on loaded builder machines.)
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case err := <-resChan:
return err
case <-timer.C:
}
}
// Didn't shut down in response to interrupt. Kill it hard.
// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
// on certain platforms, such as unix.
if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
log.Printf("error killing the Go command: %v", err)
}
return <-resChan
}
func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) {
switch runtime.GOOS {
case "linux", "darwin", "freebsd", "netbsd":
fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
The gopls test runner has detected a hanging go command. In order to debug
this, the output of ps and lsof/fstat is printed below.
See golang/go#54461 for more details.`)
fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
fmt.Fprintln(os.Stderr, "-------------------------")
psCmd := exec.Command("ps", "axo", "ppid,pid,command")
psCmd.Stdout = os.Stderr
psCmd.Stderr = os.Stderr
if err := psCmd.Run(); err != nil {
panic(fmt.Sprintf("running ps: %v", err))
}
listFiles := "lsof"
if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
listFiles = "fstat"
}
fmt.Fprintln(os.Stderr, "\n"+listFiles+":")
fmt.Fprintln(os.Stderr, "-----")
listFilesCmd := exec.Command(listFiles)
listFilesCmd.Stdout = os.Stderr
listFilesCmd.Stderr = os.Stderr
if err := listFilesCmd.Run(); err != nil {
panic(fmt.Sprintf("running %s: %v", listFiles, err))
}
}
panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid))
}
func cmdDebugStr(cmd *exec.Cmd) string {
env := make(map[string]string)
for _, kv := range cmd.Env {
split := strings.SplitN(kv, "=", 2)
if len(split) == 2 {
k, v := split[0], split[1]
env[k] = v
}
}
var args []string
for _, arg := range cmd.Args {
quoted := strconv.Quote(arg)
if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
args = append(args, quoted)
} else {
args = append(args, arg)
}
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
}
// WriteOverlays writes each value in the overlay (see the Overlay
// field of go/packages.Config) to a temporary file and returns the name
// of a JSON file describing the mapping that is suitable for the "go
// list -overlay" flag.
//
// On success, the caller must call the cleanup function exactly once
// when the files are no longer needed.
func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) {
// Do nothing if there are no overlays in the config.
if len(overlay) == 0 {
return "", func() {}, nil
}
dir, err := os.MkdirTemp("", "gocommand-*")
if err != nil {
return "", nil, err
}
// The caller must clean up this directory,
// unless this function returns an error.
// (The cleanup operand of each return
// statement below is ignored.)
defer func() {
cleanup = func() {
os.RemoveAll(dir)
}
if err != nil {
cleanup()
cleanup = nil
}
}()
// Write each map entry to a temporary file.
overlays := make(map[string]string)
for k, v := range overlay {
// Use a unique basename for each file (001-foo.go),
// to avoid creating nested directories.
base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k))
filename := filepath.Join(dir, base)
err := os.WriteFile(filename, v, 0666)
if err != nil {
return "", nil, err
}
overlays[k] = filename
}
// Write the JSON overlay file that maps logical file names to temp files.
//
// OverlayJSON is the format overlay files are expected to be in.
// The Replace map maps from overlaid paths to replacement paths:
// the Go command will forward all reads trying to open
// each overlaid path to its replacement path, or consider the overlaid
// path not to exist if the replacement path is empty.
//
// From golang/go#39958.
type OverlayJSON struct {
Replace map[string]string `json:"replace,omitempty"`
}
b, err := json.Marshal(OverlayJSON{Replace: overlays})
if err != nil {
return "", nil, err
}
filename = filepath.Join(dir, "overlay.json")
if err := os.WriteFile(filename, b, 0666); err != nil {
return "", nil, err
}
return filename, nil, nil
}
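A minimal usage sketch (not part of the vendored file): a zero-value Runner is ready to use because initialize() runs lazily, and the caller only fills in the Invocation fields it needs. The working directory below is hypothetical, and context, fmt, and log are assumed to be imported.

func exampleRunGoList() {
	var runner Runner // zero value; initialize() is called on first use

	inv := Invocation{
		Verb:       "list",
		Args:       []string{"-m", "-json"},
		WorkingDir: "/path/to/mod", // hypothetical module directory
		Logf:       func(format string, args ...interface{}) { log.Printf(format, args...) },
	}

	stdout, err := runner.Run(context.Background(), inv)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(stdout.String())
}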

vendor/golang.org/x/tools/internal/gocommand/vendor.go generated vendored Normal file

@@ -0,0 +1,163 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocommand
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"golang.org/x/mod/semver"
)
// ModuleJSON holds information about a module.
type ModuleJSON struct {
Path string // module path
Version string // module version
Versions []string // available module versions (with -versions)
Replace *ModuleJSON // replaced by this module
Time *time.Time // time version was created
Update *ModuleJSON // available update, if any (with -u)
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file used when loading this module, if any
GoVersion string // go version used in module
}
var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands
// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
// of which only Verb and Args are modified to run the appropriate Go command.
// Inspired by setDefaultBuildMod in modload/init.go
func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) {
mainMod, go114, err := getMainModuleAnd114(ctx, inv, r)
if err != nil {
return false, nil, err
}
// We check the GOFLAGS to see if there is anything overridden or not.
inv.Verb = "env"
inv.Args = []string{"GOFLAGS"}
stdout, err := r.Run(ctx, inv)
if err != nil {
return false, nil, err
}
goflags := string(bytes.TrimSpace(stdout.Bytes()))
matches := modFlagRegexp.FindStringSubmatch(goflags)
var modFlag string
if len(matches) != 0 {
modFlag = matches[1]
}
// Don't override an explicit '-mod=' argument.
if modFlag == "vendor" {
return true, mainMod, nil
} else if modFlag != "" {
return false, nil, nil
}
if mainMod == nil || !go114 {
return false, nil, nil
}
// Check 1.14's automatic vendor mode.
if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
// The Go version is at least 1.14, and a vendor directory exists.
// Set -mod=vendor by default.
return true, mainMod, nil
}
}
return false, nil, nil
}
// getMainModuleAnd114 gets one of the main modules' information and whether the
// go command in use is 1.14+. This is the information needed to figure out
// if vendoring should be enabled.
func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
const format = `{{.Path}}
{{.Dir}}
{{.GoMod}}
{{.GoVersion}}
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
`
inv.Verb = "list"
inv.Args = []string{"-m", "-f", format}
stdout, err := r.Run(ctx, inv)
if err != nil {
return nil, false, err
}
lines := strings.Split(stdout.String(), "\n")
if len(lines) < 5 {
return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String())
}
mod := &ModuleJSON{
Path: lines[0],
Dir: lines[1],
GoMod: lines[2],
GoVersion: lines[3],
Main: true,
}
return mod, lines[4] == "go1.14", nil
}
// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands
// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
// of which only Verb and Args are modified to run the appropriate Go command.
// Inspired by setDefaultBuildMod in modload/init.go
func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) {
inv.Verb = "env"
inv.Args = []string{"GOWORK"}
stdout, err := r.Run(ctx, inv)
if err != nil {
return false, nil, err
}
goWork := string(bytes.TrimSpace(stdout.Bytes()))
if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() {
mainMods, err := getWorkspaceMainModules(ctx, inv, r)
if err != nil {
return false, nil, err
}
return true, mainMods, nil
}
return false, nil, nil
}
// getWorkspaceMainModules gets the main modules' information.
// This is the information needed to figure out if vendoring should be enabled.
func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) {
const format = `{{.Path}}
{{.Dir}}
{{.GoMod}}
{{.GoVersion}}
`
inv.Verb = "list"
inv.Args = []string{"-m", "-f", format}
stdout, err := r.Run(ctx, inv)
if err != nil {
return nil, err
}
lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n")
if len(lines) < 4 {
return nil, fmt.Errorf("unexpected stdout: %q", stdout.String())
}
mods := make([]*ModuleJSON, 0, len(lines)/4)
for i := 0; i < len(lines); i += 4 {
mods = append(mods, &ModuleJSON{
Path: lines[i],
Dir: lines[i+1],
GoMod: lines[i+2],
GoVersion: lines[i+3],
Main: true,
})
}
return mods, nil
}
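A short sketch (not part of the vendored file) of the vendoring check: VendorEnabled sets Verb and Args itself, so the Invocation only needs surrounding context such as the working directory, which is hypothetical here; context, fmt, and log are assumed to be imported.

func exampleVendorCheck() {
	runner := &Runner{}
	inv := Invocation{WorkingDir: "/path/to/mod"} // hypothetical; Verb/Args are overwritten internally

	enabled, mainMod, err := VendorEnabled(context.Background(), inv, runner)
	if err != nil {
		log.Fatal(err)
	}
	if enabled {
		fmt.Println("vendoring enabled for module", mainMod.Path)
	}
}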


@@ -0,0 +1,71 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocommand
import (
"context"
"fmt"
"regexp"
"strings"
)
// GoVersion reports the minor version number of the highest release
// tag built into the go command on the PATH.
//
// Note that this may be higher than the version of the go tool used
// to build this application, and thus the versions of the standard
// go/{scanner,parser,ast,types} packages that are linked into it.
// In that case, callers should either downgrade to the version of
// go used to build the application, or report an error that the
// application is too old to use the go command on the PATH.
func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
inv.Verb = "list"
inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
inv.BuildFlags = nil // This is not a build command.
inv.ModFlag = ""
inv.ModFile = ""
inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off")
stdoutBytes, err := r.Run(ctx, inv)
if err != nil {
return 0, err
}
stdout := stdoutBytes.String()
if len(stdout) < 3 {
return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
}
// Split up "[go1.1 go1.15]" and return highest go1.X value.
tags := strings.Fields(stdout[1 : len(stdout)-2])
for i := len(tags) - 1; i >= 0; i-- {
var version int
if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil {
continue
}
return version, nil
}
return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
}
// GoVersionOutput returns the complete output of the go version command.
func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
inv.Verb = "version"
goVersion, err := r.Run(ctx, inv)
if err != nil {
return "", err
}
return goVersion.String(), nil
}
// ParseGoVersionOutput extracts the Go version string
// from the output of the "go version" command.
// Given an unrecognized form, it returns an empty string.
func ParseGoVersionOutput(data string) string {
re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
m := re.FindStringSubmatch(data)
if len(m) != 2 {
return "" // unrecognized version
}
return m[1]
}
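For illustration (not part of the vendored file): ParseGoVersionOutput only plucks out the version token, and GoVersion reuses the same Runner/Invocation pattern as the rest of the package. The sample output string is made up, and context, fmt, and log are assumed to be imported.

func exampleGoVersions() {
	// Pure string parsing; no go command is run here.
	fmt.Println(ParseGoVersionOutput("go version go1.21.5 linux/amd64")) // "go1.21.5"

	// Asks the go command on PATH for its highest release tag (e.g. 21).
	minor, err := GoVersion(context.Background(), Invocation{}, &Runner{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("go1 minor version:", minor)
}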

vendor/golang.org/x/tools/internal/gopathwalk/walk.go generated vendored Normal file

@@ -0,0 +1,337 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gopathwalk is like filepath.Walk but specialized for finding Go
// packages, particularly in $GOPATH and $GOROOT.
package gopathwalk
import (
"bufio"
"bytes"
"io"
"io/fs"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
// Options controls the behavior of a Walk call.
type Options struct {
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
// Search module caches. Also disables legacy goimports ignore rules.
ModulesEnabled bool
// Maximum number of concurrent calls to user-provided callbacks,
// or 0 for GOMAXPROCS.
Concurrency int
}
// RootType indicates the type of a Root.
type RootType int
const (
RootUnknown RootType = iota
RootGOROOT
RootGOPATH
RootCurrentModule
RootModuleCache
RootOther
)
// A Root is a starting point for a Walk.
type Root struct {
Path string
Type RootType
}
// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
//
// For each package found, add will be called with the absolute
// paths of the containing source directory and the package directory.
//
// Unlike filepath.WalkDir, Walk follows symbolic links
// (while guarding against cycles).
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
}
// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to
// find packages.
//
// For each package found, add will be called with the absolute
// paths of the containing source directory and the package directory.
// For each directory that will be scanned, skip will be called
// with the absolute paths of the containing source directory and the directory.
// If skip returns false on a directory it will be processed.
//
// Unlike filepath.WalkDir, WalkSkip follows symbolic links
// (while guarding against cycles).
func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
for _, root := range roots {
walkDir(root, add, skip, opts)
}
}
// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if opts.Logf == nil {
opts.Logf = func(format string, args ...interface{}) {}
}
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
opts.Logf("skipping nonexistent directory: %v", root.Path)
return
}
start := time.Now()
opts.Logf("scanning %s", root.Path)
concurrency := opts.Concurrency
if concurrency == 0 {
// The walk may be either CPU-bound or I/O-bound, depending on what the
// caller-supplied add function does and the details of the user's platform
// and machine. Rather than trying to fine-tune the concurrency level for a
// specific environment, we default to GOMAXPROCS: it is likely to be a good
// choice for a CPU-bound add function, and if it is instead I/O-bound, then
// dealing with I/O saturation is arguably the job of the kernel and/or
// runtime. (Oversaturating I/O seems unlikely to harm performance as badly
// as failing to saturate would.)
concurrency = runtime.GOMAXPROCS(0)
}
w := &walker{
root: root,
add: add,
skip: skip,
opts: opts,
sem: make(chan struct{}, concurrency),
}
w.init()
w.sem <- struct{}{}
path := root.Path
if path == "" {
path = "."
}
if fi, err := os.Lstat(path); err == nil {
w.walk(path, nil, fs.FileInfoToDirEntry(fi))
} else {
w.opts.Logf("scanning directory %v: %v", root.Path, err)
}
<-w.sem
w.walking.Wait()
opts.Logf("scanned %s in %v", root.Path, time.Since(start))
}
// walker is the callback for fastwalk.Walk.
type walker struct {
root Root // The source directory to scan.
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
opts Options // Options passed to Walk by the user.
walking sync.WaitGroup
sem chan struct{} // Channel of semaphore tokens; send to acquire, receive to release.
ignoredDirs []string
added sync.Map // map[string]bool
}
// A symlinkList is a linked list of os.FileInfos for parent directories
// reached via symlinks.
type symlinkList struct {
info os.FileInfo
prev *symlinkList
}
// init initializes the walker based on its Options
func (w *walker) init() {
var ignoredPaths []string
if w.root.Type == RootModuleCache {
ignoredPaths = []string{"cache"}
}
if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
ignoredPaths = w.getIgnoredDirs(w.root.Path)
ignoredPaths = append(ignoredPaths, "v", "mod")
}
for _, p := range ignoredPaths {
full := filepath.Join(w.root.Path, p)
w.ignoredDirs = append(w.ignoredDirs, full)
w.opts.Logf("Directory added to ignore list: %s", full)
}
}
// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
// of relative directories to ignore when scanning for go files.
// The provided path is one of the $GOPATH entries with "src" appended.
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := os.ReadFile(file)
if err != nil {
w.opts.Logf("%v", err)
} else {
w.opts.Logf("Read %s", file)
}
if err != nil {
return nil
}
var ignoredDirs []string
bs := bufio.NewScanner(bytes.NewReader(slurp))
for bs.Scan() {
line := strings.TrimSpace(bs.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
ignoredDirs = append(ignoredDirs, line)
}
return ignoredDirs
}
// shouldSkipDir reports whether the directory should be skipped.
func (w *walker) shouldSkipDir(dir string) bool {
for _, ignoredDir := range w.ignoredDirs {
if dir == ignoredDir {
return true
}
}
if w.skip != nil {
// Check with the user specified callback.
return w.skip(w.root, dir)
}
return false
}
// walk walks through the given path.
//
// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored.
func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) {
if d.Type()&os.ModeSymlink != 0 {
// Walk the symlink's target rather than the symlink itself.
//
// (Note that os.Stat, unlike the lower-level os.Readlink,
// follows arbitrarily many layers of symlinks, so it will eventually
// reach either a non-symlink or a nonexistent target.)
//
// TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src
// and GOPATH/src. Do we really need to traverse them here? If so, why?
fi, err := os.Stat(path)
if err != nil {
w.opts.Logf("%v", err)
return
}
// Avoid walking symlink cycles: if we have already followed a symlink to
// this directory as a parent of itself, don't follow it again.
//
// This doesn't catch the first time through a cycle, but it also minimizes
// the number of extra stat calls we make if we *don't* encounter a cycle.
// Since we don't actually expect to encounter symlink cycles in practice,
// this seems like the right tradeoff.
for parent := pathSymlinks; parent != nil; parent = parent.prev {
if os.SameFile(fi, parent.info) {
return
}
}
pathSymlinks = &symlinkList{
info: fi,
prev: pathSymlinks,
}
d = fs.FileInfoToDirEntry(fi)
}
if d.Type().IsRegular() {
if !strings.HasSuffix(path, ".go") {
return
}
dir := filepath.Dir(path)
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
//
// TODO(bcmills): there are many levels of directory within
// RootModuleCache where this also wouldn't make sense,
// Can we generalize this to any directory without a corresponding
// import path?
return
}
if _, dup := w.added.LoadOrStore(dir, true); !dup {
w.add(w.root, dir)
}
}
if !d.IsDir() {
return
}
base := filepath.Base(path)
if base == "" || base[0] == '.' || base[0] == '_' ||
base == "testdata" ||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
(!w.opts.ModulesEnabled && base == "node_modules") ||
w.shouldSkipDir(path) {
return
}
// Read the directory and walk its entries.
f, err := os.Open(path)
if err != nil {
w.opts.Logf("%v", err)
return
}
defer f.Close()
for {
// We impose an arbitrary limit on the number of ReadDir results per
// directory to limit the amount of memory consumed for stale or upcoming
// directory entries. The limit trades off CPU (number of syscalls to read
// the whole directory) against RAM (reachable directory entries other than
// the one currently being processed).
//
// Since we process the directories recursively, we will end up maintaining
// a slice of entries for each level of the directory tree.
// (Compare https://go.dev/issue/36197.)
ents, err := f.ReadDir(1024)
if err != nil {
if err != io.EOF {
w.opts.Logf("%v", err)
}
break
}
for _, d := range ents {
nextPath := filepath.Join(path, d.Name())
if d.IsDir() {
select {
case w.sem <- struct{}{}:
// Got a new semaphore token, so we can traverse the directory concurrently.
d := d
w.walking.Add(1)
go func() {
defer func() {
<-w.sem
w.walking.Done()
}()
w.walk(nextPath, pathSymlinks, d)
}()
continue
default:
// No tokens available, so traverse serially.
}
}
w.walk(nextPath, pathSymlinks, d)
}
}
}
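A minimal sketch (not part of the vendored file) of driving the walker over a single GOPATH-style root; the path is hypothetical, add fires once per candidate package directory, and fmt and log are assumed to be imported.

func exampleWalkGopath() {
	roots := []Root{{Path: "/home/user/go/src", Type: RootGOPATH}} // hypothetical root

	add := func(root Root, dir string) {
		fmt.Printf("found package dir %s under %s\n", dir, root.Path)
	}

	Walk(roots, add, Options{
		Logf:           log.Printf,
		ModulesEnabled: false,
		// Concurrency: 0 means one worker per GOMAXPROCS.
	})
}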

vendor/golang.org/x/tools/internal/imports/fix.go generated vendored Normal file (1896 lines)

File diff suppressed because it is too large.

vendor/golang.org/x/tools/internal/imports/imports.go generated vendored Normal file

@@ -0,0 +1,354 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package imports implements a Go pretty-printer (like package "go/format")
// that also adds or removes import statements as necessary.
package imports
import (
"bufio"
"bytes"
"context"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/printer"
"go/token"
"io"
"regexp"
"strconv"
"strings"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/internal/event"
)
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
type Options struct {
Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state.
// LocalPrefix is a comma-separated string of import path prefixes, which, if
// set, instructs Process to sort the import paths with the given prefixes
// into another group after 3rd-party packages.
LocalPrefix string
Fragment bool // Accept fragment of a source file (no package statement)
AllErrors bool // Report all errors (not just the first 10 on different lines)
Comments bool // Print comments (true if nil *Options provided)
TabIndent bool // Use tabs for indent (true if nil *Options provided)
TabWidth int // Tab width (8 if nil *Options provided)
FormatOnly bool // Disable the insertion and deletion of imports
}
// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
if !opt.FormatOnly {
if err := fixImports(fileSet, file, filename, opt.Env); err != nil {
return nil, err
}
}
return formatFile(fileSet, file, src, adjust, opt)
}
// FixImports returns a list of fixes to the imports that, when applied,
// will leave the imports in the same state as Process. src and opt must
// be specified.
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
ctx, done := event.Start(ctx, "imports.FixImports")
defer done()
fileSet := token.NewFileSet()
file, _, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
return getFixes(ctx, fileSet, file, filename, opt.Env)
}
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
// is added in when parsing the file. src and opts must be specified, but no
// env is needed.
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
// Don't use parse() -- we don't care about fragments or statement lists
// here, and we need to work with unparseable files.
fileSet := token.NewFileSet()
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
parserMode |= extraMode
file, err := parser.ParseFile(fileSet, filename, src, parserMode)
if file == nil {
return nil, err
}
// Apply the fixes to the file.
apply(fileSet, file, fixes)
return formatFile(fileSet, file, src, nil, opt)
}
// formatFile formats the file syntax tree.
// It may mutate the token.FileSet and the ast.File.
//
// If an adjust function is provided, it is called after formatting
// with the original source (formatFile's src parameter) and the
// formatted file, and returns the postprocessed result.
func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
mergeImports(file)
sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range astutil.Imports(fset, file) {
// Within each block of contiguous imports, see if any
// import lines are in different group numbers. If so,
// we'll need to put a space between them so it's
// compatible with gofmt.
lastGroup := -1
for _, importSpec := range impSection {
importPath, _ := strconv.Unquote(importSpec.Path.Value)
groupNum := importGroup(opt.LocalPrefix, importPath)
if groupNum != lastGroup && lastGroup != -1 {
spacesBefore = append(spacesBefore, importPath)
}
lastGroup = groupNum
}
}
printerMode := printer.UseSpaces
if opt.TabIndent {
printerMode |= printer.TabIndent
}
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
var buf bytes.Buffer
err := printConfig.Fprint(&buf, fset, file)
if err != nil {
return nil, err
}
out := buf.Bytes()
if adjust != nil {
out = adjust(src, out)
}
if len(spacesBefore) > 0 {
out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
if err != nil {
return nil, err
}
}
out, err = format.Source(out)
if err != nil {
return nil, err
}
return out, nil
}
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
// Try as whole source file.
file, err := parser.ParseFile(fset, filename, src, parserMode)
if err == nil {
return file, nil, nil
}
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}
// If this is a declaration list, make it a source file
// by inserting a package clause.
// Insert using a ;, not a newline, so that parse errors are on
// the correct line.
const prefix = "package main;"
psrc := append([]byte(prefix), src...)
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
if err == nil {
// Gofmt will turn the ; into a \n.
// Do that ourselves now and update the file contents,
// so that positions and line numbers are correct going forward.
psrc[len(prefix)-1] = '\n'
fset.File(file.Package).SetLinesForContent(psrc)
// If a main function exists, we will assume this is a main
// package and leave the file.
if containsMainFunc(file) {
return file, nil, nil
}
adjust := func(orig, src []byte) []byte {
// Remove the package clause.
src = src[len(prefix):]
return matchSpace(orig, src)
}
return file, adjust, nil
}
// If the error is that the source file didn't begin with a
// declaration, fall through to try as a statement list.
// Stop and return on any other error.
if !strings.Contains(err.Error(), "expected declaration") {
return nil, nil, err
}
// If this is a statement list, make it a source file
// by inserting a package clause and turning the list
// into a function body. This handles expressions too.
// Insert using a ;, not a newline, so that the line numbers
// in fsrc match the ones in src.
fsrc := append(append([]byte("package p; func _() {"), src...), '}')
file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
if err == nil {
adjust := func(orig, src []byte) []byte {
// Remove the wrapping.
// Gofmt has turned the ; into a \n\n.
src = src[len("package p\n\nfunc _() {"):]
src = src[:len(src)-len("}\n")]
// Gofmt has also indented the function body one level.
// Remove that indent.
src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n"))
return matchSpace(orig, src)
}
return file, adjust, nil
}
// Failed, and out of options.
return nil, nil, err
}
// containsMainFunc checks if a file contains a function declaration with the
// function signature 'func main()'
func containsMainFunc(file *ast.File) bool {
for _, decl := range file.Decls {
if f, ok := decl.(*ast.FuncDecl); ok {
if f.Name.Name != "main" {
continue
}
if len(f.Type.Params.List) != 0 {
continue
}
if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
continue
}
return true
}
}
return false
}
func cutSpace(b []byte) (before, middle, after []byte) {
i := 0
for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
i++
}
j := len(b)
for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
j--
}
if i <= j {
return b[:i], b[i:j], b[j:]
}
return nil, nil, b[j:]
}
// matchSpace reformats src to use the same space context as orig.
// 1. If orig begins with blank lines, matchSpace inserts them at the beginning of src.
// 2. matchSpace copies the indentation of the first non-blank line in orig
// to every non-blank line in src.
// 3. matchSpace copies the trailing space from orig and uses it in place
// of src's trailing space.
func matchSpace(orig []byte, src []byte) []byte {
before, _, after := cutSpace(orig)
i := bytes.LastIndex(before, []byte{'\n'})
before, indent := before[:i+1], before[i+1:]
_, src, _ = cutSpace(src)
var b bytes.Buffer
b.Write(before)
for len(src) > 0 {
line := src
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, src = line[:i+1], line[i+1:]
} else {
src = nil
}
if len(line) > 0 && line[0] != '\n' { // not blank
b.Write(indent)
}
b.Write(line)
}
b.Write(after)
return b.Bytes()
}
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+?)"`)
func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
var out bytes.Buffer
in := bufio.NewReader(r)
inImports := false
done := false
for {
s, err := in.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
if !inImports && !done && strings.HasPrefix(s, "import") {
inImports = true
}
if inImports && (strings.HasPrefix(s, "var") ||
strings.HasPrefix(s, "func") ||
strings.HasPrefix(s, "const") ||
strings.HasPrefix(s, "type")) {
done = true
inImports = false
}
if inImports && len(breaks) > 0 {
if m := impLine.FindStringSubmatch(s); m != nil {
if m[1] == breaks[0] {
out.WriteByte('\n')
breaks = breaks[1:]
}
}
}
fmt.Fprint(&out, s)
}
return out.Bytes(), nil
}
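// Illustrative sketch (not part of the upstream file): addImportSpaces
// re-inserts the blank lines that separated import groups before formatting.
// With breaks = []string{"github.com/foo/bar"}, the block
//
//	import (
//		"fmt"
//		"github.com/foo/bar"
//	)
//
// is rewritten with a blank line inserted before the "github.com/foo/bar" spec.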

841
vendor/golang.org/x/tools/internal/imports/mod.go generated vendored Normal file
View File

@@ -0,0 +1,841 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"golang.org/x/mod/module"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/stdlib"
)
// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning
// as fast as possible, which is desirable for a call to goimports from the
// command line, but it doesn't work as well for gopls, where it suffers from
// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216),
// both caused by populating the cache, albeit in slightly different ways.
//
// A high level list of TODOs:
// - Optimize the scan itself, as there is some redundant statting and
// reading of go.mod files.
// - Invert the relationship between ProcessEnv and Resolver (see the
// docstring of ProcessEnv).
// - Make it easier to use an external resolver implementation.
//
// Smaller TODOs are annotated in the code below.
// ModuleResolver implements the Resolver interface for a workspace using
// modules.
//
// A goal of the ModuleResolver is to invoke the Go command as little as
// possible. To this end, it runs the Go command only for listing module
// information (i.e. `go list -m -e -json ...`). Package scanning, the process
// of loading package information for the modules, is implemented internally
// via the scan method.
//
// It has two types of state: the state derived from the go command, which
// is populated by init, and the state derived from scans, which is populated
// via scan. A root is considered scanned if it has been walked to discover
// directories. However, if the scan did not require additional information
// from the directory (such as package name or exports), the directory
// information itself may be partially populated. It will be lazily filled in
// as needed by scans, using the scanCallback.
type ModuleResolver struct {
env *ProcessEnv
// Module state, populated during construction
dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory
moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset
roots []gopathwalk.Root // roots to scan, in approximate order of importance
mains []*gocommand.ModuleJSON // main modules
mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots
modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path
modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir.
// Scanning state, populated by scan
// scanSema prevents concurrent scans, and guards scannedRoots and the cache
// fields below (though the caches themselves are concurrency safe).
// Receive to acquire, send to release.
scanSema chan struct{}
scannedRoots map[gopathwalk.Root]bool // if true, root has been walked
// Caches of directory info, populated by scans and scan callbacks
//
// moduleCacheCache stores cached information about roots in the module
// cache, which are immutable and therefore do not need to be invalidated.
//
// otherCache stores information about all other roots (even GOROOT), which
// may change.
moduleCacheCache *DirInfoCache
otherCache *DirInfoCache
}
// newModuleResolver returns a new module-aware goimports resolver.
//
// Note: use caution when modifying this constructor: changes must also be
// reflected in ModuleResolver.ClearForNewScan.
func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) {
r := &ModuleResolver{
env: e,
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{} // release
goenv, err := r.env.goEnv()
if err != nil {
return nil, err
}
// TODO(rfindley): can we refactor to share logic with r.env.invokeGo?
inv := gocommand.Invocation{
BuildFlags: r.env.BuildFlags,
ModFlag: r.env.ModFlag,
Env: r.env.env(),
Logf: r.env.Logf,
WorkingDir: r.env.WorkingDir,
}
vendorEnabled := false
var mainModVendor *gocommand.ModuleJSON // for module vendoring
var mainModsVendor []*gocommand.ModuleJSON // for workspace vendoring
goWork := r.env.Env["GOWORK"]
if len(goWork) == 0 {
// TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but
// they should be available from the ProcessEnv. Can we avoid the redundant
// invocation?
vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
if err != nil {
return nil, err
}
} else {
vendorEnabled, mainModsVendor, err = gocommand.WorkspaceVendorEnabled(context.Background(), inv, r.env.GocmdRunner)
if err != nil {
return nil, err
}
}
if vendorEnabled {
if mainModVendor != nil {
// Module vendor mode is on, so all the non-Main modules are irrelevant,
// and we need to search /vendor for everything.
r.mains = []*gocommand.ModuleJSON{mainModVendor}
r.dummyVendorMod = &gocommand.ModuleJSON{
Path: "",
Dir: filepath.Join(mainModVendor.Dir, "vendor"),
}
r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
} else {
// Workspace vendor mode is on, so all the non-Main modules are irrelevant,
// and we need to search /vendor for everything.
r.mains = mainModsVendor
r.dummyVendorMod = &gocommand.ModuleJSON{
Path: "",
Dir: filepath.Join(filepath.Dir(goWork), "vendor"),
}
r.modsByModPath = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod)
r.modsByDir = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod)
}
} else {
// Vendor mode is off, so run go list -m ... to find everything.
err := r.initAllMods()
// We expect an error when running outside of a module with
// GO111MODULE=on. Other errors are fatal.
if err != nil {
if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") {
return nil, err
}
}
}
r.moduleCacheDir = gomodcacheForEnv(goenv)
if r.moduleCacheDir == "" {
return nil, fmt.Errorf("cannot resolve GOMODCACHE")
}
sort.Slice(r.modsByModPath, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.modsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
sort.Slice(r.modsByDir, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator))
}
return count(j) < count(i) // descending order
})
r.roots = []gopathwalk.Root{}
if goenv["GOROOT"] != "" { // "" happens in tests
r.roots = append(r.roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT})
}
r.mainByDir = make(map[string]*gocommand.ModuleJSON)
for _, main := range r.mains {
r.roots = append(r.roots, gopathwalk.Root{Path: main.Dir, Type: gopathwalk.RootCurrentModule})
r.mainByDir[main.Dir] = main
}
if vendorEnabled {
r.roots = append(r.roots, gopathwalk.Root{Path: r.dummyVendorMod.Dir, Type: gopathwalk.RootOther})
} else {
addDep := func(mod *gocommand.ModuleJSON) {
if mod.Replace == nil {
// This is redundant with the cache, but we'll skip it cheaply enough
// when we encounter it in the module cache scan.
//
// Including it at a lower index in r.roots than the module cache dir
// helps prioritize matches from within existing dependencies.
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache})
} else {
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther})
}
}
// Walk dependent modules before scanning the full mod cache, direct deps first.
for _, mod := range r.modsByModPath {
if !mod.Indirect && !mod.Main {
addDep(mod)
}
}
for _, mod := range r.modsByModPath {
if mod.Indirect && !mod.Main {
addDep(mod)
}
}
// If provided, share the moduleCacheCache.
//
// TODO(rfindley): The module cache is immutable. However, the loaded
// exports do depend on GOOS and GOARCH. Fortunately, the
// ProcessEnv.buildContext does not adjust these from build.DefaultContext
// (even though it should). So for now, this is OK to share, but we need to
// add logic for handling GOOS/GOARCH.
r.moduleCacheCache = moduleCacheCache
r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
}
r.scannedRoots = map[gopathwalk.Root]bool{}
if r.moduleCacheCache == nil {
r.moduleCacheCache = NewDirInfoCache()
}
r.otherCache = NewDirInfoCache()
return r, nil
}
// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env
// map, which must have GOMODCACHE and GOPATH populated.
//
// TODO(rfindley): this is defensive refactoring.
// 1. Is this even relevant anymore? Can't we just read GOMODCACHE?
// 2. Use this to separate module cache scanning from other scanning.
func gomodcacheForEnv(goenv map[string]string) string {
if gmc := goenv["GOMODCACHE"]; gmc != "" {
return gmc
}
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
return ""
}
return filepath.Join(gopaths[0], "/pkg/mod")
}
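// Illustrative sketch (not part of the upstream file): when GOMODCACHE is
// unset, the module cache location is derived from the first GOPATH entry.
//
//	dir := gomodcacheForEnv(map[string]string{
//		"GOMODCACHE": "",
//		"GOPATH":     "/home/user/go",
//	})
//	// dir == "/home/user/go/pkg/mod"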
func (r *ModuleResolver) initAllMods() error {
stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-e", "-json", "...")
if err != nil {
return err
}
for dec := json.NewDecoder(stdout); dec.More(); {
mod := &gocommand.ModuleJSON{}
if err := dec.Decode(mod); err != nil {
return err
}
if mod.Dir == "" {
if r.env.Logf != nil {
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
}
// Can't do anything with a module that's not downloaded.
continue
}
// golang/go#36193: the go command doesn't always clean paths.
mod.Dir = filepath.Clean(mod.Dir)
r.modsByModPath = append(r.modsByModPath, mod)
r.modsByDir = append(r.modsByDir, mod)
if mod.Main {
r.mains = append(r.mains, mod)
}
}
return nil
}
// ClearForNewScan invalidates the last scan.
//
// It preserves the set of roots, but forgets about the set of directories.
// Though it forgets the set of module cache directories, it remembers their
// contents, since they are assumed to be immutable.
func (r *ModuleResolver) ClearForNewScan() Resolver {
<-r.scanSema // acquire r, to guard scannedRoots
r2 := &ModuleResolver{
env: r.env,
dummyVendorMod: r.dummyVendorMod,
moduleCacheDir: r.moduleCacheDir,
roots: r.roots,
mains: r.mains,
mainByDir: r.mainByDir,
modsByModPath: r.modsByModPath,
scanSema: make(chan struct{}, 1),
scannedRoots: make(map[gopathwalk.Root]bool),
otherCache: NewDirInfoCache(),
moduleCacheCache: r.moduleCacheCache,
}
r2.scanSema <- struct{}{} // r2 must start released
// Invalidate root scans. We don't need to invalidate module cache roots,
// because they are immutable.
// (We don't support a use case where GOMODCACHE is cleaned in the middle of
// e.g. a gopls session: the user must restart gopls to get accurate
// imports.)
//
// Scanning for new directories in GOMODCACHE should be handled elsewhere,
// via a call to ScanModuleCache.
for _, root := range r.roots {
if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] {
r2.scannedRoots[root] = true
}
}
r.scanSema <- struct{}{} // release r
return r2
}
// ClearModuleInfo invalidates resolver state that depends on go.mod file
// contents (essentially, the output of go list -m -json ...).
//
// Notably, it does not forget directory contents, which are reset
// asynchronously via ClearForNewScan.
//
// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op.
//
// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods.
func (e *ProcessEnv) ClearModuleInfo() {
if r, ok := e.resolver.(*ModuleResolver); ok {
resolver, err := newModuleResolver(e, e.ModCache)
if err != nil {
e.resolver = nil
e.resolverErr = err
return
}
<-r.scanSema // acquire (guards caches)
resolver.moduleCacheCache = r.moduleCacheCache
resolver.otherCache = r.otherCache
r.scanSema <- struct{}{} // release
e.UpdateResolver(resolver)
}
}
// UpdateResolver sets the resolver for the ProcessEnv to use in imports
// operations. Only for use with the result of [Resolver.ClearForNewScan].
//
// TODO(rfindley): this awkward API is a result of the (arguably) inverted
// relationship between configuration and state described in the doc comment
// for [ProcessEnv].
func (e *ProcessEnv) UpdateResolver(r Resolver) {
e.resolver = r
e.resolverErr = nil
}
// findPackage returns the module and directory from within the main modules
// and their dependencies that contains the package at the given import path,
// or returns nil, "" if no module is in scope.
func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) {
// This can't find packages in the stdlib, but that's harmless for all
// the existing code paths.
for _, m := range r.modsByModPath {
if !strings.HasPrefix(importPath, m.Path) {
continue
}
pathInModule := importPath[len(m.Path):]
pkgDir := filepath.Join(m.Dir, pathInModule)
if r.dirIsNestedModule(pkgDir, m) {
continue
}
if info, ok := r.cacheLoad(pkgDir); ok {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
if err != nil {
continue // No package in this dir.
}
return m, pkgDir
}
if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil {
continue // Dir is unreadable, etc.
}
// This is slightly wrong: a directory doesn't have to have an
// importable package to count as a package for package-to-module
// resolution. package main or _test files should count but
// don't.
// TODO(heschi): fix this.
if _, err := r.cachePackageName(info); err == nil {
return m, pkgDir
}
}
// Not cached. Read the filesystem.
pkgFiles, err := os.ReadDir(pkgDir)
if err != nil {
continue
}
// A module only contains a package if it has buildable go
// files in that directory. If not, it could be provided by an
// outer module. See #29736.
for _, fi := range pkgFiles {
if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok {
return m, pkgDir
}
}
}
return nil, ""
}
func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) {
if info, ok := r.moduleCacheCache.Load(dir); ok {
return info, ok
}
return r.otherCache.Load(dir)
}
func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
if info.rootType == gopathwalk.RootModuleCache {
r.moduleCacheCache.Store(info.dir, info)
} else {
r.otherCache.Store(info.dir, info)
}
}
// cachePackageName caches the package name for a dir already in the cache.
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
if info.rootType == gopathwalk.RootModuleCache {
return r.moduleCacheCache.CachePackageName(info)
}
return r.otherCache.CachePackageName(info)
}
func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) {
if info.rootType == gopathwalk.RootModuleCache {
return r.moduleCacheCache.CacheExports(ctx, env, info)
}
return r.otherCache.CacheExports(ctx, env, info)
}
// findModuleByDir returns the module that contains dir, or nil if no such
// module is in scope.
func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON {
// This is quite tricky and may not be correct. dir could be:
// - a package in the main module.
// - a replace target underneath the main module's directory.
// - a nested module in the above.
// - a replace target somewhere totally random.
// - a nested module in the above.
// - in the mod cache.
// - in /vendor/ in -mod=vendor mode.
// - nested module? Dunno.
// Rumor has it that replace targets cannot contain other replace targets.
//
// Note that it is critical here that modsByDir is sorted to have deeper dirs
// first. This ensures that findModuleByDir finds the innermost module.
// See also golang/go#56291.
for _, m := range r.modsByDir {
if !strings.HasPrefix(dir, m.Dir) {
continue
}
if r.dirIsNestedModule(dir, m) {
continue
}
return m
}
return nil
}
// dirIsNestedModule reports if dir is contained in a nested module underneath
// mod, not actually in mod.
func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool {
if !strings.HasPrefix(dir, mod.Dir) {
return false
}
if r.dirInModuleCache(dir) {
// Nested modules in the module cache are pruned,
// so it cannot be a nested module.
return false
}
if mod != nil && mod == r.dummyVendorMod {
// The /vendor pseudomodule is flattened and doesn't actually count.
return false
}
modDir, _ := r.modInfo(dir)
if modDir == "" {
return false
}
return modDir != mod.Dir
}
func readModName(modFile string) string {
modBytes, err := os.ReadFile(modFile)
if err != nil {
return ""
}
return modulePath(modBytes)
}
func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) {
if r.dirInModuleCache(dir) {
if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 {
index := strings.Index(dir, matches[1]+"@"+matches[2])
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
return modDir, readModName(filepath.Join(modDir, "go.mod"))
}
}
for {
if info, ok := r.cacheLoad(dir); ok {
return info.moduleDir, info.moduleName
}
f := filepath.Join(dir, "go.mod")
info, err := os.Stat(f)
if err == nil && !info.IsDir() {
return dir, readModName(f)
}
d := filepath.Dir(dir)
if len(d) >= len(dir) {
return "", "" // reached top of file system, no go.mod
}
dir = d
}
}
func (r *ModuleResolver) dirInModuleCache(dir string) bool {
if r.moduleCacheDir == "" {
return false
}
return strings.HasPrefix(dir, r.moduleCacheDir)
}
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
names := map[string]string{}
for _, path := range importPaths {
// TODO(rfindley): shouldn't this use the dirInfoCache?
_, packageDir := r.findPackage(path)
if packageDir == "" {
continue
}
name, err := packageDirToName(packageDir)
if err != nil {
continue
}
names[path] = name
}
return names, nil
}
func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error {
ctx, done := event.Start(ctx, "imports.ModuleResolver.scan")
defer done()
processDir := func(info directoryPackageInfo) {
// Skip this directory if we were not able to get the package information successfully.
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return
}
pkg, err := r.canonicalize(info)
if err != nil {
return
}
if !callback.dirFound(pkg) {
return
}
pkg.packageName, err = r.cachePackageName(info)
if err != nil {
return
}
if !callback.packageNameLoaded(pkg) {
return
}
_, exports, err := r.loadExports(ctx, pkg, false)
if err != nil {
return
}
callback.exportsLoaded(pkg, exports)
}
// Start processing everything in the cache, and listen for the new stuff
// we discover in the walk below.
stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir)
defer stop1()
stop2 := r.otherCache.ScanAndListen(ctx, processDir)
defer stop2()
// We assume cached directories are fully cached, including all their
// children, and have not changed. We can skip them.
skip := func(root gopathwalk.Root, dir string) bool {
if r.env.SkipPathInScan != nil && root.Type == gopathwalk.RootCurrentModule {
if root.Path == dir {
return false
}
if r.env.SkipPathInScan(filepath.Clean(dir)) {
return true
}
}
info, ok := r.cacheLoad(dir)
if !ok {
return false
}
// This directory can be skipped as long as we have already scanned it.
// Packages with errors will continue to have errors, so there is no need
// to rescan them.
packageScanned, _ := info.reachedStatus(directoryScanned)
return packageScanned
}
add := func(root gopathwalk.Root, dir string) {
r.cacheStore(r.scanDirForPackage(root, dir))
}
// r.roots and the callback are not necessarily safe to use in the
// goroutine below. Process them eagerly.
roots := filterRoots(r.roots, callback.rootFound)
// We can't cancel walks, because we need them to finish to have a usable
// cache. Instead, run them in a separate goroutine and detach.
scanDone := make(chan struct{})
go func() {
select {
case <-ctx.Done():
return
case <-r.scanSema: // acquire
}
defer func() { r.scanSema <- struct{}{} }() // release
// We have the lock on r.scannedRoots, and no other scans can run.
for _, root := range roots {
if ctx.Err() != nil {
return
}
if r.scannedRoots[root] {
continue
}
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true})
r.scannedRoots[root] = true
}
close(scanDone)
}()
select {
case <-ctx.Done():
case <-scanDone:
}
return nil
}
func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 {
if stdlib.HasPackage(path) {
return MaxRelevance
}
mod, _ := r.findPackage(path)
return modRelevance(mod)
}
func modRelevance(mod *gocommand.ModuleJSON) float64 {
var relevance float64
switch {
case mod == nil: // out of scope
return MaxRelevance - 4
case mod.Indirect:
relevance = MaxRelevance - 3
case !mod.Main:
relevance = MaxRelevance - 2
default:
relevance = MaxRelevance - 1 // main module ties with stdlib
}
_, versionString, ok := module.SplitPathVersion(mod.Path)
if ok {
index := strings.Index(versionString, "v")
if index == -1 {
return relevance
}
if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil {
relevance += versionNumber / 1000
}
}
return relevance
}
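// Illustrative sketch (not part of the upstream file): the relative scores
// produced by modRelevance, with MaxRelevance reserved for the stdlib.
//
//	modRelevance(nil)                                             // MaxRelevance - 4 (out of scope)
//	modRelevance(&gocommand.ModuleJSON{Indirect: true})           // MaxRelevance - 3
//	modRelevance(&gocommand.ModuleJSON{})                         // MaxRelevance - 2 (direct dependency)
//	modRelevance(&gocommand.ModuleJSON{Main: true})               // MaxRelevance - 1
//	modRelevance(&gocommand.ModuleJSON{Path: "example.com/m/v2"}) // MaxRelevance - 2 + 0.002 (major version bonus)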
// canonicalize gets the result of canonicalizing the packages using the results
// of initializing the resolver from 'go list -m'.
func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
// Packages in GOROOT are already canonical, regardless of the std/cmd modules.
if info.rootType == gopathwalk.RootGOROOT {
return &pkg{
importPathShort: info.nonCanonicalImportPath,
dir: info.dir,
packageName: path.Base(info.nonCanonicalImportPath),
relevance: MaxRelevance,
}, nil
}
importPath := info.nonCanonicalImportPath
mod := r.findModuleByDir(info.dir)
// Check if the directory is underneath a module that's in scope.
if mod != nil {
// It is. If dir is the target of a replace directive,
// our guessed import path is wrong. Use the real one.
if mod.Dir == info.dir {
importPath = mod.Path
} else {
dirInMod := info.dir[len(mod.Dir)+len("/"):]
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
}
} else if !strings.HasPrefix(importPath, info.moduleName) {
// The module's name doesn't match the package's import path. It
// probably needs a replace directive we don't have.
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir)
}
res := &pkg{
importPathShort: importPath,
dir: info.dir,
relevance: modRelevance(mod),
}
// We may have discovered a package that has a different version
// in scope already. Canonicalize to that one if possible.
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
res.dir = canonicalDir
}
return res, nil
}
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) {
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
return r.cacheExports(ctx, r.env, info)
}
return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
}
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
}
importPath := filepath.ToSlash(subdir)
if strings.HasPrefix(importPath, "vendor/") {
// Only enter vendor directories if they're explicitly requested as a root.
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("unwanted vendor directory"),
}
}
switch root.Type {
case gopathwalk.RootCurrentModule:
importPath = path.Join(r.mainByDir[root.Path].Path, filepath.ToSlash(subdir))
case gopathwalk.RootModuleCache:
matches := modCacheRegexp.FindStringSubmatch(subdir)
if len(matches) == 0 {
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("invalid module cache path: %v", subdir),
}
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
if r.env.Logf != nil {
r.env.Logf("decoding module cache path %q: %v", subdir, err)
}
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
}
}
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
}
modDir, modName := r.modInfo(dir)
result := directoryPackageInfo{
status: directoryScanned,
dir: dir,
rootType: root.Type,
nonCanonicalImportPath: importPath,
moduleDir: modDir,
moduleName: modName,
}
if root.Type == gopathwalk.RootGOROOT {
// stdlib packages are always in scope, despite the confusing go.mod
return result
}
return result
}
// modCacheRegexp splits a path in a module cache into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
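// Illustrative sketch (not part of the upstream file): how modCacheRegexp
// splits a module-cache-relative path into module path, version, and package
// suffix.
//
//	m := modCacheRegexp.FindStringSubmatch("github.com/foo/bar@v1.2.3/baz/qux")
//	// m[1] == "github.com/foo/bar"
//	// m[2] == "v1.2.3"
//	// m[3] == "/baz/qux"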
var (
slashSlash = []byte("//")
moduleStr = []byte("module")
)
// modulePath returns the module path from the gomod file text.
// If it cannot find a module path, it returns an empty string.
// It is tolerant of unrelated problems in the go.mod file.
//
// Copied from cmd/go/internal/modfile.
func modulePath(mod []byte) string {
for len(mod) > 0 {
line := mod
mod = nil
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, mod = line[:i], line[i+1:]
}
if i := bytes.Index(line, slashSlash); i >= 0 {
line = line[:i]
}
line = bytes.TrimSpace(line)
if !bytes.HasPrefix(line, moduleStr) {
continue
}
line = line[len(moduleStr):]
n := len(line)
line = bytes.TrimSpace(line)
if len(line) == n || len(line) == 0 {
continue
}
if line[0] == '"' || line[0] == '`' {
p, err := strconv.Unquote(string(line))
if err != nil {
return "" // malformed quoted string or multiline module path
}
return p
}
return string(line)
}
return "" // missing module path
}
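// Illustrative sketch (not part of the upstream file): modulePath pulls the
// module path out of go.mod text, ignoring comments and handling quoting.
//
//	modulePath([]byte("// generated\nmodule example.com/hello\n\ngo 1.21\n"))
//	// == "example.com/hello"
//	modulePath([]byte("module \"example.com/quoted\"\n"))
//	// == "example.com/quoted"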

331
vendor/golang.org/x/tools/internal/imports/mod_cache.go generated vendored Normal file
View File

@@ -0,0 +1,331 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"context"
"fmt"
"path"
"path/filepath"
"strings"
"sync"
"golang.org/x/mod/module"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/stdlib"
)
// To find packages to import, the resolver needs to know about all of
// the packages that could be imported. This includes packages that are
// already in modules that are in (1) the current module, (2) replace targets,
// and (3) packages in the module cache. Packages in (1) and (2) may change over
// time, as the client may edit the current module and locally replaced modules.
// The module cache (which includes all of the packages in (3)) can only
// ever be added to.
//
// The resolver can thus save state about packages in the module cache
// and guarantee that this will not change over time. To obtain information
// about new modules added to the module cache, the module cache should be
// rescanned.
//
// It is OK to serve information about modules that have been deleted,
// as they do still exist.
// TODO(suzmue): can we share information with the caller about
// what module needs to be downloaded to import this package?
type directoryPackageStatus int
const (
_ directoryPackageStatus = iota
directoryScanned
nameLoaded
exportsLoaded
)
// directoryPackageInfo holds (possibly incomplete) information about packages
// contained in a given directory.
type directoryPackageInfo struct {
// status indicates the extent to which this struct has been filled in.
status directoryPackageStatus
// err is non-nil when there was an error trying to reach status.
err error
// Set when status >= directoryScanned.
// dir is the absolute directory of this package.
dir string
rootType gopathwalk.RootType
// nonCanonicalImportPath is the package's expected import path. It may
// not actually be importable at that path.
nonCanonicalImportPath string
// Module-related information.
moduleDir string // The directory that is the module root of this dir.
moduleName string // The module name that contains this dir.
// Set when status >= nameLoaded.
packageName string // the package name, as declared in the source.
// Set when status >= exportsLoaded.
// TODO(rfindley): it's hard to see this, but exports depend implicitly on
// the default build context GOOS and GOARCH.
//
// We can make this explicit, and key exports by GOOS, GOARCH.
exports []stdlib.Symbol
}
// reachedStatus reports whether info has reached at least the target status,
// along with any error recorded while attempting to reach it.
func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) {
if info.err == nil {
return info.status >= target, nil
}
if info.status == target {
return true, info.err
}
return true, nil
}
// DirInfoCache is a concurrency-safe map for storing information about
// directories that may contain packages.
//
// The information in this cache is built incrementally. Entries are initialized in scan.
// No new keys should be added in any other functions, as all directories containing
// packages are identified in scan.
//
// Other functions, including loadExports and findPackage, may update entries in this cache
// as they discover new things about the directory.
//
// The information in the cache is not expected to change for the cache's
// lifetime, so there is no protection against competing writes. Users should
// take care not to hold the cache across changes to the underlying files.
type DirInfoCache struct {
mu sync.Mutex
// dirs stores information about packages in directories, keyed by absolute path.
dirs map[string]*directoryPackageInfo
listeners map[*int]cacheListener
}
func NewDirInfoCache() *DirInfoCache {
return &DirInfoCache{
dirs: make(map[string]*directoryPackageInfo),
listeners: make(map[*int]cacheListener),
}
}
type cacheListener func(directoryPackageInfo)
// ScanAndListen calls listener on all the items in the cache, and on anything
// newly added. The returned stop function waits for all in-flight callbacks to
// finish and blocks new ones.
func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
ctx, cancel := context.WithCancel(ctx)
// Flushing out all the callbacks is tricky without knowing how many there
// are going to be. Setting an arbitrary limit makes it much easier.
const maxInFlight = 10
sema := make(chan struct{}, maxInFlight)
for i := 0; i < maxInFlight; i++ {
sema <- struct{}{}
}
cookie := new(int) // A unique ID we can use for the listener.
// We can't hold mu while calling the listener.
d.mu.Lock()
var keys []string
for key := range d.dirs {
keys = append(keys, key)
}
d.listeners[cookie] = func(info directoryPackageInfo) {
select {
case <-ctx.Done():
return
case <-sema:
}
listener(info)
sema <- struct{}{}
}
d.mu.Unlock()
stop := func() {
cancel()
d.mu.Lock()
delete(d.listeners, cookie)
d.mu.Unlock()
for i := 0; i < maxInFlight; i++ {
<-sema
}
}
// Process the pre-existing keys.
for _, k := range keys {
select {
case <-ctx.Done():
return stop
default:
}
if v, ok := d.Load(k); ok {
listener(v)
}
}
return stop
}
// Store stores the package info for dir.
func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) {
d.mu.Lock()
// TODO(rfindley, golang/go#59216): should we overwrite an existing entry?
// That seems incorrect as the cache should be idempotent.
_, old := d.dirs[dir]
d.dirs[dir] = &info
var listeners []cacheListener
for _, l := range d.listeners {
listeners = append(listeners, l)
}
d.mu.Unlock()
if !old {
for _, l := range listeners {
l(info)
}
}
}
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
d.mu.Lock()
defer d.mu.Unlock()
info, ok := d.dirs[dir]
if !ok {
return directoryPackageInfo{}, false
}
return *info, true
}
// Keys returns the keys currently present in d.
func (d *DirInfoCache) Keys() (keys []string) {
d.mu.Lock()
defer d.mu.Unlock()
for key := range d.dirs {
keys = append(keys, key)
}
return keys
}
func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
return info.packageName, err
}
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return "", fmt.Errorf("cannot read package name, scan error: %v", err)
}
info.packageName, info.err = packageDirToName(info.dir)
info.status = nameLoaded
d.Store(info.dir, info)
return info.packageName, info.err
}
func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) {
if reached, _ := info.reachedStatus(exportsLoaded); reached {
return info.packageName, info.exports, info.err
}
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
return "", nil, err
}
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
if info.err == context.Canceled || info.err == context.DeadlineExceeded {
return info.packageName, info.exports, info.err
}
// The cache structure wants things to proceed linearly. We can skip a
// step here, but only if we succeed.
if info.status == nameLoaded || info.err == nil {
info.status = exportsLoaded
} else {
info.status = nameLoaded
}
d.Store(info.dir, info)
return info.packageName, info.exports, info.err
}
// ScanModuleCache walks the given directory, which must be a GOMODCACHE value,
// for directory package information, storing the results in cache.
func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) {
// Note(rfindley): it's hard to see, but this function attempts to implement
// just the side effects on cache of calling PrimeCache with a ProcessEnv
// that has the given dir as its GOMODCACHE.
//
// Teasing out the control flow, we see that we can avoid any handling of
// vendor/ and can infer module info entirely from the path, simplifying the
// logic here.
root := gopathwalk.Root{
Path: filepath.Clean(dir),
Type: gopathwalk.RootModuleCache,
}
directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo {
// This is a copy of ModuleResolver.scanDirForPackage, trimmed down to
// logic that applies to a module cache directory.
subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
}
matches := modCacheRegexp.FindStringSubmatch(subdir)
if len(matches) == 0 {
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("invalid module cache path: %v", subdir),
}
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
if logf != nil {
logf("decoding module cache path %q: %v", subdir, err)
}
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
}
}
importPath := path.Join(modPath, filepath.ToSlash(matches[3]))
index := strings.Index(dir, matches[1]+"@"+matches[2])
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
modName := readModName(filepath.Join(modDir, "go.mod"))
return directoryPackageInfo{
status: directoryScanned,
dir: dir,
rootType: root.Type,
nonCanonicalImportPath: importPath,
moduleDir: modDir,
moduleName: modName,
}
}
add := func(root gopathwalk.Root, dir string) {
info := directoryInfo(root, dir)
cache.Store(info.dir, info)
}
skip := func(_ gopathwalk.Root, dir string) bool {
// Skip directories that have already been scanned.
//
// Note that gopathwalk only adds "package" directories, which must contain
// a .go file, and all such package directories in the module cache are
// immutable. So if we can load a dir, it can be skipped.
info, ok := cache.Load(dir)
if !ok {
return false
}
packageScanned, _ := info.reachedStatus(directoryScanned)
return packageScanned
}
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true})
}

View File

@@ -0,0 +1,297 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hacked up copy of go/ast/import.go
// Modified to use a single token.File in preference to a FileSet.
package imports
import (
"go/ast"
"go/token"
"log"
"sort"
"strconv"
)
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
//
// It may mutate the token.File and the ast.File.
func sortImports(localPrefix string, tokFile *token.File, f *ast.File) {
for i, d := range f.Decls {
d, ok := d.(*ast.GenDecl)
if !ok || d.Tok != token.IMPORT {
// Not an import declaration, so we're done.
// Imports are always first.
break
}
if len(d.Specs) == 0 {
// Empty import block, remove it.
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
}
if !d.Lparen.IsValid() {
// Not a block: sorted by default.
continue
}
// Identify and sort runs of specs on successive lines.
i := 0
specs := d.Specs[:0]
for j, s := range d.Specs {
if j > i && tokFile.Line(s.Pos()) > 1+tokFile.Line(d.Specs[j-1].End()) {
// j begins a new run. End this one.
specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:j])...)
i = j
}
}
specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:])...)
d.Specs = specs
// Deduping can leave a blank line before the rparen; clean that up.
// Ignore line directives.
if len(d.Specs) > 0 {
lastSpec := d.Specs[len(d.Specs)-1]
lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line
if rParenLine := tokFile.PositionFor(d.Rparen, false).Line; rParenLine > lastLine+1 {
tokFile.MergeLine(rParenLine - 1) // has side effects!
}
}
}
}
// mergeImports merges all the import declarations into the first one.
// Taken from golang.org/x/tools/ast/astutil.
// It does not adjust line numbers properly.
func mergeImports(f *ast.File) {
if len(f.Decls) <= 1 {
return
}
// Merge all the import declarations into the first one.
var first *ast.GenDecl
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
continue
}
if first == nil {
first = gen
continue // Don't touch the first one.
}
// We now know there is more than one package in this import
// declaration. Ensure that it ends up parenthesized.
first.Lparen = first.Pos()
// Move the imports of the other import declaration to the first one.
for _, spec := range gen.Specs {
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
first.Specs = append(first.Specs, spec)
}
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
i--
}
}
// declImports reports whether gen contains an import of path.
// Taken from golang.org/x/tools/ast/astutil.
func declImports(gen *ast.GenDecl, path string) bool {
if gen.Tok != token.IMPORT {
return false
}
for _, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
if importPath(impspec) == path {
return true
}
}
return false
}
func importPath(s ast.Spec) string {
t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
if err == nil {
return t
}
return ""
}
func importName(s ast.Spec) string {
n := s.(*ast.ImportSpec).Name
if n == nil {
return ""
}
return n.Name
}
func importComment(s ast.Spec) string {
c := s.(*ast.ImportSpec).Comment
if c == nil {
return ""
}
return c.Text()
}
// collapse indicates whether prev may be removed, leaving only next.
func collapse(prev, next ast.Spec) bool {
if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
return false
}
return prev.(*ast.ImportSpec).Comment == nil
}
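// Illustrative sketch (not part of the upstream file): during deduplication,
// collapse only lets the earlier of two identical specs be dropped when that
// spec carries no trailing comment.
//
//	`"fmt"` followed by `"fmt"`           // collapses to a single spec
//	`f "fmt"` followed by `"fmt"`         // kept: names differ
//	`"fmt" // core` followed by `"fmt"`   // kept: dropping would lose the comment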
type posSpan struct {
Start token.Pos
End token.Pos
}
// sortSpecs sorts the import specs within each import decl.
// It may mutate the token.File.
func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast.Spec) []ast.Spec {
// Can't short-circuit here even if specs are already sorted,
// since they might yet need deduplication.
// A lone import, however, may be safely ignored.
if len(specs) <= 1 {
return specs
}
// Record positions for specs.
pos := make([]posSpan, len(specs))
for i, s := range specs {
pos[i] = posSpan{s.Pos(), s.End()}
}
// Identify comments in this range.
// Any comment from pos[0].Start to the final line counts.
lastLine := tokFile.Line(pos[len(pos)-1].End)
cstart := len(f.Comments)
cend := len(f.Comments)
for i, g := range f.Comments {
if g.Pos() < pos[0].Start {
continue
}
if i < cstart {
cstart = i
}
if tokFile.Line(g.End()) > lastLine {
cend = i
break
}
}
comments := f.Comments[cstart:cend]
// Assign each comment to the import spec preceding it.
importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
specIndex := 0
for _, g := range comments {
for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
specIndex++
}
s := specs[specIndex].(*ast.ImportSpec)
importComment[s] = append(importComment[s], g)
}
// Sort the import specs by import path.
// Remove duplicates, when possible without data loss.
// Reassign the import paths to have the same position sequence.
// Reassign each comment to abut the end of its spec.
// Sort the comments by new position.
sort.Sort(byImportSpec{localPrefix, specs})
// Dedup. Thanks to our sorting, we can just consider
// adjacent pairs of imports.
deduped := specs[:0]
for i, s := range specs {
if i == len(specs)-1 || !collapse(s, specs[i+1]) {
deduped = append(deduped, s)
} else {
p := s.Pos()
tokFile.MergeLine(tokFile.Line(p)) // has side effects!
}
}
specs = deduped
// Fix up comment positions
for i, s := range specs {
s := s.(*ast.ImportSpec)
if s.Name != nil {
s.Name.NamePos = pos[i].Start
}
s.Path.ValuePos = pos[i].Start
s.EndPos = pos[i].End
nextSpecPos := pos[i].End
for _, g := range importComment[s] {
for _, c := range g.List {
c.Slash = pos[i].End
nextSpecPos = c.End()
}
}
if i < len(specs)-1 {
pos[i+1].Start = nextSpecPos
pos[i+1].End = nextSpecPos
}
}
sort.Sort(byCommentPos(comments))
// Fixing up comments can insert blank lines, because import specs may now be on different lines.
// We remove those blank lines here by merging each import spec's line into the first import spec's line.
firstSpecLine := tokFile.Line(specs[0].Pos())
for _, s := range specs[1:] {
p := s.Pos()
line := tokFile.Line(p)
for previousLine := line - 1; previousLine >= firstSpecLine; {
// MergeLine can panic. Avoid the panic at the cost of not removing the blank line
// golang/go#50329
if previousLine > 0 && previousLine < tokFile.LineCount() {
tokFile.MergeLine(previousLine) // has side effects!
previousLine--
} else {
// try to gather some data to diagnose how this could happen
req := "Please report what the imports section of your go file looked like."
log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. %s",
firstSpecLine, line, previousLine, tokFile.LineCount(), req)
}
}
}
return specs
}
type byImportSpec struct {
localPrefix string
specs []ast.Spec // slice of *ast.ImportSpec
}
func (x byImportSpec) Len() int { return len(x.specs) }
func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] }
func (x byImportSpec) Less(i, j int) bool {
ipath := importPath(x.specs[i])
jpath := importPath(x.specs[j])
igroup := importGroup(x.localPrefix, ipath)
jgroup := importGroup(x.localPrefix, jpath)
if igroup != jgroup {
return igroup < jgroup
}
if ipath != jpath {
return ipath < jpath
}
iname := importName(x.specs[i])
jname := importName(x.specs[j])
if iname != jname {
return iname < jname
}
return importComment(x.specs[i]) < importComment(x.specs[j])
}
type byCommentPos []*ast.CommentGroup
func (x byCommentPos) Len() int { return len(x) }
func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }

View File

@@ -0,0 +1,22 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package packagesinternal exposes internal-only fields from go/packages.
package packagesinternal
var GetForTest = func(p interface{}) string { return "" }
var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
type PackageError struct {
ImportStack []string // shortest path from package named on command line to this one
Pos string // position of error (if present, file:line:col)
Err string // the error itself
}
var TypecheckCgo int
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
var ForTest int // must be set as a LoadMode to call GetForTest
var SetModFlag = func(config interface{}, value string) {}
var SetModFile = func(config interface{}, value string) {}

77
vendor/golang.org/x/tools/internal/pkgbits/codes.go generated vendored Normal file
View File

@@ -0,0 +1,77 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
// A Code is an enum value that can be encoded into bitstreams.
//
// Code types are preferable for enum types, because they allow
// Decoder to detect desyncs.
type Code interface {
// Marker returns the SyncMarker for the Code's dynamic type.
Marker() SyncMarker
// Value returns the Code's ordinal value.
Value() int
}
// A CodeVal distinguishes among go/constant.Value encodings.
type CodeVal int
func (c CodeVal) Marker() SyncMarker { return SyncVal }
func (c CodeVal) Value() int { return int(c) }
// Note: These values are public and cannot be changed without
// updating the go/types importers.
const (
ValBool CodeVal = iota
ValString
ValInt64
ValBigInt
ValBigRat
ValBigFloat
)
// A CodeType distinguishes among go/types.Type encodings.
type CodeType int
func (c CodeType) Marker() SyncMarker { return SyncType }
func (c CodeType) Value() int { return int(c) }
// Note: These values are public and cannot be changed without
// updating the go/types importers.
const (
TypeBasic CodeType = iota
TypeNamed
TypePointer
TypeSlice
TypeArray
TypeChan
TypeMap
TypeSignature
TypeStruct
TypeInterface
TypeUnion
TypeTypeParam
)
// A CodeObj distinguishes among go/types.Object encodings.
type CodeObj int
func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
func (c CodeObj) Value() int { return int(c) }
// Note: These values are public and cannot be changed without
// updating the go/types importers.
const (
ObjAlias CodeObj = iota
ObjConst
ObjType
ObjFunc
ObjVar
ObjStub
)

521
vendor/golang.org/x/tools/internal/pkgbits/decoder.go generated vendored Normal file
View File

@@ -0,0 +1,521 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
import (
"encoding/binary"
"errors"
"fmt"
"go/constant"
"go/token"
"io"
"math/big"
"os"
"runtime"
"strings"
)
// A PkgDecoder provides methods for decoding a package's Unified IR
// export data.
type PkgDecoder struct {
// version is the file format version.
version uint32
// aliases determines whether types.Aliases should be created
aliases bool
// sync indicates whether the file uses sync markers.
sync bool
// pkgPath is the package path for the package to be decoded.
//
// TODO(mdempsky): Remove; unneeded since CL 391014.
pkgPath string
// elemData is the full data payload of the encoded package.
// Elements are densely and contiguously packed together.
//
// The last 8 bytes of elemData are the package fingerprint.
elemData string
// elemEnds stores the byte-offset end positions of element
// bitstreams within elemData.
//
// For example, element I's bitstream data starts at elemEnds[I-1]
// (or 0, if I==0) and ends at elemEnds[I].
//
// Note: elemEnds is indexed by absolute indices, not
// section-relative indices.
elemEnds []uint32
// elemEndsEnds stores the index-offset end positions of relocation
// sections within elemEnds.
//
// For example, section K's end positions start at elemEndsEnds[K-1]
// (or 0, if K==0) and end at elemEndsEnds[K].
elemEndsEnds [numRelocs]uint32
scratchRelocEnt []RelocEnt
}
// PkgPath returns the package path for the package
//
// TODO(mdempsky): Remove; unneeded since CL 391014.
func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
// SyncMarkers reports whether pr uses sync markers.
func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
// IR export data from input. pkgPath is the package path for the
// compilation unit that produced the export data.
//
// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
func NewPkgDecoder(pkgPath, input string) PkgDecoder {
pr := PkgDecoder{
pkgPath: pkgPath,
//aliases: aliases.Enabled(),
}
// TODO(mdempsky): Implement direct indexing of input string to
// avoid copying the position information.
r := strings.NewReader(input)
assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
switch pr.version {
default:
panic(fmt.Errorf("unsupported version: %v", pr.version))
case 0:
// no flags
case 1:
var flags uint32
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
pr.sync = flags&flagSyncMarkers != 0
}
assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
pos, err := r.Seek(0, io.SeekCurrent)
assert(err == nil)
pr.elemData = input[pos:]
assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
return pr
}
// NumElems returns the number of elements in section k.
func (pr *PkgDecoder) NumElems(k RelocKind) int {
count := int(pr.elemEndsEnds[k])
if k > 0 {
count -= int(pr.elemEndsEnds[k-1])
}
return count
}
// TotalElems returns the total number of elements across all sections.
func (pr *PkgDecoder) TotalElems() int {
return len(pr.elemEnds)
}
// Fingerprint returns the package fingerprint.
func (pr *PkgDecoder) Fingerprint() [8]byte {
var fp [8]byte
copy(fp[:], pr.elemData[len(pr.elemData)-8:])
return fp
}
// AbsIdx returns the absolute index for the given (section, index)
// pair.
func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
absIdx := int(idx)
if k > 0 {
absIdx += int(pr.elemEndsEnds[k-1])
}
if absIdx >= int(pr.elemEndsEnds[k]) {
errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
}
return absIdx
}
// DataIdx returns the raw element bitstream for the given (section,
// index) pair.
func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string {
absIdx := pr.AbsIdx(k, idx)
var start uint32
if absIdx > 0 {
start = pr.elemEnds[absIdx-1]
}
end := pr.elemEnds[absIdx]
return pr.elemData[start:end]
}
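// Illustrative sketch (not part of the upstream file): how the end-offset
// table drives DataIdx. With elemEnds = []uint32{4, 9, 15}, element 0 is
// elemData[0:4], element 1 is elemData[4:9], and element 2 is elemData[9:15];
// DataIdx simply slices between the previous element's end offset and its own.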
// StringIdx returns the string value for the given string index.
func (pr *PkgDecoder) StringIdx(idx Index) string {
return pr.DataIdx(RelocString, idx)
}
// NewDecoder returns a Decoder for the given (section, index) pair,
// and decodes the given SyncMarker from the element bitstream.
func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
r := pr.NewDecoderRaw(k, idx)
r.Sync(marker)
return r
}
// TempDecoder returns a Decoder for the given (section, index) pair,
// and decodes the given SyncMarker from the element bitstream.
// If possible, the Decoder should be passed to RetireDecoder when it is no
// longer needed; this avoids heap allocations.
func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
r := pr.TempDecoderRaw(k, idx)
r.Sync(marker)
return r
}
func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
pr.scratchRelocEnt = d.Relocs
d.Relocs = nil
}
// NewDecoderRaw returns a Decoder for the given (section, index) pair.
//
// Most callers should use NewDecoder instead.
func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
r := Decoder{
common: pr,
k: k,
Idx: idx,
}
// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
r.Data = *strings.NewReader(pr.DataIdx(k, idx))
r.Sync(SyncRelocs)
r.Relocs = make([]RelocEnt, r.Len())
for i := range r.Relocs {
r.Sync(SyncReloc)
r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
}
return r
}
func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
r := Decoder{
common: pr,
k: k,
Idx: idx,
}
r.Data.Reset(pr.DataIdx(k, idx))
r.Sync(SyncRelocs)
l := r.Len()
if cap(pr.scratchRelocEnt) >= l {
r.Relocs = pr.scratchRelocEnt[:l]
pr.scratchRelocEnt = nil
} else {
r.Relocs = make([]RelocEnt, l)
}
for i := range r.Relocs {
r.Sync(SyncReloc)
r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
}
return r
}
// A Decoder provides methods for decoding an individual element's
// bitstream data.
type Decoder struct {
common *PkgDecoder
Relocs []RelocEnt
Data strings.Reader
k RelocKind
Idx Index
}
func (r *Decoder) checkErr(err error) {
if err != nil {
errorf("unexpected decoding error: %w", err)
}
}
func (r *Decoder) rawUvarint() uint64 {
x, err := readUvarint(&r.Data)
r.checkErr(err)
return x
}
// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
// This avoids the interface conversion and thus has better escape properties,
// which flows up the stack.
func readUvarint(r *strings.Reader) (uint64, error) {
var x uint64
var s uint
for i := 0; i < binary.MaxVarintLen64; i++ {
b, err := r.ReadByte()
if err != nil {
if i > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return x, err
}
if b < 0x80 {
if i == binary.MaxVarintLen64-1 && b > 1 {
return x, overflow
}
return x | uint64(b)<<s, nil
}
x |= uint64(b&0x7f) << s
s += 7
}
return x, overflow
}
var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
func (r *Decoder) rawVarint() int64 {
ux := r.rawUvarint()
// Zig-zag decode.
x := int64(ux >> 1)
if ux&1 != 0 {
x = ^x
}
return x
}
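// Illustrative sketch (not part of the upstream file): the zig-zag decoding
// above interleaves non-negative and negative values.
//
//	ux: 0   1   2   3   4   5
//	x:  0  -1   1  -2   2  -3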
func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
e := r.Relocs[idx]
assert(e.Kind == k)
return e.Idx
}
// Sync decodes a sync marker from the element bitstream and asserts
// that it matches the expected marker.
//
// If r.common.sync is false, then Sync is a no-op.
func (r *Decoder) Sync(mWant SyncMarker) {
if !r.common.sync {
return
}
pos, _ := r.Data.Seek(0, io.SeekCurrent)
mHave := SyncMarker(r.rawUvarint())
writerPCs := make([]int, r.rawUvarint())
for i := range writerPCs {
writerPCs[i] = int(r.rawUvarint())
}
if mHave == mWant {
return
}
// There's some tension here between printing:
//
// (1) full file paths that tools can recognize (e.g., so emacs
// hyperlinks the "file:line" text for easy navigation), or
//
// (2) short file paths that are easier for humans to read (e.g., by
// omitting redundant or irrelevant details, so it's easier to
// focus on the useful bits that remain).
//
// The current formatting favors the former, as it seems more
// helpful in practice. But perhaps the formatting could be improved
// to better address both concerns. For example, use relative file
// paths if they would be shorter, or rewrite file paths to contain
// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
// to reliably expand that again.
fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
fmt.Printf("\nfound %v, written at:\n", mHave)
if len(writerPCs) == 0 {
fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
}
for _, pc := range writerPCs {
fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
}
fmt.Printf("\nexpected %v, reading at:\n", mWant)
var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
n := runtime.Callers(2, readerPCs[:])
for _, pc := range fmtFrames(readerPCs[:n]...) {
fmt.Printf("\t%s\n", pc)
}
// We already printed a stack trace for the reader, so now we can
// simply exit. Printing a second one with panic or base.Fatalf
// would just be noise.
os.Exit(1)
}
// Bool decodes and returns a bool value from the element bitstream.
func (r *Decoder) Bool() bool {
r.Sync(SyncBool)
x, err := r.Data.ReadByte()
r.checkErr(err)
assert(x < 2)
return x != 0
}
// Int64 decodes and returns an int64 value from the element bitstream.
func (r *Decoder) Int64() int64 {
r.Sync(SyncInt64)
return r.rawVarint()
}
// Uint64 decodes and returns a uint64 value from the element bitstream.
func (r *Decoder) Uint64() uint64 {
r.Sync(SyncUint64)
return r.rawUvarint()
}
// Len decodes and returns a non-negative int value from the element bitstream.
func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
// Int decodes and returns an int value from the element bitstream.
func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
// Uint decodes and returns a uint value from the element bitstream.
func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
// Code decodes a Code value from the element bitstream and returns
// its ordinal value. It's the caller's responsibility to convert the
// result to an appropriate Code type.
//
// TODO(mdempsky): Ideally this method would have signature "Code[T
// Code] T" instead, but we don't allow generic methods and the
// compiler can't depend on generics yet anyway.
func (r *Decoder) Code(mark SyncMarker) int {
r.Sync(mark)
return r.Len()
}
// Reloc decodes a relocation of expected section k from the element
// bitstream and returns an index to the referenced element.
func (r *Decoder) Reloc(k RelocKind) Index {
r.Sync(SyncUseReloc)
return r.rawReloc(k, r.Len())
}
// String decodes and returns a string value from the element
// bitstream.
func (r *Decoder) String() string {
r.Sync(SyncString)
return r.common.StringIdx(r.Reloc(RelocString))
}
// Strings decodes and returns a variable-length slice of strings from
// the element bitstream.
func (r *Decoder) Strings() []string {
res := make([]string, r.Len())
for i := range res {
res[i] = r.String()
}
return res
}
// Value decodes and returns a constant.Value from the element
// bitstream.
func (r *Decoder) Value() constant.Value {
r.Sync(SyncValue)
isComplex := r.Bool()
val := r.scalar()
if isComplex {
val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
}
return val
}
func (r *Decoder) scalar() constant.Value {
switch tag := CodeVal(r.Code(SyncVal)); tag {
default:
panic(fmt.Errorf("unexpected scalar tag: %v", tag))
case ValBool:
return constant.MakeBool(r.Bool())
case ValString:
return constant.MakeString(r.String())
case ValInt64:
return constant.MakeInt64(r.Int64())
case ValBigInt:
return constant.Make(r.bigInt())
case ValBigRat:
num := r.bigInt()
denom := r.bigInt()
return constant.Make(new(big.Rat).SetFrac(num, denom))
case ValBigFloat:
return constant.Make(r.bigFloat())
}
}
func (r *Decoder) bigInt() *big.Int {
v := new(big.Int).SetBytes([]byte(r.String()))
if r.Bool() {
v.Neg(v)
}
return v
}
func (r *Decoder) bigFloat() *big.Float {
v := new(big.Float).SetPrec(512)
assert(v.UnmarshalText([]byte(r.String())) == nil)
return v
}
// @@@ Helpers
// TODO(mdempsky): These should probably be removed. I think they're a
// smell that the export data format is not yet quite right.
// PeekPkgPath returns the package path for the specified package
// index.
func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
var path string
{
r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
path = r.String()
pr.RetireDecoder(&r)
}
if path == "" {
path = pr.pkgPath
}
return path
}
// PeekObj returns the package path, object name, and CodeObj for the
// specified object index.
func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
var ridx Index
var name string
var rcode int
{
r := pr.TempDecoder(RelocName, idx, SyncObject1)
r.Sync(SyncSym)
r.Sync(SyncPkg)
ridx = r.Reloc(RelocPkg)
name = r.String()
rcode = r.Code(SyncCodeObj)
pr.RetireDecoder(&r)
}
path := pr.PeekPkgPath(ridx)
assert(name != "")
tag := CodeObj(rcode)
return path, name, tag
}

32
vendor/golang.org/x/tools/internal/pkgbits/doc.go generated vendored Normal file

@@ -0,0 +1,32 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pkgbits implements low-level coding abstractions for
// Unified IR's export data format.
//
// At a low-level, a package is a collection of bitstream elements.
// Each element has a "kind" and a dense, non-negative index.
// Elements can be randomly accessed given their kind and index.
//
// Individual elements are sequences of variable-length values (e.g.,
// integers, booleans, strings, go/constant values, cross-references
// to other elements). Package pkgbits provides APIs for encoding and
// decoding these low-level values, but the details of mapping
// higher-level Go constructs into elements is left to higher-level
// abstractions.
//
// Elements may cross-reference each other with "relocations." For
// example, an element representing a pointer type has a relocation
// referring to the element type.
//
// Go constructs may be composed as a constellation of multiple
// elements. For example, a declared function may have one element to
// describe the object (e.g., its name, type, position), and a
// separate element to describe its function body. This allows readers
// some flexibility in efficiently seeking or re-reading data (e.g.,
// inlining requires re-reading the function body for each inlined
// call, without needing to re-read the object-level details).
//
// This is a copy of internal/pkgbits in the Go implementation.
package pkgbits
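To orient reviewers of this vendored package, here is a minimal round-trip sketch of the API described above (not part of the vendored code): it encodes a single string-bearing element with PkgEncoder, serializes everything with DumpTo, then reads the element back through PkgDecoder. NewPkgDecoder and (*PkgDecoder).NewDecoder come from decoder.go above; the package path "example.com/p" and the use of RelocMeta/PublicRootIdx are illustrative assumptions, and the import only resolves from within the x/tools module.

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/tools/internal/pkgbits"
)

func main() {
	// Encode one element in the meta section that holds a single string.
	pw := pkgbits.NewPkgEncoder(-1) // -1: omit sync markers
	w := pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic)
	w.String("hello")
	w.Flush()

	// Serialize the whole package: header, section tables, element data, fingerprint.
	var buf bytes.Buffer
	pw.DumpTo(&buf)

	// Decode the same element back out of the payload.
	pr := pkgbits.NewPkgDecoder("example.com/p", buf.String())
	r := pr.NewDecoder(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
	fmt.Println(r.String()) // "hello"
}
```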

383
vendor/golang.org/x/tools/internal/pkgbits/encoder.go generated vendored Normal file

@@ -0,0 +1,383 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
import (
"bytes"
"crypto/md5"
"encoding/binary"
"go/constant"
"io"
"math/big"
"runtime"
)
// currentVersion is the current version number.
//
// - v0: initial prototype
//
// - v1: adds the flags uint32 word
const currentVersion uint32 = 1
// A PkgEncoder provides methods for encoding a package's Unified IR
// export data.
type PkgEncoder struct {
// elems holds the bitstream for previously encoded elements.
elems [numRelocs][]string
// stringsIdx maps previously encoded strings to their index within
// the RelocString section, to allow deduplication. That is,
// elems[RelocString][stringsIdx[s]] == s (if present).
stringsIdx map[string]Index
// syncFrames is the number of frames to write at each sync
// marker. A negative value means sync markers are omitted.
syncFrames int
}
// SyncMarkers reports whether pw uses sync markers.
func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
// NewPkgEncoder returns an initialized PkgEncoder.
//
// syncFrames is the number of caller frames that should be serialized
// at Sync points. Serializing additional frames results in larger
// export data files, but can help diagnosing desync errors in
// higher-level Unified IR reader/writer code. If syncFrames is
// negative, then sync markers are omitted entirely.
func NewPkgEncoder(syncFrames int) PkgEncoder {
return PkgEncoder{
stringsIdx: make(map[string]Index),
syncFrames: syncFrames,
}
}
// DumpTo writes the package's encoded data to out0 and returns the
// package fingerprint.
func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
h := md5.New()
out := io.MultiWriter(out0, h)
writeUint32 := func(x uint32) {
assert(binary.Write(out, binary.LittleEndian, x) == nil)
}
writeUint32(currentVersion)
var flags uint32
if pw.SyncMarkers() {
flags |= flagSyncMarkers
}
writeUint32(flags)
// Write elemEndsEnds.
var sum uint32
for _, elems := range &pw.elems {
sum += uint32(len(elems))
writeUint32(sum)
}
// Write elemEnds.
sum = 0
for _, elems := range &pw.elems {
for _, elem := range elems {
sum += uint32(len(elem))
writeUint32(sum)
}
}
// Write elemData.
for _, elems := range &pw.elems {
for _, elem := range elems {
_, err := io.WriteString(out, elem)
assert(err == nil)
}
}
// Write fingerprint.
copy(fingerprint[:], h.Sum(nil))
_, err := out0.Write(fingerprint[:])
assert(err == nil)
return
}
// StringIdx adds a string value to the strings section, if not
// already present, and returns its index.
func (pw *PkgEncoder) StringIdx(s string) Index {
if idx, ok := pw.stringsIdx[s]; ok {
assert(pw.elems[RelocString][idx] == s)
return idx
}
idx := Index(len(pw.elems[RelocString]))
pw.elems[RelocString] = append(pw.elems[RelocString], s)
pw.stringsIdx[s] = idx
return idx
}
// NewEncoder returns an Encoder for a new element within the given
// section, and encodes the given SyncMarker as the start of the
// element bitstream.
func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
e := pw.NewEncoderRaw(k)
e.Sync(marker)
return e
}
// NewEncoderRaw returns an Encoder for a new element within the given
// section.
//
// Most callers should use NewEncoder instead.
func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
idx := Index(len(pw.elems[k]))
pw.elems[k] = append(pw.elems[k], "") // placeholder
return Encoder{
p: pw,
k: k,
Idx: idx,
}
}
// An Encoder provides methods for encoding an individual element's
// bitstream data.
type Encoder struct {
p *PkgEncoder
Relocs []RelocEnt
RelocMap map[RelocEnt]uint32
Data bytes.Buffer // accumulated element bitstream data
encodingRelocHeader bool
k RelocKind
Idx Index // index within relocation section
}
// Flush finalizes the element's bitstream and returns its Index.
func (w *Encoder) Flush() Index {
var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
// Back up the data so we write the relocations at the front.
var tmp bytes.Buffer
io.Copy(&tmp, &w.Data)
// TODO(mdempsky): Consider writing these out separately so they're
// easier to strip, along with function bodies, so that we can prune
// down to just the data that's relevant to go/types.
if w.encodingRelocHeader {
panic("encodingRelocHeader already true; recursive flush?")
}
w.encodingRelocHeader = true
w.Sync(SyncRelocs)
w.Len(len(w.Relocs))
for _, rEnt := range w.Relocs {
w.Sync(SyncReloc)
w.Len(int(rEnt.Kind))
w.Len(int(rEnt.Idx))
}
io.Copy(&sb, &w.Data)
io.Copy(&sb, &tmp)
w.p.elems[w.k][w.Idx] = sb.String()
return w.Idx
}
func (w *Encoder) checkErr(err error) {
if err != nil {
errorf("unexpected encoding error: %v", err)
}
}
func (w *Encoder) rawUvarint(x uint64) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutUvarint(buf[:], x)
_, err := w.Data.Write(buf[:n])
w.checkErr(err)
}
func (w *Encoder) rawVarint(x int64) {
// Zig-zag encode.
ux := uint64(x) << 1
if x < 0 {
ux = ^ux
}
w.rawUvarint(ux)
}
func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
e := RelocEnt{r, idx}
if w.RelocMap != nil {
if i, ok := w.RelocMap[e]; ok {
return int(i)
}
} else {
w.RelocMap = make(map[RelocEnt]uint32)
}
i := len(w.Relocs)
w.RelocMap[e] = uint32(i)
w.Relocs = append(w.Relocs, e)
return i
}
func (w *Encoder) Sync(m SyncMarker) {
if !w.p.SyncMarkers() {
return
}
// Writing out stack frame string references requires working
// relocations, but writing out the relocations themselves involves
// sync markers. To prevent infinite recursion, we simply trim the
// stack frame for sync markers within the relocation header.
var frames []string
if !w.encodingRelocHeader && w.p.syncFrames > 0 {
pcs := make([]uintptr, w.p.syncFrames)
n := runtime.Callers(2, pcs)
frames = fmtFrames(pcs[:n]...)
}
// TODO(mdempsky): Save space by writing out stack frames as a
// linked list so we can share common stack frames.
w.rawUvarint(uint64(m))
w.rawUvarint(uint64(len(frames)))
for _, frame := range frames {
w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
}
}
// Bool encodes and writes a bool value into the element bitstream,
// and then returns the bool value.
//
// For simple, 2-alternative encodings, the idiomatic way to call Bool
// is something like:
//
// if w.Bool(x != 0) {
// // alternative #1
// } else {
// // alternative #2
// }
//
// For multi-alternative encodings, use Code instead.
func (w *Encoder) Bool(b bool) bool {
w.Sync(SyncBool)
var x byte
if b {
x = 1
}
err := w.Data.WriteByte(x)
w.checkErr(err)
return b
}
// Int64 encodes and writes an int64 value into the element bitstream.
func (w *Encoder) Int64(x int64) {
w.Sync(SyncInt64)
w.rawVarint(x)
}
// Uint64 encodes and writes a uint64 value into the element bitstream.
func (w *Encoder) Uint64(x uint64) {
w.Sync(SyncUint64)
w.rawUvarint(x)
}
// Len encodes and writes a non-negative int value into the element bitstream.
func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
// Int encodes and writes an int value into the element bitstream.
func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
// Uint encodes and writes a uint value into the element bitstream.
func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
// Reloc encodes and writes a relocation for the given (section,
// index) pair into the element bitstream.
//
// Note: Only the index is formally written into the element
// bitstream, so bitstream decoders must know from context which
// section an encoded relocation refers to.
func (w *Encoder) Reloc(r RelocKind, idx Index) {
w.Sync(SyncUseReloc)
w.Len(w.rawReloc(r, idx))
}
// Code encodes and writes a Code value into the element bitstream.
func (w *Encoder) Code(c Code) {
w.Sync(c.Marker())
w.Len(c.Value())
}
// String encodes and writes a string value into the element
// bitstream.
//
// Internally, strings are deduplicated by adding them to the strings
// section (if not already present), and then writing a relocation
// into the element bitstream.
func (w *Encoder) String(s string) {
w.Sync(SyncString)
w.Reloc(RelocString, w.p.StringIdx(s))
}
// Strings encodes and writes a variable-length slice of strings into
// the element bitstream.
func (w *Encoder) Strings(ss []string) {
w.Len(len(ss))
for _, s := range ss {
w.String(s)
}
}
// Value encodes and writes a constant.Value into the element
// bitstream.
func (w *Encoder) Value(val constant.Value) {
w.Sync(SyncValue)
if w.Bool(val.Kind() == constant.Complex) {
w.scalar(constant.Real(val))
w.scalar(constant.Imag(val))
} else {
w.scalar(val)
}
}
func (w *Encoder) scalar(val constant.Value) {
switch v := constant.Val(val).(type) {
default:
errorf("unhandled %v (%v)", val, val.Kind())
case bool:
w.Code(ValBool)
w.Bool(v)
case string:
w.Code(ValString)
w.String(v)
case int64:
w.Code(ValInt64)
w.Int64(v)
case *big.Int:
w.Code(ValBigInt)
w.bigInt(v)
case *big.Rat:
w.Code(ValBigRat)
w.bigInt(v.Num())
w.bigInt(v.Denom())
case *big.Float:
w.Code(ValBigFloat)
w.bigFloat(v)
}
}
func (w *Encoder) bigInt(v *big.Int) {
b := v.Bytes()
w.String(string(b)) // TODO: More efficient encoding.
w.Bool(v.Sign() < 0)
}
func (w *Encoder) bigFloat(v *big.Float) {
b := v.Append(nil, 'p', -1)
w.String(string(b)) // TODO: More efficient encoding.
}

9
vendor/golang.org/x/tools/internal/pkgbits/flags.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
const (
flagSyncMarkers = 1 << iota // file format contains sync markers
)


@@ -0,0 +1,21 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.7
// +build !go1.7
// TODO(mdempsky): Remove after #44505 is resolved
package pkgbits
import "runtime"
func walkFrames(pcs []uintptr, visit frameVisitor) {
for _, pc := range pcs {
fn := runtime.FuncForPC(pc)
file, line := fn.FileLine(pc)
visit(file, line, fn.Name(), pc-fn.Entry())
}
}


@@ -0,0 +1,28 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.7
// +build go1.7
package pkgbits
import "runtime"
// walkFrames calls visit for each call frame represented by pcs.
//
// pcs should be a slice of PCs, as returned by runtime.Callers.
func walkFrames(pcs []uintptr, visit frameVisitor) {
if len(pcs) == 0 {
return
}
frames := runtime.CallersFrames(pcs)
for {
frame, more := frames.Next()
visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
if !more {
return
}
}
}

42
vendor/golang.org/x/tools/internal/pkgbits/reloc.go generated vendored Normal file

@@ -0,0 +1,42 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
// A RelocKind indicates a particular section within a unified IR export.
type RelocKind int32
// An Index represents a bitstream element index within a particular
// section.
type Index int32
// A relocEnt (relocation entry) is an entry in an element's local
// reference table.
//
// TODO(mdempsky): Rename this too.
type RelocEnt struct {
Kind RelocKind
Idx Index
}
// Reserved indices within the meta relocation section.
const (
PublicRootIdx Index = 0
PrivateRootIdx Index = 1
)
const (
RelocString RelocKind = iota
RelocMeta
RelocPosBase
RelocPkg
RelocName
RelocType
RelocObj
RelocObjExt
RelocObjDict
RelocBody
numRelocs = iota
)

17
vendor/golang.org/x/tools/internal/pkgbits/support.go generated vendored Normal file

@@ -0,0 +1,17 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
import "fmt"
func assert(b bool) {
if !b {
panic("assertion failed")
}
}
func errorf(format string, args ...interface{}) {
panic(fmt.Errorf(format, args...))
}

113
vendor/golang.org/x/tools/internal/pkgbits/sync.go generated vendored Normal file

@@ -0,0 +1,113 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
import (
"fmt"
"strings"
)
// fmtFrames formats a backtrace for reporting reader/writer desyncs.
func fmtFrames(pcs ...uintptr) []string {
res := make([]string, 0, len(pcs))
walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
// Trim package from function name. It's just redundant noise.
name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
})
return res
}
type frameVisitor func(file string, line int, name string, offset uintptr)
// SyncMarker is an enum type that represents markers that may be
// written to export data to ensure the reader and writer stay
// synchronized.
type SyncMarker int
//go:generate stringer -type=SyncMarker -trimprefix=Sync
const (
_ SyncMarker = iota
// Public markers (known to go/types importers).
// Low-level coding markers.
SyncEOF
SyncBool
SyncInt64
SyncUint64
SyncString
SyncValue
SyncVal
SyncRelocs
SyncReloc
SyncUseReloc
// Higher-level object and type markers.
SyncPublic
SyncPos
SyncPosBase
SyncObject
SyncObject1
SyncPkg
SyncPkgDef
SyncMethod
SyncType
SyncTypeIdx
SyncTypeParamNames
SyncSignature
SyncParams
SyncParam
SyncCodeObj
SyncSym
SyncLocalIdent
SyncSelector
// Private markers (only known to cmd/compile).
SyncPrivate
SyncFuncExt
SyncVarExt
SyncTypeExt
SyncPragma
SyncExprList
SyncExprs
SyncExpr
SyncExprType
SyncAssign
SyncOp
SyncFuncLit
SyncCompLit
SyncDecl
SyncFuncBody
SyncOpenScope
SyncCloseScope
SyncCloseAnotherScope
SyncDeclNames
SyncDeclName
SyncStmts
SyncBlockStmt
SyncIfStmt
SyncForStmt
SyncSwitchStmt
SyncRangeStmt
SyncCaseClause
SyncCommClause
SyncSelectStmt
SyncDecls
SyncLabeledStmt
SyncUseObjLocal
SyncAddLocal
SyncLinkname
SyncStmt1
SyncStmtsEnd
SyncLabel
SyncOptLabel
)


@@ -0,0 +1,89 @@
// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
package pkgbits
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[SyncEOF-1]
_ = x[SyncBool-2]
_ = x[SyncInt64-3]
_ = x[SyncUint64-4]
_ = x[SyncString-5]
_ = x[SyncValue-6]
_ = x[SyncVal-7]
_ = x[SyncRelocs-8]
_ = x[SyncReloc-9]
_ = x[SyncUseReloc-10]
_ = x[SyncPublic-11]
_ = x[SyncPos-12]
_ = x[SyncPosBase-13]
_ = x[SyncObject-14]
_ = x[SyncObject1-15]
_ = x[SyncPkg-16]
_ = x[SyncPkgDef-17]
_ = x[SyncMethod-18]
_ = x[SyncType-19]
_ = x[SyncTypeIdx-20]
_ = x[SyncTypeParamNames-21]
_ = x[SyncSignature-22]
_ = x[SyncParams-23]
_ = x[SyncParam-24]
_ = x[SyncCodeObj-25]
_ = x[SyncSym-26]
_ = x[SyncLocalIdent-27]
_ = x[SyncSelector-28]
_ = x[SyncPrivate-29]
_ = x[SyncFuncExt-30]
_ = x[SyncVarExt-31]
_ = x[SyncTypeExt-32]
_ = x[SyncPragma-33]
_ = x[SyncExprList-34]
_ = x[SyncExprs-35]
_ = x[SyncExpr-36]
_ = x[SyncExprType-37]
_ = x[SyncAssign-38]
_ = x[SyncOp-39]
_ = x[SyncFuncLit-40]
_ = x[SyncCompLit-41]
_ = x[SyncDecl-42]
_ = x[SyncFuncBody-43]
_ = x[SyncOpenScope-44]
_ = x[SyncCloseScope-45]
_ = x[SyncCloseAnotherScope-46]
_ = x[SyncDeclNames-47]
_ = x[SyncDeclName-48]
_ = x[SyncStmts-49]
_ = x[SyncBlockStmt-50]
_ = x[SyncIfStmt-51]
_ = x[SyncForStmt-52]
_ = x[SyncSwitchStmt-53]
_ = x[SyncRangeStmt-54]
_ = x[SyncCaseClause-55]
_ = x[SyncCommClause-56]
_ = x[SyncSelectStmt-57]
_ = x[SyncDecls-58]
_ = x[SyncLabeledStmt-59]
_ = x[SyncUseObjLocal-60]
_ = x[SyncAddLocal-61]
_ = x[SyncLinkname-62]
_ = x[SyncStmt1-63]
_ = x[SyncStmtsEnd-64]
_ = x[SyncLabel-65]
_ = x[SyncOptLabel-66]
}
const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
func (i SyncMarker) String() string {
i -= 1
if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
}
return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
}

17320
vendor/golang.org/x/tools/internal/stdlib/manifest.go generated vendored Normal file

File diff suppressed because it is too large

97
vendor/golang.org/x/tools/internal/stdlib/stdlib.go generated vendored Normal file

@@ -0,0 +1,97 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run generate.go
// Package stdlib provides a table of all exported symbols in the
// standard library, along with the version at which they first
// appeared.
package stdlib
import (
"fmt"
"strings"
)
type Symbol struct {
Name string
Kind Kind
Version Version // Go version that first included the symbol
}
// A Kind indicates the kind of a symbol:
// function, variable, constant, type, and so on.
type Kind int8
const (
Invalid Kind = iota // Example name:
Type // "Buffer"
Func // "Println"
Var // "EOF"
Const // "Pi"
Field // "Point.X"
Method // "(*Buffer).Grow"
)
func (kind Kind) String() string {
return [...]string{
Invalid: "invalid",
Type: "type",
Func: "func",
Var: "var",
Const: "const",
Field: "field",
Method: "method",
}[kind]
}
// A Version represents a version of Go of the form "go1.%d".
type Version int8
// String returns a version string of the form "go1.23", without allocating.
func (v Version) String() string { return versions[v] }
var versions [30]string // (increase constant as needed)
func init() {
for i := range versions {
versions[i] = fmt.Sprintf("go1.%d", i)
}
}
// HasPackage reports whether the specified package path is part of
// the standard library's public API.
func HasPackage(path string) bool {
_, ok := PackageSymbols[path]
return ok
}
// SplitField splits the field symbol name into type and field
// components. It must be called only on Field symbols.
//
// Example: "File.Package" -> ("File", "Package")
func (sym *Symbol) SplitField() (typename, name string) {
if sym.Kind != Field {
panic("not a field")
}
typename, name, _ = strings.Cut(sym.Name, ".")
return
}
// SplitMethod splits the method symbol name into pointer, receiver,
// and method components. It must be called only on Method symbols.
//
// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow")
func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) {
if sym.Kind != Method {
panic("not a method")
}
recv, name, _ = strings.Cut(sym.Name, ".")
recv = recv[len("(") : len(recv)-len(")")]
ptr = recv[0] == '*'
if ptr {
recv = recv[len("*"):]
}
return
}
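As an aside for reviewers, a small sketch (not part of the vendored code) of how this table is consulted: it walks the symbols recorded for the bytes package and uses Kind, Version, and SplitMethod exactly as documented above. PackageSymbols comes from the generated manifest.go, and since this is an internal package the import only resolves from within the x/tools module.

```go
package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

func main() {
	if !stdlib.HasPackage("bytes") {
		return
	}
	for _, sym := range stdlib.PackageSymbols["bytes"] {
		if sym.Kind == stdlib.Method {
			ptr, recv, name := sym.SplitMethod()
			fmt.Printf("method %s.%s (pointer receiver: %v), since %s\n", recv, name, ptr, sym.Version)
			continue
		}
		fmt.Printf("%s %s, since %s\n", sym.Kind, sym.Name, sym.Version)
	}
}
```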


@@ -0,0 +1,137 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tokeninternal provides access to some internal features of the token
// package.
package tokeninternal
import (
"fmt"
"go/token"
"sort"
"sync"
"unsafe"
)
// GetLines returns the table of line-start offsets from a token.File.
func GetLines(file *token.File) []int {
// token.File has a Lines method on Go 1.21 and later.
if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
return file.Lines()
}
// This declaration must match that of token.File.
// This creates a risk of dependency skew.
// For now we check that the size of the two
// declarations is the same, on the (fragile) assumption
// that future changes would add fields.
type tokenFile119 struct {
_ string
_ int
_ int
mu sync.Mutex // we're not complete monsters
lines []int
_ []struct{}
}
if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
panic("unexpected token.File size")
}
var ptr *tokenFile119
type uP = unsafe.Pointer
*(*uP)(uP(&ptr)) = uP(file)
ptr.mu.Lock()
defer ptr.mu.Unlock()
return ptr.lines
}
// AddExistingFiles adds the specified files to the FileSet if they
// are not already present. It panics if any pair of files in the
// resulting FileSet would overlap.
func AddExistingFiles(fset *token.FileSet, files []*token.File) {
// Punch through the FileSet encapsulation.
type tokenFileSet struct {
// This type remained essentially consistent from go1.16 to go1.21.
mutex sync.RWMutex
base int
files []*token.File
_ *token.File // changed to atomic.Pointer[token.File] in go1.19
}
// If the size of token.FileSet changes, this will fail to compile.
const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
var _ [-delta * delta]int
type uP = unsafe.Pointer
var ptr *tokenFileSet
*(*uP)(uP(&ptr)) = uP(fset)
ptr.mutex.Lock()
defer ptr.mutex.Unlock()
// Merge and sort.
newFiles := append(ptr.files, files...)
sort.Slice(newFiles, func(i, j int) bool {
return newFiles[i].Base() < newFiles[j].Base()
})
// Reject overlapping files.
// Discard adjacent identical files.
out := newFiles[:0]
for i, file := range newFiles {
if i > 0 {
prev := newFiles[i-1]
if file == prev {
continue
}
if prev.Base()+prev.Size()+1 > file.Base() {
panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
prev.Name(), prev.Base(), prev.Base()+prev.Size(),
file.Name(), file.Base(), file.Base()+file.Size()))
}
}
out = append(out, file)
}
newFiles = out
ptr.files = newFiles
// Advance FileSet.Base().
if len(newFiles) > 0 {
last := newFiles[len(newFiles)-1]
newBase := last.Base() + last.Size() + 1
if ptr.base < newBase {
ptr.base = newBase
}
}
}
// FileSetFor returns a new FileSet containing a sequence of new Files with
// the same base, size, and line as the input files, for use in APIs that
// require a FileSet.
//
// Precondition: the input files must be non-overlapping, and sorted in order
// of their Base.
func FileSetFor(files ...*token.File) *token.FileSet {
fset := token.NewFileSet()
for _, f := range files {
f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
lines := GetLines(f)
f2.SetLines(lines)
}
return fset
}
// CloneFileSet creates a new FileSet holding all files in fset. It does not
// create copies of the token.Files in fset: they are added to the resulting
// FileSet unmodified.
func CloneFileSet(fset *token.FileSet) *token.FileSet {
var files []*token.File
fset.Iterate(func(f *token.File) bool {
files = append(files, f)
return true
})
newFileSet := token.NewFileSet()
AddExistingFiles(newFileSet, files)
return newFileSet
}
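For context, a brief illustrative sketch (not part of the vendored code) of FileSetFor: it registers a file in one FileSet, then builds a second FileSet containing only that file while preserving its base, size, and line table. The file name and line offsets are made-up values.

```go
package main

import (
	"fmt"
	"go/token"

	"golang.org/x/tools/internal/tokeninternal"
)

func main() {
	fset := token.NewFileSet()
	f := fset.AddFile("example.go", fset.Base(), 60)
	f.SetLines([]int{0, 20, 40}) // three lines starting at these byte offsets

	// A standalone FileSet containing only f, sharing its base and line table.
	single := tokeninternal.FileSetFor(f)
	fmt.Println(single.Position(f.Pos(25))) // example.go:2:6
}
```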

File diff suppressed because it is too large


@@ -0,0 +1,179 @@
// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT.
package typesinternal
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[InvalidSyntaxTree - -1]
_ = x[Test-1]
_ = x[BlankPkgName-2]
_ = x[MismatchedPkgName-3]
_ = x[InvalidPkgUse-4]
_ = x[BadImportPath-5]
_ = x[BrokenImport-6]
_ = x[ImportCRenamed-7]
_ = x[UnusedImport-8]
_ = x[InvalidInitCycle-9]
_ = x[DuplicateDecl-10]
_ = x[InvalidDeclCycle-11]
_ = x[InvalidTypeCycle-12]
_ = x[InvalidConstInit-13]
_ = x[InvalidConstVal-14]
_ = x[InvalidConstType-15]
_ = x[UntypedNilUse-16]
_ = x[WrongAssignCount-17]
_ = x[UnassignableOperand-18]
_ = x[NoNewVar-19]
_ = x[MultiValAssignOp-20]
_ = x[InvalidIfaceAssign-21]
_ = x[InvalidChanAssign-22]
_ = x[IncompatibleAssign-23]
_ = x[UnaddressableFieldAssign-24]
_ = x[NotAType-25]
_ = x[InvalidArrayLen-26]
_ = x[BlankIfaceMethod-27]
_ = x[IncomparableMapKey-28]
_ = x[InvalidIfaceEmbed-29]
_ = x[InvalidPtrEmbed-30]
_ = x[BadRecv-31]
_ = x[InvalidRecv-32]
_ = x[DuplicateFieldAndMethod-33]
_ = x[DuplicateMethod-34]
_ = x[InvalidBlank-35]
_ = x[InvalidIota-36]
_ = x[MissingInitBody-37]
_ = x[InvalidInitSig-38]
_ = x[InvalidInitDecl-39]
_ = x[InvalidMainDecl-40]
_ = x[TooManyValues-41]
_ = x[NotAnExpr-42]
_ = x[TruncatedFloat-43]
_ = x[NumericOverflow-44]
_ = x[UndefinedOp-45]
_ = x[MismatchedTypes-46]
_ = x[DivByZero-47]
_ = x[NonNumericIncDec-48]
_ = x[UnaddressableOperand-49]
_ = x[InvalidIndirection-50]
_ = x[NonIndexableOperand-51]
_ = x[InvalidIndex-52]
_ = x[SwappedSliceIndices-53]
_ = x[NonSliceableOperand-54]
_ = x[InvalidSliceExpr-55]
_ = x[InvalidShiftCount-56]
_ = x[InvalidShiftOperand-57]
_ = x[InvalidReceive-58]
_ = x[InvalidSend-59]
_ = x[DuplicateLitKey-60]
_ = x[MissingLitKey-61]
_ = x[InvalidLitIndex-62]
_ = x[OversizeArrayLit-63]
_ = x[MixedStructLit-64]
_ = x[InvalidStructLit-65]
_ = x[MissingLitField-66]
_ = x[DuplicateLitField-67]
_ = x[UnexportedLitField-68]
_ = x[InvalidLitField-69]
_ = x[UntypedLit-70]
_ = x[InvalidLit-71]
_ = x[AmbiguousSelector-72]
_ = x[UndeclaredImportedName-73]
_ = x[UnexportedName-74]
_ = x[UndeclaredName-75]
_ = x[MissingFieldOrMethod-76]
_ = x[BadDotDotDotSyntax-77]
_ = x[NonVariadicDotDotDot-78]
_ = x[MisplacedDotDotDot-79]
_ = x[InvalidDotDotDotOperand-80]
_ = x[InvalidDotDotDot-81]
_ = x[UncalledBuiltin-82]
_ = x[InvalidAppend-83]
_ = x[InvalidCap-84]
_ = x[InvalidClose-85]
_ = x[InvalidCopy-86]
_ = x[InvalidComplex-87]
_ = x[InvalidDelete-88]
_ = x[InvalidImag-89]
_ = x[InvalidLen-90]
_ = x[SwappedMakeArgs-91]
_ = x[InvalidMake-92]
_ = x[InvalidReal-93]
_ = x[InvalidAssert-94]
_ = x[ImpossibleAssert-95]
_ = x[InvalidConversion-96]
_ = x[InvalidUntypedConversion-97]
_ = x[BadOffsetofSyntax-98]
_ = x[InvalidOffsetof-99]
_ = x[UnusedExpr-100]
_ = x[UnusedVar-101]
_ = x[MissingReturn-102]
_ = x[WrongResultCount-103]
_ = x[OutOfScopeResult-104]
_ = x[InvalidCond-105]
_ = x[InvalidPostDecl-106]
_ = x[InvalidChanRange-107]
_ = x[InvalidIterVar-108]
_ = x[InvalidRangeExpr-109]
_ = x[MisplacedBreak-110]
_ = x[MisplacedContinue-111]
_ = x[MisplacedFallthrough-112]
_ = x[DuplicateCase-113]
_ = x[DuplicateDefault-114]
_ = x[BadTypeKeyword-115]
_ = x[InvalidTypeSwitch-116]
_ = x[InvalidExprSwitch-117]
_ = x[InvalidSelectCase-118]
_ = x[UndeclaredLabel-119]
_ = x[DuplicateLabel-120]
_ = x[MisplacedLabel-121]
_ = x[UnusedLabel-122]
_ = x[JumpOverDecl-123]
_ = x[JumpIntoBlock-124]
_ = x[InvalidMethodExpr-125]
_ = x[WrongArgCount-126]
_ = x[InvalidCall-127]
_ = x[UnusedResults-128]
_ = x[InvalidDefer-129]
_ = x[InvalidGo-130]
_ = x[BadDecl-131]
_ = x[RepeatedDecl-132]
_ = x[InvalidUnsafeAdd-133]
_ = x[InvalidUnsafeSlice-134]
_ = x[UnsupportedFeature-135]
_ = x[NotAGenericType-136]
_ = x[WrongTypeArgCount-137]
_ = x[CannotInferTypeArgs-138]
_ = x[InvalidTypeArg-139]
_ = x[InvalidInstanceCycle-140]
_ = x[InvalidUnion-141]
_ = x[MisplacedConstraintIface-142]
_ = x[InvalidMethodTypeParams-143]
_ = x[MisplacedTypeParam-144]
_ = x[InvalidUnsafeSliceData-145]
_ = x[InvalidUnsafeString-146]
}
const (
_ErrorCode_name_0 = "InvalidSyntaxTree"
_ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString"
)
var (
_ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180}
)
func (i ErrorCode) String() string {
switch {
case i == -1:
return _ErrorCode_name_0
case 1 <= i && i <= 146:
i -= 1
return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
default:
return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")"
}
}


@@ -0,0 +1,43 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typesinternal
import (
"go/types"
"golang.org/x/tools/internal/aliases"
)
// ReceiverNamed returns the named type (if any) associated with the
// type of recv, which may be of the form N or *N, or aliases thereof.
// It also reports whether a Pointer was present.
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
t := recv.Type()
if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
isPtr = true
t = ptr.Elem()
}
named, _ = aliases.Unalias(t).(*types.Named)
return
}
// Unpointer returns T given *T or an alias thereof.
// For all other types it is the identity function.
// It does not look at underlying types.
// The result may be an alias.
//
// Use this function to strip off the optional pointer on a receiver
// in a field or method selection, without losing the named type
// (which is needed to compute the method set).
//
// See also [typeparams.MustDeref], which removes one level of
// indirection from the type, regardless of named types (analogous to
// a LOAD instruction).
func Unpointer(t types.Type) types.Type {
if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
return ptr.Elem()
}
return t
}
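An illustrative helper (not part of the vendored code) showing the intended use of ReceiverNamed: given a *types.Func, it reports the receiver type as it would appear in a method declaration. The function name receiverTypeName is hypothetical.

```go
package example

import (
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

// receiverTypeName returns fn's receiver type as written in a method
// declaration, e.g. "Buffer" or "*Buffer", or "" if fn has no named receiver.
func receiverTypeName(fn *types.Func) string {
	recv := fn.Type().(*types.Signature).Recv()
	if recv == nil {
		return ""
	}
	isPtr, named := typesinternal.ReceiverNamed(recv)
	if named == nil {
		return ""
	}
	name := named.Obj().Name()
	if isPtr {
		name = "*" + name
	}
	return name
}
```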


@@ -0,0 +1,89 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typesinternal
import (
"go/types"
"golang.org/x/tools/internal/stdlib"
"golang.org/x/tools/internal/versions"
)
// TooNewStdSymbols computes the set of package-level symbols
// exported by pkg that are not available at the specified version.
// The result maps each symbol to its minimum version.
//
// The pkg is allowed to contain type errors.
func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string {
disallowed := make(map[types.Object]string)
// Pass 1: package-level symbols.
symbols := stdlib.PackageSymbols[pkg.Path()]
for _, sym := range symbols {
symver := sym.Version.String()
if versions.Before(version, symver) {
switch sym.Kind {
case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type:
disallowed[pkg.Scope().Lookup(sym.Name)] = symver
}
}
}
// Pass 2: fields and methods.
//
// We allow fields and methods if their associated type is
// disallowed, as otherwise we would report false positives
// for compatibility shims. Consider:
//
// //go:build go1.22
// type T struct { F std.Real } // correct new API
//
// //go:build !go1.22
// type T struct { F fake } // shim
// type fake struct { ... }
// func (fake) M () {}
//
// These alternative declarations of T use either the std.Real
// type, introduced in go1.22, or a fake type, for the field
// F. (The fakery could be arbitrarily deep, involving more
// nested fields and methods than are shown here.) Clients
// that use the compatibility shim T will compile with any
// version of go, whether older or newer than go1.22, but only
// the newer version will use the std.Real implementation.
//
// Now consider a reference to method M in new(T).F.M() in a
// module that requires a minimum of go1.21. The analysis may
// occur using a version of Go higher than 1.21, selecting the
// first version of T, so the method M is Real.M. This would
// spuriously cause the analyzer to report a reference to a
// too-new symbol even though this expression compiles just
// fine (with the fake implementation) using go1.21.
for _, sym := range symbols {
symVersion := sym.Version.String()
if !versions.Before(version, symVersion) {
continue // allowed
}
var obj types.Object
switch sym.Kind {
case stdlib.Field:
typename, name := sym.SplitField()
if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" {
obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name)
}
case stdlib.Method:
ptr, recvname, name := sym.SplitMethod()
if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" {
obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name)
}
}
if obj != nil {
disallowed[obj] = symVersion
}
}
return disallowed
}
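A minimal usage sketch (not part of the vendored code): for the types.Package of an imported standard-library package, list the exported symbols that require a newer Go release than the version the code targets. The function name and diagnostic format are hypothetical.

```go
package example

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

// reportTooNew prints each symbol of the std package stdPkg that is not yet
// available at goVersion (e.g. "go1.21").
func reportTooNew(stdPkg *types.Package, goVersion string) {
	for obj, minVersion := range typesinternal.TooNewStdSymbols(stdPkg, goVersion) {
		fmt.Printf("%s.%s requires %s or later (building for %s)\n",
			stdPkg.Path(), obj.Name(), minVersion, goVersion)
	}
}
```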


@@ -0,0 +1,50 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typesinternal provides access to internal go/types APIs that are not
// yet exported.
package typesinternal
import (
"go/token"
"go/types"
"reflect"
"unsafe"
)
func SetUsesCgo(conf *types.Config) bool {
v := reflect.ValueOf(conf).Elem()
f := v.FieldByName("go115UsesCgo")
if !f.IsValid() {
f = v.FieldByName("UsesCgo")
if !f.IsValid() {
return false
}
}
addr := unsafe.Pointer(f.UnsafeAddr())
*(*bool)(addr) = true
return true
}
// ReadGo116ErrorData extracts additional information from types.Error values
// generated by Go version 1.16 and later: the error code, start position, and
// end position. If all positions are valid, start <= err.Pos <= end.
//
// If the data could not be read, the final result parameter will be false.
func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
var data [3]int
// By coincidence all of these fields are ints, which simplifies things.
v := reflect.ValueOf(err)
for i, name := range []string{"go116code", "go116start", "go116end"} {
f := v.FieldByName(name)
if !f.IsValid() {
return 0, 0, 0, false
}
data[i] = int(f.Int())
}
return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
}


@@ -0,0 +1,43 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package versions
// This file contains predicates for working with file versions to
// decide when a tool should consider a language feature enabled.
// Go versions that features in x/tools can be gated to.
const (
Go1_18 = "go1.18"
Go1_19 = "go1.19"
Go1_20 = "go1.20"
Go1_21 = "go1.21"
Go1_22 = "go1.22"
)
// Future is an invalid unknown Go version sometime in the future.
// Do not use directly with Compare.
const Future = ""
// AtLeast reports whether the file version v comes after a Go release.
//
// Use this predicate to enable a behavior once a certain Go release
// has happened (and stays enabled in the future).
func AtLeast(v, release string) bool {
if v == Future {
return true // an unknown future version is always after y.
}
return Compare(Lang(v), Lang(release)) >= 0
}
// Before reports whether the file version v is strictly before a Go release.
//
// Use this predicate to disable a behavior once a certain Go release
// has happened (and stays enabled in the future).
func Before(v, release string) bool {
if v == Future {
return false // an unknown future version happens after y.
}
return Compare(Lang(v), Lang(release)) < 0
}
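As an illustration of the gating pattern these predicates support (a sketch, not part of the vendored code): fileVersion would typically come from FileVersion, defined in the types_go121.go/types_go122.go files later in this change, and the behaviors in the comments are placeholders.

```go
package example

import "golang.org/x/tools/internal/versions"

// gate shows how a checker might switch behavior per file version.
func gate(fileVersion string) {
	if versions.AtLeast(fileVersion, versions.Go1_22) {
		// enable behavior that depends on a Go 1.22 language change
	}
	if versions.Before(fileVersion, versions.Go1_21) {
		// fall back to the pre-1.21 behavior
	}
}
```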

172
vendor/golang.org/x/tools/internal/versions/gover.go generated vendored Normal file
View File

@@ -0,0 +1,172 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This is a fork of internal/gover for use by x/tools until
// go1.21 and earlier are no longer supported by x/tools.
package versions
import "strings"
// A gover is a parsed Go version: major[.Minor[.Patch]][kind[pre]]
// The numbers are the original decimal strings to avoid integer overflows
// and since there is very little actual math. (Probably overflow doesn't matter in practice,
// but at the time this code was written, there was an existing test that used
// go1.99999999999, which does not fit in an int on 32-bit platforms.
// The "big decimal" representation avoids the problem entirely.)
type gover struct {
major string // decimal
minor string // decimal or ""
patch string // decimal or ""
kind string // "", "alpha", "beta", "rc"
pre string // decimal or ""
}
// compare returns -1, 0, or +1 depending on whether
// x < y, x == y, or x > y, interpreted as toolchain versions.
// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
// Malformed versions compare less than well-formed versions and equal to each other.
// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
func compare(x, y string) int {
vx := parse(x)
vy := parse(y)
if c := cmpInt(vx.major, vy.major); c != 0 {
return c
}
if c := cmpInt(vx.minor, vy.minor); c != 0 {
return c
}
if c := cmpInt(vx.patch, vy.patch); c != 0 {
return c
}
if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
return c
}
if c := cmpInt(vx.pre, vy.pre); c != 0 {
return c
}
return 0
}
// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
func lang(x string) string {
v := parse(x)
if v.minor == "" || v.major == "1" && v.minor == "0" {
return v.major
}
return v.major + "." + v.minor
}
// isValid reports whether the version x is valid.
func isValid(x string) bool {
return parse(x) != gover{}
}
// parse parses the Go version string x into a version.
// It returns the zero version if x is malformed.
func parse(x string) gover {
var v gover
// Parse major version.
var ok bool
v.major, x, ok = cutInt(x)
if !ok {
return gover{}
}
if x == "" {
// Interpret "1" as "1.0.0".
v.minor = "0"
v.patch = "0"
return v
}
// Parse . before minor version.
if x[0] != '.' {
return gover{}
}
// Parse minor version.
v.minor, x, ok = cutInt(x[1:])
if !ok {
return gover{}
}
if x == "" {
// Patch missing is same as "0" for older versions.
// Starting in Go 1.21, patch missing is different from explicit .0.
if cmpInt(v.minor, "21") < 0 {
v.patch = "0"
}
return v
}
// Parse patch if present.
if x[0] == '.' {
v.patch, x, ok = cutInt(x[1:])
if !ok || x != "" {
// Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
// Allowing them would be a bit confusing because we already have:
// 1.21 < 1.21rc1
// But a prerelease of a patch would have the opposite effect:
// 1.21.3rc1 < 1.21.3
// We've never needed them before, so let's not start now.
return gover{}
}
return v
}
// Parse prerelease.
i := 0
for i < len(x) && (x[i] < '0' || '9' < x[i]) {
if x[i] < 'a' || 'z' < x[i] {
return gover{}
}
i++
}
if i == 0 {
return gover{}
}
v.kind, x = x[:i], x[i:]
if x == "" {
return v
}
v.pre, x, ok = cutInt(x)
if !ok || x != "" {
return gover{}
}
return v
}
// cutInt scans the leading decimal number at the start of x to an integer
// and returns that value and the rest of the string.
func cutInt(x string) (n, rest string, ok bool) {
i := 0
for i < len(x) && '0' <= x[i] && x[i] <= '9' {
i++
}
if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
return "", "", false
}
return x[:i], x[i:], true
}
// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
// (Copied from golang.org/x/mod/semver's compareInt.)
func cmpInt(x, y string) int {
if x == y {
return 0
}
if len(x) < len(y) {
return -1
}
if len(x) > len(y) {
return +1
}
if x < y {
return -1
} else {
return +1
}
}


@@ -0,0 +1,14 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package versions
// toolchain is the maximum version (<1.22) that the go toolchain used
// to build the current tool is known to support.
//
// When a tool is built with >=1.22, the value of toolchain is unused.
//
// x/tools does not support building with go <1.18. So we take this
// as the minimum possible maximum.
var toolchain string = Go1_18


@@ -0,0 +1,14 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package versions
func init() {
if Compare(toolchain, Go1_19) < 0 {
toolchain = Go1_19
}
}


@@ -0,0 +1,14 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.20
// +build go1.20
package versions
func init() {
if Compare(toolchain, Go1_20) < 0 {
toolchain = Go1_20
}
}


@@ -0,0 +1,14 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
// +build go1.21
package versions
func init() {
if Compare(toolchain, Go1_21) < 0 {
toolchain = Go1_21
}
}

19
vendor/golang.org/x/tools/internal/versions/types.go generated vendored Normal file

@@ -0,0 +1,19 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package versions
import (
"go/types"
)
// GoVersion returns the Go version of the type package.
// It returns zero if no version can be determined.
func GoVersion(pkg *types.Package) string {
// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
return pkg.GoVersion()
}
return ""
}


@@ -0,0 +1,30 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.22
// +build !go1.22
package versions
import (
"go/ast"
"go/types"
)
// FileVersion returns a language version (<=1.21) derived from runtime.Version()
// or an unknown future version.
func FileVersion(info *types.Info, file *ast.File) string {
// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
// available. We use a go version derived from the toolchain used to
// compile the tool by default.
// This will be <= go1.21. We take this as the maximum version that
// this tool can support.
//
// There are no features currently in x/tools that need to tell fine grained
// differences for versions <1.22.
return toolchain
}
// InitFileVersions is a noop when compiled with this Go version.
func InitFileVersions(*types.Info) {}


@@ -0,0 +1,41 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.22
// +build go1.22
package versions
import (
"go/ast"
"go/types"
)
// FileVersion returns a file's Go version.
// The reported version is an unknown Future version if a
// version cannot be determined.
func FileVersion(info *types.Info, file *ast.File) string {
// In tools built with Go >= 1.22, the Go version of a file
// follows a cascade of sources:
// 1) types.Info.FileVersions, which follows the cascade:
// 1.a) file version (ast.File.GoVersion),
// 1.b) the package version (types.Config.GoVersion), or
// 2) is some unknown Future version.
//
// File versions require a valid package version to be provided to types
// in Config.GoVersion. Config.GoVersion is either from the package's module
// or the toolchain (go run). This value should be provided by go/packages
// or unitchecker.Config.GoVersion.
if v := info.FileVersions[file]; IsValid(v) {
return v
}
// Note: we could instead return runtime.Version() [if valid].
// This would act as a max version on what a tool can support.
return Future
}
// InitFileVersions initializes info to record Go versions for Go files.
func InitFileVersions(info *types.Info) {
info.FileVersions = make(map[*ast.File]string)
}
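For completeness, a hypothetical end-to-end sketch (not part of the vendored code) of how a tool wires these hooks up: InitFileVersions runs before type checking so that, on Go 1.22+ toolchains, info.FileVersions is populated, and FileVersion is then queried per file. The source text, package path, and GoVersion value are placeholders.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/versions"
)

func main() {
	const src = "package p\nfunc f() {}\n"
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	info := &types.Info{}
	versions.InitFileVersions(info) // no-op when built with Go < 1.22

	conf := types.Config{Importer: importer.Default(), GoVersion: "go1.21"}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}

	fmt.Println(versions.FileVersion(info, file)) // e.g. "go1.21" on a Go 1.22+ toolchain
}
```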


@@ -0,0 +1,57 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package versions
import (
"strings"
)
// Note: If we use build tags to use go/versions when go >=1.22,
// we run into go.dev/issue/53737. Under some operations users would see an
// import of "go/versions" even if they would not compile the file.
// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include it.
// For this reason, this library is just a clone of go/versions for the moment.
// Lang returns the Go language version for version x.
// If x is not a valid version, Lang returns the empty string.
// For example:
//
// Lang("go1.21rc2") = "go1.21"
// Lang("go1.21.2") = "go1.21"
// Lang("go1.21") = "go1.21"
// Lang("go1") = "go1"
// Lang("bad") = ""
// Lang("1.21") = ""
func Lang(x string) string {
v := lang(stripGo(x))
if v == "" {
return ""
}
return x[:2+len(v)] // "go"+v without allocation
}
// Compare returns -1, 0, or +1 depending on whether
// x < y, x == y, or x > y, interpreted as Go versions.
// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
// Invalid versions, including the empty string, compare less than
// valid versions and equal to each other.
// The language version "go1.21" compares less than the
// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
// Custom toolchain suffixes are ignored during comparison:
// "go1.21.0" and "go1.21.0-bigcorp" are equal.
func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) }
// IsValid reports whether the version x is valid.
func IsValid(x string) bool { return isValid(stripGo(x)) }
// stripGo converts from a "go1.21" version to a "1.21" version.
// If v does not start with "go", stripGo returns the empty string (a known invalid version).
func stripGo(v string) string {
v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix.
if len(v) < 2 || v[:2] != "go" {
return ""
}
return v[2:]
}
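Finally, a tiny sketch (not part of the vendored code) exercising the exported wrappers above; the expected results follow directly from their doc comments.

```go
package main

import (
	"fmt"

	"golang.org/x/tools/internal/versions"
)

func main() {
	fmt.Println(versions.Lang("go1.21rc2"))                       // "go1.21"
	fmt.Println(versions.Compare("go1.21", "go1.21.0") < 0)       // true: the language version precedes its releases
	fmt.Println(versions.Compare("go1.21.0-bigcorp", "go1.21.0")) // 0: toolchain suffixes are ignored
	fmt.Println(versions.IsValid("1.21"))                         // false: missing the "go" prefix
}
```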