WIP: Save agent roles integration work before CHORUS rebrand
- Agent roles and coordination features - Chat API integration testing - New configuration and workspace management 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
85
vendor/golang.org/x/tools/internal/event/core/event.go
generated
vendored
Normal file
85
vendor/golang.org/x/tools/internal/event/core/event.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package core provides support for event based telemetry.
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Event holds the information about an event of note that occurred.
|
||||
type Event struct {
|
||||
at time.Time
|
||||
|
||||
// As events are often on the stack, storing the first few labels directly
|
||||
// in the event can avoid an allocation at all for the very common cases of
|
||||
// simple events.
|
||||
// The length needs to be large enough to cope with the majority of events
|
||||
// but no so large as to cause undue stack pressure.
|
||||
// A log message with two values will use 3 labels (one for each value and
|
||||
// one for the message itself).
|
||||
|
||||
static [3]label.Label // inline storage for the first few labels
|
||||
dynamic []label.Label // dynamically sized storage for remaining labels
|
||||
}
|
||||
|
||||
// eventLabelMap implements label.Map for a the labels of an Event.
|
||||
type eventLabelMap struct {
|
||||
event Event
|
||||
}
|
||||
|
||||
func (ev Event) At() time.Time { return ev.at }
|
||||
|
||||
func (ev Event) Format(f fmt.State, r rune) {
|
||||
if !ev.at.IsZero() {
|
||||
fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
|
||||
}
|
||||
for index := 0; ev.Valid(index); index++ {
|
||||
if l := ev.Label(index); l.Valid() {
|
||||
fmt.Fprintf(f, "\n\t%v", l)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ev Event) Valid(index int) bool {
|
||||
return index >= 0 && index < len(ev.static)+len(ev.dynamic)
|
||||
}
|
||||
|
||||
func (ev Event) Label(index int) label.Label {
|
||||
if index < len(ev.static) {
|
||||
return ev.static[index]
|
||||
}
|
||||
return ev.dynamic[index-len(ev.static)]
|
||||
}
|
||||
|
||||
func (ev Event) Find(key label.Key) label.Label {
|
||||
for _, l := range ev.static {
|
||||
if l.Key() == key {
|
||||
return l
|
||||
}
|
||||
}
|
||||
for _, l := range ev.dynamic {
|
||||
if l.Key() == key {
|
||||
return l
|
||||
}
|
||||
}
|
||||
return label.Label{}
|
||||
}
|
||||
|
||||
func MakeEvent(static [3]label.Label, labels []label.Label) Event {
|
||||
return Event{
|
||||
static: static,
|
||||
dynamic: labels,
|
||||
}
|
||||
}
|
||||
|
||||
// CloneEvent event returns a copy of the event with the time adjusted to at.
|
||||
func CloneEvent(ev Event, at time.Time) Event {
|
||||
ev.at = at
|
||||
return ev
|
||||
}
|
||||
70
vendor/golang.org/x/tools/internal/event/core/export.go
generated
vendored
Normal file
70
vendor/golang.org/x/tools/internal/event/core/export.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Exporter is a function that handles events.
|
||||
// It may return a modified context and event.
|
||||
type Exporter func(context.Context, Event, label.Map) context.Context
|
||||
|
||||
var (
|
||||
exporter unsafe.Pointer
|
||||
)
|
||||
|
||||
// SetExporter sets the global exporter function that handles all events.
|
||||
// The exporter is called synchronously from the event call site, so it should
|
||||
// return quickly so as not to hold up user code.
|
||||
func SetExporter(e Exporter) {
|
||||
p := unsafe.Pointer(&e)
|
||||
if e == nil {
|
||||
// &e is always valid, and so p is always valid, but for the early abort
|
||||
// of ProcessEvent to be efficient it needs to make the nil check on the
|
||||
// pointer without having to dereference it, so we make the nil function
|
||||
// also a nil pointer
|
||||
p = nil
|
||||
}
|
||||
atomic.StorePointer(&exporter, p)
|
||||
}
|
||||
|
||||
// deliver is called to deliver an event to the supplied exporter.
|
||||
// it will fill in the time.
|
||||
func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
|
||||
// add the current time to the event
|
||||
ev.at = time.Now()
|
||||
// hand the event off to the current exporter
|
||||
return exporter(ctx, ev, ev)
|
||||
}
|
||||
|
||||
// Export is called to deliver an event to the global exporter if set.
|
||||
func Export(ctx context.Context, ev Event) context.Context {
|
||||
// get the global exporter and abort early if there is not one
|
||||
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
|
||||
if exporterPtr == nil {
|
||||
return ctx
|
||||
}
|
||||
return deliver(ctx, *exporterPtr, ev)
|
||||
}
|
||||
|
||||
// ExportPair is called to deliver a start event to the supplied exporter.
|
||||
// It also returns a function that will deliver the end event to the same
|
||||
// exporter.
|
||||
// It will fill in the time.
|
||||
func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
|
||||
// get the global exporter and abort early if there is not one
|
||||
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
|
||||
if exporterPtr == nil {
|
||||
return ctx, func() {}
|
||||
}
|
||||
ctx = deliver(ctx, *exporterPtr, begin)
|
||||
return ctx, func() { deliver(ctx, *exporterPtr, end) }
|
||||
}
|
||||
77
vendor/golang.org/x/tools/internal/event/core/fast.go
generated
vendored
Normal file
77
vendor/golang.org/x/tools/internal/event/core/fast.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"golang.org/x/tools/internal/event/keys"
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Log1 takes a message and one label delivers a log event to the exporter.
|
||||
// It is a customized version of Print that is faster and does no allocation.
|
||||
func Log1(ctx context.Context, message string, t1 label.Label) {
|
||||
Export(ctx, MakeEvent([3]label.Label{
|
||||
keys.Msg.Of(message),
|
||||
t1,
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Log2 takes a message and two labels and delivers a log event to the exporter.
|
||||
// It is a customized version of Print that is faster and does no allocation.
|
||||
func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
|
||||
Export(ctx, MakeEvent([3]label.Label{
|
||||
keys.Msg.Of(message),
|
||||
t1,
|
||||
t2,
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Metric1 sends a label event to the exporter with the supplied labels.
|
||||
func Metric1(ctx context.Context, t1 label.Label) context.Context {
|
||||
return Export(ctx, MakeEvent([3]label.Label{
|
||||
keys.Metric.New(),
|
||||
t1,
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Metric2 sends a label event to the exporter with the supplied labels.
|
||||
func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
|
||||
return Export(ctx, MakeEvent([3]label.Label{
|
||||
keys.Metric.New(),
|
||||
t1,
|
||||
t2,
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Start1 sends a span start event with the supplied label list to the exporter.
|
||||
// It also returns a function that will end the span, which should normally be
|
||||
// deferred.
|
||||
func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
|
||||
return ExportPair(ctx,
|
||||
MakeEvent([3]label.Label{
|
||||
keys.Start.Of(name),
|
||||
t1,
|
||||
}, nil),
|
||||
MakeEvent([3]label.Label{
|
||||
keys.End.New(),
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Start2 sends a span start event with the supplied label list to the exporter.
|
||||
// It also returns a function that will end the span, which should normally be
|
||||
// deferred.
|
||||
func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
|
||||
return ExportPair(ctx,
|
||||
MakeEvent([3]label.Label{
|
||||
keys.Start.Of(name),
|
||||
t1,
|
||||
t2,
|
||||
}, nil),
|
||||
MakeEvent([3]label.Label{
|
||||
keys.End.New(),
|
||||
}, nil))
|
||||
}
|
||||
7
vendor/golang.org/x/tools/internal/event/doc.go
generated
vendored
Normal file
7
vendor/golang.org/x/tools/internal/event/doc.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package event provides a set of packages that cover the main
|
||||
// concepts of telemetry in an implementation agnostic way.
|
||||
package event
|
||||
127
vendor/golang.org/x/tools/internal/event/event.go
generated
vendored
Normal file
127
vendor/golang.org/x/tools/internal/event/event.go
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"golang.org/x/tools/internal/event/core"
|
||||
"golang.org/x/tools/internal/event/keys"
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Exporter is a function that handles events.
|
||||
// It may return a modified context and event.
|
||||
type Exporter func(context.Context, core.Event, label.Map) context.Context
|
||||
|
||||
// SetExporter sets the global exporter function that handles all events.
|
||||
// The exporter is called synchronously from the event call site, so it should
|
||||
// return quickly so as not to hold up user code.
|
||||
func SetExporter(e Exporter) {
|
||||
core.SetExporter(core.Exporter(e))
|
||||
}
|
||||
|
||||
// Log takes a message and a label list and combines them into a single event
|
||||
// before delivering them to the exporter.
|
||||
func Log(ctx context.Context, message string, labels ...label.Label) {
|
||||
core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Msg.Of(message),
|
||||
}, labels))
|
||||
}
|
||||
|
||||
// IsLog returns true if the event was built by the Log function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsLog(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Msg
|
||||
}
|
||||
|
||||
// Error takes a message and a label list and combines them into a single event
|
||||
// before delivering them to the exporter. It captures the error in the
|
||||
// delivered event.
|
||||
func Error(ctx context.Context, message string, err error, labels ...label.Label) {
|
||||
core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Msg.Of(message),
|
||||
keys.Err.Of(err),
|
||||
}, labels))
|
||||
}
|
||||
|
||||
// IsError returns true if the event was built by the Error function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsError(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Msg &&
|
||||
ev.Label(1).Key() == keys.Err
|
||||
}
|
||||
|
||||
// Metric sends a label event to the exporter with the supplied labels.
|
||||
func Metric(ctx context.Context, labels ...label.Label) {
|
||||
core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Metric.New(),
|
||||
}, labels))
|
||||
}
|
||||
|
||||
// IsMetric returns true if the event was built by the Metric function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsMetric(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Metric
|
||||
}
|
||||
|
||||
// Label sends a label event to the exporter with the supplied labels.
|
||||
func Label(ctx context.Context, labels ...label.Label) context.Context {
|
||||
return core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Label.New(),
|
||||
}, labels))
|
||||
}
|
||||
|
||||
// IsLabel returns true if the event was built by the Label function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsLabel(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Label
|
||||
}
|
||||
|
||||
// Start sends a span start event with the supplied label list to the exporter.
|
||||
// It also returns a function that will end the span, which should normally be
|
||||
// deferred.
|
||||
func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
|
||||
return core.ExportPair(ctx,
|
||||
core.MakeEvent([3]label.Label{
|
||||
keys.Start.Of(name),
|
||||
}, labels),
|
||||
core.MakeEvent([3]label.Label{
|
||||
keys.End.New(),
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// IsStart returns true if the event was built by the Start function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsStart(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Start
|
||||
}
|
||||
|
||||
// IsEnd returns true if the event was built by the End function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsEnd(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.End
|
||||
}
|
||||
|
||||
// Detach returns a context without an associated span.
|
||||
// This allows the creation of spans that are not children of the current span.
|
||||
func Detach(ctx context.Context) context.Context {
|
||||
return core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Detach.New(),
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// IsDetach returns true if the event was built by the Detach function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsDetach(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Detach
|
||||
}
|
||||
564
vendor/golang.org/x/tools/internal/event/keys/keys.go
generated
vendored
Normal file
564
vendor/golang.org/x/tools/internal/event/keys/keys.go
generated
vendored
Normal file
@@ -0,0 +1,564 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package keys
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Value represents a key for untyped values.
|
||||
type Value struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// New creates a new Key for untyped values.
|
||||
func New(name, description string) *Value {
|
||||
return &Value{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Value) Name() string { return k.name }
|
||||
func (k *Value) Description() string { return k.description }
|
||||
|
||||
func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
fmt.Fprint(w, k.From(l))
|
||||
}
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Value) Get(lm label.Map) interface{} {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
|
||||
|
||||
// Tag represents a key for tagging labels that have no value.
|
||||
// These are used when the existence of the label is the entire information it
|
||||
// carries, such as marking events to be of a specific kind, or from a specific
|
||||
// package.
|
||||
type Tag struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewTag creates a new Key for tagging labels.
|
||||
func NewTag(name, description string) *Tag {
|
||||
return &Tag{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Tag) Name() string { return k.name }
|
||||
func (k *Tag) Description() string { return k.description }
|
||||
|
||||
func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {}
|
||||
|
||||
// New creates a new Label with this key.
|
||||
func (k *Tag) New() label.Label { return label.OfValue(k, nil) }
|
||||
|
||||
// Int represents a key
|
||||
type Int struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt creates a new Key for int values.
|
||||
func NewInt(name, description string) *Int {
|
||||
return &Int{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int) Name() string { return k.name }
|
||||
func (k *Int) Description() string { return k.description }
|
||||
|
||||
func (k *Int) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int) Get(lm label.Map) int {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int) From(t label.Label) int { return int(t.Unpack64()) }
|
||||
|
||||
// Int8 represents a key
|
||||
type Int8 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt8 creates a new Key for int8 values.
|
||||
func NewInt8(name, description string) *Int8 {
|
||||
return &Int8{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int8) Name() string { return k.name }
|
||||
func (k *Int8) Description() string { return k.description }
|
||||
|
||||
func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int8) Get(lm label.Map) int8 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) }
|
||||
|
||||
// Int16 represents a key
|
||||
type Int16 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt16 creates a new Key for int16 values.
|
||||
func NewInt16(name, description string) *Int16 {
|
||||
return &Int16{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int16) Name() string { return k.name }
|
||||
func (k *Int16) Description() string { return k.description }
|
||||
|
||||
func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int16) Get(lm label.Map) int16 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) }
|
||||
|
||||
// Int32 represents a key
|
||||
type Int32 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt32 creates a new Key for int32 values.
|
||||
func NewInt32(name, description string) *Int32 {
|
||||
return &Int32{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int32) Name() string { return k.name }
|
||||
func (k *Int32) Description() string { return k.description }
|
||||
|
||||
func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int32) Get(lm label.Map) int32 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) }
|
||||
|
||||
// Int64 represents a key
|
||||
type Int64 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt64 creates a new Key for int64 values.
|
||||
func NewInt64(name, description string) *Int64 {
|
||||
return &Int64{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int64) Name() string { return k.name }
|
||||
func (k *Int64) Description() string { return k.description }
|
||||
|
||||
func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, k.From(l), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int64) Get(lm label.Map) int64 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) }
|
||||
|
||||
// UInt represents a key
|
||||
type UInt struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt creates a new Key for uint values.
|
||||
func NewUInt(name, description string) *UInt {
|
||||
return &UInt{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt) Name() string { return k.name }
|
||||
func (k *UInt) Description() string { return k.description }
|
||||
|
||||
func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt) Get(lm label.Map) uint {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) }
|
||||
|
||||
// UInt8 represents a key
|
||||
type UInt8 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt8 creates a new Key for uint8 values.
|
||||
func NewUInt8(name, description string) *UInt8 {
|
||||
return &UInt8{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt8) Name() string { return k.name }
|
||||
func (k *UInt8) Description() string { return k.description }
|
||||
|
||||
func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt8) Get(lm label.Map) uint8 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) }
|
||||
|
||||
// UInt16 represents a key
|
||||
type UInt16 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt16 creates a new Key for uint16 values.
|
||||
func NewUInt16(name, description string) *UInt16 {
|
||||
return &UInt16{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt16) Name() string { return k.name }
|
||||
func (k *UInt16) Description() string { return k.description }
|
||||
|
||||
func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt16) Get(lm label.Map) uint16 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
|
||||
|
||||
// UInt32 represents a key
|
||||
type UInt32 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt32 creates a new Key for uint32 values.
|
||||
func NewUInt32(name, description string) *UInt32 {
|
||||
return &UInt32{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt32) Name() string { return k.name }
|
||||
func (k *UInt32) Description() string { return k.description }
|
||||
|
||||
func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt32) Get(lm label.Map) uint32 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
|
||||
|
||||
// UInt64 represents a key
|
||||
type UInt64 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt64 creates a new Key for uint64 values.
|
||||
func NewUInt64(name, description string) *UInt64 {
|
||||
return &UInt64{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt64) Name() string { return k.name }
|
||||
func (k *UInt64) Description() string { return k.description }
|
||||
|
||||
func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, k.From(l), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt64) Get(lm label.Map) uint64 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
|
||||
|
||||
// Float32 represents a key
|
||||
type Float32 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewFloat32 creates a new Key for float32 values.
|
||||
func NewFloat32(name, description string) *Float32 {
|
||||
return &Float32{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Float32) Name() string { return k.name }
|
||||
func (k *Float32) Description() string { return k.description }
|
||||
|
||||
func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Float32) Of(v float32) label.Label {
|
||||
return label.Of64(k, uint64(math.Float32bits(v)))
|
||||
}
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Float32) Get(lm label.Map) float32 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Float32) From(t label.Label) float32 {
|
||||
return math.Float32frombits(uint32(t.Unpack64()))
|
||||
}
|
||||
|
||||
// Float64 represents a key
|
||||
type Float64 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewFloat64 creates a new Key for int64 values.
|
||||
func NewFloat64(name, description string) *Float64 {
|
||||
return &Float64{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Float64) Name() string { return k.name }
|
||||
func (k *Float64) Description() string { return k.description }
|
||||
|
||||
func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Float64) Of(v float64) label.Label {
|
||||
return label.Of64(k, math.Float64bits(v))
|
||||
}
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Float64) Get(lm label.Map) float64 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Float64) From(t label.Label) float64 {
|
||||
return math.Float64frombits(t.Unpack64())
|
||||
}
|
||||
|
||||
// String represents a key
|
||||
type String struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewString creates a new Key for int64 values.
|
||||
func NewString(name, description string) *String {
|
||||
return &String{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *String) Name() string { return k.name }
|
||||
func (k *String) Description() string { return k.description }
|
||||
|
||||
func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendQuote(buf, k.From(l)))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *String) Get(lm label.Map) string {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *String) From(t label.Label) string { return t.UnpackString() }
|
||||
|
||||
// Boolean represents a key
|
||||
type Boolean struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewBoolean creates a new Key for bool values.
|
||||
func NewBoolean(name, description string) *Boolean {
|
||||
return &Boolean{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Boolean) Name() string { return k.name }
|
||||
func (k *Boolean) Description() string { return k.description }
|
||||
|
||||
func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendBool(buf, k.From(l)))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Boolean) Of(v bool) label.Label {
|
||||
if v {
|
||||
return label.Of64(k, 1)
|
||||
}
|
||||
return label.Of64(k, 0)
|
||||
}
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Boolean) Get(lm label.Map) bool {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
|
||||
|
||||
// Error represents a key
|
||||
type Error struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewError creates a new Key for int64 values.
|
||||
func NewError(name, description string) *Error {
|
||||
return &Error{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Error) Name() string { return k.name }
|
||||
func (k *Error) Description() string { return k.description }
|
||||
|
||||
func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
io.WriteString(w, k.From(l).Error())
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Error) Get(lm label.Map) error {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Error) From(t label.Label) error {
|
||||
err, _ := t.UnpackValue().(error)
|
||||
return err
|
||||
}
|
||||
22
vendor/golang.org/x/tools/internal/event/keys/standard.go
generated
vendored
Normal file
22
vendor/golang.org/x/tools/internal/event/keys/standard.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package keys
|
||||
|
||||
var (
|
||||
// Msg is a key used to add message strings to label lists.
|
||||
Msg = NewString("message", "a readable message")
|
||||
// Label is a key used to indicate an event adds labels to the context.
|
||||
Label = NewTag("label", "a label context marker")
|
||||
// Start is used for things like traces that have a name.
|
||||
Start = NewString("start", "span start")
|
||||
// Metric is a key used to indicate an event records metrics.
|
||||
End = NewTag("end", "a span end marker")
|
||||
// Metric is a key used to indicate an event records metrics.
|
||||
Detach = NewTag("detach", "a span detach marker")
|
||||
// Err is a key used to add error values to label lists.
|
||||
Err = NewError("error", "an error that occurred")
|
||||
// Metric is a key used to indicate an event records metrics.
|
||||
Metric = NewTag("metric", "a metric event marker")
|
||||
)
|
||||
215
vendor/golang.org/x/tools/internal/event/label/label.go
generated
vendored
Normal file
215
vendor/golang.org/x/tools/internal/event/label/label.go
generated
vendored
Normal file
@@ -0,0 +1,215 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package label
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Key is used as the identity of a Label.
|
||||
// Keys are intended to be compared by pointer only, the name should be unique
|
||||
// for communicating with external systems, but it is not required or enforced.
|
||||
type Key interface {
|
||||
// Name returns the key name.
|
||||
Name() string
|
||||
// Description returns a string that can be used to describe the value.
|
||||
Description() string
|
||||
|
||||
// Format is used in formatting to append the value of the label to the
|
||||
// supplied buffer.
|
||||
// The formatter may use the supplied buf as a scratch area to avoid
|
||||
// allocations.
|
||||
Format(w io.Writer, buf []byte, l Label)
|
||||
}
|
||||
|
||||
// Label holds a key and value pair.
|
||||
// It is normally used when passing around lists of labels.
|
||||
type Label struct {
|
||||
key Key
|
||||
packed uint64
|
||||
untyped interface{}
|
||||
}
|
||||
|
||||
// Map is the interface to a collection of Labels indexed by key.
|
||||
type Map interface {
|
||||
// Find returns the label that matches the supplied key.
|
||||
Find(key Key) Label
|
||||
}
|
||||
|
||||
// List is the interface to something that provides an iterable
|
||||
// list of labels.
|
||||
// Iteration should start from 0 and continue until Valid returns false.
|
||||
type List interface {
|
||||
// Valid returns true if the index is within range for the list.
|
||||
// It does not imply the label at that index will itself be valid.
|
||||
Valid(index int) bool
|
||||
// Label returns the label at the given index.
|
||||
Label(index int) Label
|
||||
}
|
||||
|
||||
// list implements LabelList for a list of Labels.
|
||||
type list struct {
|
||||
labels []Label
|
||||
}
|
||||
|
||||
// filter wraps a LabelList filtering out specific labels.
|
||||
type filter struct {
|
||||
keys []Key
|
||||
underlying List
|
||||
}
|
||||
|
||||
// listMap implements LabelMap for a simple list of labels.
|
||||
type listMap struct {
|
||||
labels []Label
|
||||
}
|
||||
|
||||
// mapChain implements LabelMap for a list of underlying LabelMap.
|
||||
type mapChain struct {
|
||||
maps []Map
|
||||
}
|
||||
|
||||
// OfValue creates a new label from the key and value.
|
||||
// This method is for implementing new key types, label creation should
|
||||
// normally be done with the Of method of the key.
|
||||
func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
|
||||
|
||||
// UnpackValue assumes the label was built using LabelOfValue and returns the value
|
||||
// that was passed to that constructor.
|
||||
// This method is for implementing new key types, for type safety normal
|
||||
// access should be done with the From method of the key.
|
||||
func (t Label) UnpackValue() interface{} { return t.untyped }
|
||||
|
||||
// Of64 creates a new label from a key and a uint64. This is often
|
||||
// used for non uint64 values that can be packed into a uint64.
|
||||
// This method is for implementing new key types, label creation should
|
||||
// normally be done with the Of method of the key.
|
||||
func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
|
||||
|
||||
// Unpack64 assumes the label was built using LabelOf64 and returns the value that
|
||||
// was passed to that constructor.
|
||||
// This method is for implementing new key types, for type safety normal
|
||||
// access should be done with the From method of the key.
|
||||
func (t Label) Unpack64() uint64 { return t.packed }
|
||||
|
||||
type stringptr unsafe.Pointer
|
||||
|
||||
// OfString creates a new label from a key and a string.
|
||||
// This method is for implementing new key types, label creation should
|
||||
// normally be done with the Of method of the key.
|
||||
func OfString(k Key, v string) Label {
|
||||
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
|
||||
return Label{
|
||||
key: k,
|
||||
packed: uint64(hdr.Len),
|
||||
untyped: stringptr(hdr.Data),
|
||||
}
|
||||
}
|
||||
|
||||
// UnpackString assumes the label was built using LabelOfString and returns the
|
||||
// value that was passed to that constructor.
|
||||
// This method is for implementing new key types, for type safety normal
|
||||
// access should be done with the From method of the key.
|
||||
func (t Label) UnpackString() string {
|
||||
var v string
|
||||
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
|
||||
hdr.Data = uintptr(t.untyped.(stringptr))
|
||||
hdr.Len = int(t.packed)
|
||||
return v
|
||||
}
|
||||
|
||||
// Valid returns true if the Label is a valid one (it has a key).
|
||||
func (t Label) Valid() bool { return t.key != nil }
|
||||
|
||||
// Key returns the key of this Label.
|
||||
func (t Label) Key() Key { return t.key }
|
||||
|
||||
// Format is used for debug printing of labels.
|
||||
func (t Label) Format(f fmt.State, r rune) {
|
||||
if !t.Valid() {
|
||||
io.WriteString(f, `nil`)
|
||||
return
|
||||
}
|
||||
io.WriteString(f, t.Key().Name())
|
||||
io.WriteString(f, "=")
|
||||
var buf [128]byte
|
||||
t.Key().Format(f, buf[:0], t)
|
||||
}
|
||||
|
||||
func (l *list) Valid(index int) bool {
|
||||
return index >= 0 && index < len(l.labels)
|
||||
}
|
||||
|
||||
func (l *list) Label(index int) Label {
|
||||
return l.labels[index]
|
||||
}
|
||||
|
||||
func (f *filter) Valid(index int) bool {
|
||||
return f.underlying.Valid(index)
|
||||
}
|
||||
|
||||
func (f *filter) Label(index int) Label {
|
||||
l := f.underlying.Label(index)
|
||||
for _, f := range f.keys {
|
||||
if l.Key() == f {
|
||||
return Label{}
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (lm listMap) Find(key Key) Label {
|
||||
for _, l := range lm.labels {
|
||||
if l.Key() == key {
|
||||
return l
|
||||
}
|
||||
}
|
||||
return Label{}
|
||||
}
|
||||
|
||||
func (c mapChain) Find(key Key) Label {
|
||||
for _, src := range c.maps {
|
||||
l := src.Find(key)
|
||||
if l.Valid() {
|
||||
return l
|
||||
}
|
||||
}
|
||||
return Label{}
|
||||
}
|
||||
|
||||
var emptyList = &list{}
|
||||
|
||||
func NewList(labels ...Label) List {
|
||||
if len(labels) == 0 {
|
||||
return emptyList
|
||||
}
|
||||
return &list{labels: labels}
|
||||
}
|
||||
|
||||
func Filter(l List, keys ...Key) List {
|
||||
if len(keys) == 0 {
|
||||
return l
|
||||
}
|
||||
return &filter{keys: keys, underlying: l}
|
||||
}
|
||||
|
||||
func NewMap(labels ...Label) Map {
|
||||
return listMap{labels: labels}
|
||||
}
|
||||
|
||||
func MergeMaps(srcs ...Map) Map {
|
||||
var nonNil []Map
|
||||
for _, src := range srcs {
|
||||
if src != nil {
|
||||
nonNil = append(nonNil, src)
|
||||
}
|
||||
}
|
||||
if len(nonNil) == 1 {
|
||||
return nonNil[0]
|
||||
}
|
||||
return mapChain{maps: nonNil}
|
||||
}
|
||||
59
vendor/golang.org/x/tools/internal/event/tag/tag.go
generated
vendored
Normal file
59
vendor/golang.org/x/tools/internal/event/tag/tag.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tag provides the labels used for telemetry throughout gopls.
|
||||
package tag
|
||||
|
||||
import (
|
||||
"golang.org/x/tools/internal/event/keys"
|
||||
)
|
||||
|
||||
var (
|
||||
// create the label keys we use
|
||||
Method = keys.NewString("method", "")
|
||||
StatusCode = keys.NewString("status.code", "")
|
||||
StatusMessage = keys.NewString("status.message", "")
|
||||
RPCID = keys.NewString("id", "")
|
||||
RPCDirection = keys.NewString("direction", "")
|
||||
File = keys.NewString("file", "")
|
||||
Directory = keys.New("directory", "")
|
||||
URI = keys.New("URI", "")
|
||||
Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs
|
||||
PackagePath = keys.NewString("package_path", "")
|
||||
Query = keys.New("query", "")
|
||||
Snapshot = keys.NewUInt64("snapshot", "")
|
||||
Operation = keys.NewString("operation", "")
|
||||
|
||||
Position = keys.New("position", "")
|
||||
Category = keys.NewString("category", "")
|
||||
PackageCount = keys.NewInt("packages", "")
|
||||
Files = keys.New("files", "")
|
||||
Port = keys.NewInt("port", "")
|
||||
Type = keys.New("type", "")
|
||||
HoverKind = keys.NewString("hoverkind", "")
|
||||
|
||||
NewServer = keys.NewString("new_server", "A new server was added")
|
||||
EndServer = keys.NewString("end_server", "A server was shut down")
|
||||
|
||||
ServerID = keys.NewString("server", "The server ID an event is related to")
|
||||
Logfile = keys.NewString("logfile", "")
|
||||
DebugAddress = keys.NewString("debug_address", "")
|
||||
GoplsPath = keys.NewString("gopls_path", "")
|
||||
ClientID = keys.NewString("client_id", "")
|
||||
|
||||
Level = keys.NewInt("level", "The logging level")
|
||||
)
|
||||
|
||||
var (
|
||||
// create the stats we measure
|
||||
Started = keys.NewInt64("started", "Count of started RPCs.")
|
||||
ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes)
|
||||
SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes)
|
||||
Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds)
|
||||
)
|
||||
|
||||
const (
|
||||
Inbound = "in"
|
||||
Outbound = "out"
|
||||
)
|
||||
196
vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
generated
vendored
Normal file
196
vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
generated
vendored
Normal file
@@ -0,0 +1,196 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package fastwalk provides a faster version of [filepath.Walk] for file system
|
||||
// scanning tools.
|
||||
package fastwalk
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
|
||||
// symlink named in the call may be traversed.
|
||||
var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
|
||||
|
||||
// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the
|
||||
// callback should not be called for any other files in the current directory.
|
||||
// Child directories will still be traversed.
|
||||
var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
|
||||
|
||||
// Walk is a faster implementation of [filepath.Walk].
|
||||
//
|
||||
// [filepath.Walk]'s design necessarily calls [os.Lstat] on each file,
|
||||
// even if the caller needs less info.
|
||||
// Many tools need only the type of each file.
|
||||
// On some platforms, this information is provided directly by the readdir
|
||||
// system call, avoiding the need to stat each file individually.
|
||||
// fastwalk_unix.go contains a fork of the syscall routines.
|
||||
//
|
||||
// See golang.org/issue/16399.
|
||||
//
|
||||
// Walk walks the file tree rooted at root, calling walkFn for
|
||||
// each file or directory in the tree, including root.
|
||||
//
|
||||
// If Walk returns [filepath.SkipDir], the directory is skipped.
|
||||
//
|
||||
// Unlike [filepath.Walk]:
|
||||
// - file stat calls must be done by the user.
|
||||
// The only provided metadata is the file type, which does not include
|
||||
// any permission bits.
|
||||
// - multiple goroutines stat the filesystem concurrently. The provided
|
||||
// walkFn must be safe for concurrent use.
|
||||
// - Walk can follow symlinks if walkFn returns the TraverseLink
|
||||
// sentinel error. It is the walkFn's responsibility to prevent
|
||||
// Walk from going into symlink cycles.
|
||||
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
|
||||
// TODO(bradfitz): make numWorkers configurable? We used a
|
||||
// minimum of 4 to give the kernel more info about multiple
|
||||
// things we want, in hopes its I/O scheduling can take
|
||||
// advantage of that. Hopefully most are in cache. Maybe 4 is
|
||||
// even too low of a minimum. Profile more.
|
||||
numWorkers := 4
|
||||
if n := runtime.NumCPU(); n > numWorkers {
|
||||
numWorkers = n
|
||||
}
|
||||
|
||||
// Make sure to wait for all workers to finish, otherwise
|
||||
// walkFn could still be called after returning. This Wait call
|
||||
// runs after close(e.donec) below.
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
|
||||
w := &walker{
|
||||
fn: walkFn,
|
||||
enqueuec: make(chan walkItem, numWorkers), // buffered for performance
|
||||
workc: make(chan walkItem, numWorkers), // buffered for performance
|
||||
donec: make(chan struct{}),
|
||||
|
||||
// buffered for correctness & not leaking goroutines:
|
||||
resc: make(chan error, numWorkers),
|
||||
}
|
||||
defer close(w.donec)
|
||||
|
||||
for i := 0; i < numWorkers; i++ {
|
||||
wg.Add(1)
|
||||
go w.doWork(&wg)
|
||||
}
|
||||
todo := []walkItem{{dir: root}}
|
||||
out := 0
|
||||
for {
|
||||
workc := w.workc
|
||||
var workItem walkItem
|
||||
if len(todo) == 0 {
|
||||
workc = nil
|
||||
} else {
|
||||
workItem = todo[len(todo)-1]
|
||||
}
|
||||
select {
|
||||
case workc <- workItem:
|
||||
todo = todo[:len(todo)-1]
|
||||
out++
|
||||
case it := <-w.enqueuec:
|
||||
todo = append(todo, it)
|
||||
case err := <-w.resc:
|
||||
out--
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if out == 0 && len(todo) == 0 {
|
||||
// It's safe to quit here, as long as the buffered
|
||||
// enqueue channel isn't also readable, which might
|
||||
// happen if the worker sends both another unit of
|
||||
// work and its result before the other select was
|
||||
// scheduled and both w.resc and w.enqueuec were
|
||||
// readable.
|
||||
select {
|
||||
case it := <-w.enqueuec:
|
||||
todo = append(todo, it)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// doWork reads directories as instructed (via workc) and runs the
|
||||
// user's callback function.
|
||||
func (w *walker) doWork(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
select {
|
||||
case <-w.donec:
|
||||
return
|
||||
case it := <-w.workc:
|
||||
select {
|
||||
case <-w.donec:
|
||||
return
|
||||
case w.resc <- w.walk(it.dir, !it.callbackDone):
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type walker struct {
|
||||
fn func(path string, typ os.FileMode) error
|
||||
|
||||
donec chan struct{} // closed on fastWalk's return
|
||||
workc chan walkItem // to workers
|
||||
enqueuec chan walkItem // from workers
|
||||
resc chan error // from workers
|
||||
}
|
||||
|
||||
type walkItem struct {
|
||||
dir string
|
||||
callbackDone bool // callback already called; don't do it again
|
||||
}
|
||||
|
||||
func (w *walker) enqueue(it walkItem) {
|
||||
select {
|
||||
case w.enqueuec <- it:
|
||||
case <-w.donec:
|
||||
}
|
||||
}
|
||||
|
||||
func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
|
||||
joined := dirName + string(os.PathSeparator) + baseName
|
||||
if typ == os.ModeDir {
|
||||
w.enqueue(walkItem{dir: joined})
|
||||
return nil
|
||||
}
|
||||
|
||||
err := w.fn(joined, typ)
|
||||
if typ == os.ModeSymlink {
|
||||
if err == ErrTraverseLink {
|
||||
// Set callbackDone so we don't call it twice for both the
|
||||
// symlink-as-symlink and the symlink-as-directory later:
|
||||
w.enqueue(walkItem{dir: joined, callbackDone: true})
|
||||
return nil
|
||||
}
|
||||
if err == filepath.SkipDir {
|
||||
// Permit SkipDir on symlinks too.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *walker) walk(root string, runUserCallback bool) error {
|
||||
if runUserCallback {
|
||||
err := w.fn(root, os.ModeDir)
|
||||
if err == filepath.SkipDir {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return readDir(root, w.onDirEnt)
|
||||
}
|
||||
119
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go
generated
vendored
Normal file
119
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build darwin && cgo
|
||||
// +build darwin,cgo
|
||||
|
||||
package fastwalk
|
||||
|
||||
/*
|
||||
#include <dirent.h>
|
||||
|
||||
// fastwalk_readdir_r wraps readdir_r so that we don't have to pass a dirent**
|
||||
// result pointer which triggers CGO's "Go pointer to Go pointer" check unless
|
||||
// we allocat the result dirent* with malloc.
|
||||
//
|
||||
// fastwalk_readdir_r returns 0 on success, -1 upon reaching the end of the
|
||||
// directory, or a positive error number to indicate failure.
|
||||
static int fastwalk_readdir_r(DIR *fd, struct dirent *entry) {
|
||||
struct dirent *result;
|
||||
int ret = readdir_r(fd, entry, &result);
|
||||
if (ret == 0 && result == NULL) {
|
||||
ret = -1; // EOF
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
|
||||
fd, err := openDir(dirName)
|
||||
if err != nil {
|
||||
return &os.PathError{Op: "opendir", Path: dirName, Err: err}
|
||||
}
|
||||
defer C.closedir(fd)
|
||||
|
||||
skipFiles := false
|
||||
var dirent syscall.Dirent
|
||||
for {
|
||||
ret := int(C.fastwalk_readdir_r(fd, (*C.struct_dirent)(unsafe.Pointer(&dirent))))
|
||||
if ret != 0 {
|
||||
if ret == -1 {
|
||||
break // EOF
|
||||
}
|
||||
if ret == int(syscall.EINTR) {
|
||||
continue
|
||||
}
|
||||
return &os.PathError{Op: "readdir", Path: dirName, Err: syscall.Errno(ret)}
|
||||
}
|
||||
if dirent.Ino == 0 {
|
||||
continue
|
||||
}
|
||||
typ := dtToType(dirent.Type)
|
||||
if skipFiles && typ.IsRegular() {
|
||||
continue
|
||||
}
|
||||
name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:]
|
||||
name = name[:dirent.Namlen]
|
||||
for i, c := range name {
|
||||
if c == 0 {
|
||||
name = name[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
// Check for useless names before allocating a string.
|
||||
if string(name) == "." || string(name) == ".." {
|
||||
continue
|
||||
}
|
||||
if err := fn(dirName, string(name), typ); err != nil {
|
||||
if err != ErrSkipFiles {
|
||||
return err
|
||||
}
|
||||
skipFiles = true
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func dtToType(typ uint8) os.FileMode {
|
||||
switch typ {
|
||||
case syscall.DT_BLK:
|
||||
return os.ModeDevice
|
||||
case syscall.DT_CHR:
|
||||
return os.ModeDevice | os.ModeCharDevice
|
||||
case syscall.DT_DIR:
|
||||
return os.ModeDir
|
||||
case syscall.DT_FIFO:
|
||||
return os.ModeNamedPipe
|
||||
case syscall.DT_LNK:
|
||||
return os.ModeSymlink
|
||||
case syscall.DT_REG:
|
||||
return 0
|
||||
case syscall.DT_SOCK:
|
||||
return os.ModeSocket
|
||||
}
|
||||
return ^os.FileMode(0)
|
||||
}
|
||||
|
||||
// openDir wraps opendir(3) and handles any EINTR errors. The returned *DIR
|
||||
// needs to be closed with closedir(3).
|
||||
func openDir(path string) (*C.DIR, error) {
|
||||
name, err := syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for {
|
||||
fd, err := C.opendir((*C.char)(unsafe.Pointer(name)))
|
||||
if err != syscall.EINTR {
|
||||
return fd, err
|
||||
}
|
||||
}
|
||||
}
|
||||
14
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
generated
vendored
Normal file
14
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build freebsd || openbsd || netbsd
|
||||
// +build freebsd openbsd netbsd
|
||||
|
||||
package fastwalk
|
||||
|
||||
import "syscall"
|
||||
|
||||
func direntInode(dirent *syscall.Dirent) uint64 {
|
||||
return uint64(dirent.Fileno)
|
||||
}
|
||||
15
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
generated
vendored
Normal file
15
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (linux || (darwin && !cgo)) && !appengine
|
||||
// +build linux darwin,!cgo
|
||||
// +build !appengine
|
||||
|
||||
package fastwalk
|
||||
|
||||
import "syscall"
|
||||
|
||||
func direntInode(dirent *syscall.Dirent) uint64 {
|
||||
return dirent.Ino
|
||||
}
|
||||
14
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
generated
vendored
Normal file
14
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (darwin && !cgo) || freebsd || openbsd || netbsd
|
||||
// +build darwin,!cgo freebsd openbsd netbsd
|
||||
|
||||
package fastwalk
|
||||
|
||||
import "syscall"
|
||||
|
||||
func direntNamlen(dirent *syscall.Dirent) uint64 {
|
||||
return uint64(dirent.Namlen)
|
||||
}
|
||||
29
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
generated
vendored
Normal file
29
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && !appengine
|
||||
// +build linux,!appengine
|
||||
|
||||
package fastwalk
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func direntNamlen(dirent *syscall.Dirent) uint64 {
|
||||
const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
|
||||
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
|
||||
const nameBufLen = uint16(len(nameBuf))
|
||||
limit := dirent.Reclen - fixedHdr
|
||||
if limit > nameBufLen {
|
||||
limit = nameBufLen
|
||||
}
|
||||
nameLen := bytes.IndexByte(nameBuf[:limit], 0)
|
||||
if nameLen < 0 {
|
||||
panic("failed to find terminating 0 byte in dirent")
|
||||
}
|
||||
return uint64(nameLen)
|
||||
}
|
||||
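Editor's sketch, not part of the vendored diff: how unsafe.Offsetof and unsafe.Sizeof bound the readable name bytes of a syscall.Dirent, which is what direntNamlen above relies on. Unix-only; the printed values vary by platform.

package main

import (
	"fmt"
	"syscall"
	"unsafe"
)

func main() {
	var d syscall.Dirent
	// Fixed header bytes before the inline name buffer, and the buffer's
	// capacity; direntNamlen caps its NUL scan with these two values.
	fixedHdr := unsafe.Offsetof(d.Name)
	nameCap := unsafe.Sizeof(d.Name)
	fmt.Println("fixed header bytes:", fixedHdr, "name capacity:", nameCap)
}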
41
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
generated
vendored
Normal file
41
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build appengine || (!linux && !darwin && !freebsd && !openbsd && !netbsd)
|
||||
// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
|
||||
|
||||
package fastwalk
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// readDir calls fn for each directory entry in dirName.
|
||||
// It does not descend into directories or follow symlinks.
|
||||
// If fn returns a non-nil error, readDir returns with that error
|
||||
// immediately.
|
||||
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
|
||||
fis, err := os.ReadDir(dirName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
skipFiles := false
|
||||
for _, fi := range fis {
|
||||
info, err := fi.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Mode().IsRegular() && skipFiles {
|
||||
continue
|
||||
}
|
||||
if err := fn(dirName, fi.Name(), info.Mode()&os.ModeType); err != nil {
|
||||
if err == ErrSkipFiles {
|
||||
skipFiles = true
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
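Editor's sketch, not part of the vendored diff: the callback contract readDir implements, seen from the caller's side. errSkipFiles stands in for this package's ErrSkipFiles sentinel; once the callback returns it, regular files stop being reported while other entries keep coming.

package main

import (
	"errors"
	"fmt"
	"os"
)

var errSkipFiles = errors.New("skip files")

func walkDir(dir string, fn func(name string, typ os.FileMode) error) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	skipFiles := false
	for _, e := range entries {
		info, err := e.Info()
		if err != nil {
			return err
		}
		typ := info.Mode() & os.ModeType
		if skipFiles && typ.IsRegular() {
			continue
		}
		if err := fn(e.Name(), typ); err != nil {
			if errors.Is(err, errSkipFiles) {
				skipFiles = true
				continue
			}
			return err
		}
	}
	return nil
}

func main() {
	_ = walkDir(".", func(name string, typ os.FileMode) error {
		fmt.Println(name, typ)
		if typ.IsRegular() {
			return errSkipFiles // report the first regular file, then only non-regular entries
		}
		return nil
	})
}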
153
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
generated
vendored
Normal file
153
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
generated
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (linux || freebsd || openbsd || netbsd || (darwin && !cgo)) && !appengine
|
||||
// +build linux freebsd openbsd netbsd darwin,!cgo
|
||||
// +build !appengine
|
||||
|
||||
package fastwalk
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const blockSize = 8 << 10
|
||||
|
||||
// unknownFileMode is a sentinel (and bogus) os.FileMode
|
||||
// value used to represent a syscall.DT_UNKNOWN Dirent.Type.
|
||||
const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice
|
||||
|
||||
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
|
||||
fd, err := open(dirName, 0, 0)
|
||||
if err != nil {
|
||||
return &os.PathError{Op: "open", Path: dirName, Err: err}
|
||||
}
|
||||
defer syscall.Close(fd)
|
||||
|
||||
// The buffer must be at least a block long.
|
||||
buf := make([]byte, blockSize) // stack-allocated; doesn't escape
|
||||
bufp := 0 // starting read position in buf
|
||||
nbuf := 0 // end valid data in buf
|
||||
skipFiles := false
|
||||
for {
|
||||
if bufp >= nbuf {
|
||||
bufp = 0
|
||||
nbuf, err = readDirent(fd, buf)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("readdirent", err)
|
||||
}
|
||||
if nbuf <= 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
consumed, name, typ := parseDirEnt(buf[bufp:nbuf])
|
||||
bufp += consumed
|
||||
if name == "" || name == "." || name == ".." {
|
||||
continue
|
||||
}
|
||||
// Fallback for filesystems (like old XFS) that don't
|
||||
// support Dirent.Type and have DT_UNKNOWN (0) there
|
||||
// instead.
|
||||
if typ == unknownFileMode {
|
||||
fi, err := os.Lstat(dirName + "/" + name)
|
||||
if err != nil {
|
||||
// It got deleted in the meantime.
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
typ = fi.Mode() & os.ModeType
|
||||
}
|
||||
if skipFiles && typ.IsRegular() {
|
||||
continue
|
||||
}
|
||||
if err := fn(dirName, name, typ); err != nil {
|
||||
if err == ErrSkipFiles {
|
||||
skipFiles = true
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
|
||||
// golang.org/issue/37269
|
||||
dirent := &syscall.Dirent{}
|
||||
copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf)
|
||||
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
|
||||
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
|
||||
}
|
||||
if len(buf) < int(dirent.Reclen) {
|
||||
panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen))
|
||||
}
|
||||
consumed = int(dirent.Reclen)
|
||||
if direntInode(dirent) == 0 { // File absent in directory.
|
||||
return
|
||||
}
|
||||
switch dirent.Type {
|
||||
case syscall.DT_REG:
|
||||
typ = 0
|
||||
case syscall.DT_DIR:
|
||||
typ = os.ModeDir
|
||||
case syscall.DT_LNK:
|
||||
typ = os.ModeSymlink
|
||||
case syscall.DT_BLK:
|
||||
typ = os.ModeDevice
|
||||
case syscall.DT_FIFO:
|
||||
typ = os.ModeNamedPipe
|
||||
case syscall.DT_SOCK:
|
||||
typ = os.ModeSocket
|
||||
case syscall.DT_UNKNOWN:
|
||||
typ = unknownFileMode
|
||||
default:
|
||||
// Skip weird things.
|
||||
// It's probably a DT_WHT (http://lwn.net/Articles/325369/)
|
||||
// or something. Revisit if/when this package is moved outside
|
||||
// of goimports. goimports only cares about regular files,
|
||||
// symlinks, and directories.
|
||||
return
|
||||
}
|
||||
|
||||
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
|
||||
nameLen := direntNamlen(dirent)
|
||||
|
||||
// Special cases for common things:
|
||||
if nameLen == 1 && nameBuf[0] == '.' {
|
||||
name = "."
|
||||
} else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' {
|
||||
name = ".."
|
||||
} else {
|
||||
name = string(nameBuf[:nameLen])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// According to https://golang.org/doc/go1.14#runtime
|
||||
// A consequence of the implementation of preemption is that on Unix systems, including Linux and macOS
|
||||
// systems, programs built with Go 1.14 will receive more signals than programs built with earlier releases.
|
||||
//
|
||||
// This causes syscall.Open and syscall.ReadDirent to sometimes fail with EINTR errors.
|
||||
// We need to retry in this case.
|
||||
func open(path string, mode int, perm uint32) (fd int, err error) {
|
||||
for {
|
||||
fd, err := syscall.Open(path, mode, perm)
|
||||
if err != syscall.EINTR {
|
||||
return fd, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func readDirent(fd int, buf []byte) (n int, err error) {
|
||||
for {
|
||||
nbuf, err := syscall.ReadDirent(fd, buf)
|
||||
if err != syscall.EINTR {
|
||||
return nbuf, err
|
||||
}
|
||||
}
|
||||
}
|
||||
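Editor's sketch, not part of the vendored diff: the retry-on-EINTR shape shared by open and readDirent above, extracted into a helper. retryOnEINTR is an illustrative name; Unix-only.

package main

import (
	"fmt"
	"os"
	"syscall"
)

// retryOnEINTR re-invokes fn until it stops failing with EINTR, which
// Go 1.14+ asynchronous preemption can surface from slow system calls.
func retryOnEINTR(fn func() error) error {
	for {
		err := fn()
		if err != syscall.EINTR {
			return err
		}
	}
}

func main() {
	var fd int
	err := retryOnEINTR(func() (err error) {
		fd, err = syscall.Open(os.TempDir(), syscall.O_RDONLY, 0)
		return err
	})
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer syscall.Close(fd)
	fmt.Println("opened fd", fd)
}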
150
vendor/golang.org/x/tools/internal/gcimporter/bimport.go
generated
vendored
Normal file
150
vendor/golang.org/x/tools/internal/gcimporter/bimport.go
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file contains the remaining vestiges of
|
||||
// $GOROOT/src/go/internal/gcimporter/bimport.go.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func errorf(format string, args ...interface{}) {
|
||||
panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
|
||||
|
||||
// Synthesize a token.Pos
|
||||
type fakeFileSet struct {
|
||||
fset *token.FileSet
|
||||
files map[string]*fileInfo
|
||||
}
|
||||
|
||||
type fileInfo struct {
|
||||
file *token.File
|
||||
lastline int
|
||||
}
|
||||
|
||||
const maxlines = 64 * 1024
|
||||
|
||||
func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
|
||||
// TODO(mdempsky): Make use of column.
|
||||
|
||||
// Since we don't know the set of needed file positions, we reserve maxlines
|
||||
// positions per file. We delay calling token.File.SetLines until all
|
||||
// positions have been calculated (by way of fakeFileSet.setLines), so that
|
||||
// we can avoid setting unnecessary lines. See also golang/go#46586.
|
||||
f := s.files[file]
|
||||
if f == nil {
|
||||
f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
|
||||
s.files[file] = f
|
||||
}
|
||||
if line > maxlines {
|
||||
line = 1
|
||||
}
|
||||
if line > f.lastline {
|
||||
f.lastline = line
|
||||
}
|
||||
|
||||
// Return a fake position assuming that f.file consists only of newlines.
|
||||
return token.Pos(f.file.Base() + line - 1)
|
||||
}
|
||||
|
||||
func (s *fakeFileSet) setLines() {
|
||||
fakeLinesOnce.Do(func() {
|
||||
fakeLines = make([]int, maxlines)
|
||||
for i := range fakeLines {
|
||||
fakeLines[i] = i
|
||||
}
|
||||
})
|
||||
for _, f := range s.files {
|
||||
f.file.SetLines(fakeLines[:f.lastline])
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
fakeLines []int
|
||||
fakeLinesOnce sync.Once
|
||||
)
|
||||
|
||||
func chanDir(d int) types.ChanDir {
|
||||
// tag values must match the constants in cmd/compile/internal/gc/go.go
|
||||
switch d {
|
||||
case 1 /* Crecv */ :
|
||||
return types.RecvOnly
|
||||
case 2 /* Csend */ :
|
||||
return types.SendOnly
|
||||
case 3 /* Cboth */ :
|
||||
return types.SendRecv
|
||||
default:
|
||||
errorf("unexpected channel dir %d", d)
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
var predeclOnce sync.Once
|
||||
var predecl []types.Type // initialized lazily
|
||||
|
||||
func predeclared() []types.Type {
|
||||
predeclOnce.Do(func() {
|
||||
// initialize lazily to be sure that all
|
||||
// elements have been initialized before
|
||||
predecl = []types.Type{ // basic types
|
||||
types.Typ[types.Bool],
|
||||
types.Typ[types.Int],
|
||||
types.Typ[types.Int8],
|
||||
types.Typ[types.Int16],
|
||||
types.Typ[types.Int32],
|
||||
types.Typ[types.Int64],
|
||||
types.Typ[types.Uint],
|
||||
types.Typ[types.Uint8],
|
||||
types.Typ[types.Uint16],
|
||||
types.Typ[types.Uint32],
|
||||
types.Typ[types.Uint64],
|
||||
types.Typ[types.Uintptr],
|
||||
types.Typ[types.Float32],
|
||||
types.Typ[types.Float64],
|
||||
types.Typ[types.Complex64],
|
||||
types.Typ[types.Complex128],
|
||||
types.Typ[types.String],
|
||||
|
||||
// basic type aliases
|
||||
types.Universe.Lookup("byte").Type(),
|
||||
types.Universe.Lookup("rune").Type(),
|
||||
|
||||
// error
|
||||
types.Universe.Lookup("error").Type(),
|
||||
|
||||
// untyped types
|
||||
types.Typ[types.UntypedBool],
|
||||
types.Typ[types.UntypedInt],
|
||||
types.Typ[types.UntypedRune],
|
||||
types.Typ[types.UntypedFloat],
|
||||
types.Typ[types.UntypedComplex],
|
||||
types.Typ[types.UntypedString],
|
||||
types.Typ[types.UntypedNil],
|
||||
|
||||
// package unsafe
|
||||
types.Typ[types.UnsafePointer],
|
||||
|
||||
// invalid type
|
||||
types.Typ[types.Invalid], // only appears in packages with errors
|
||||
|
||||
// used internally by gc; never used by this package or in .a files
|
||||
anyType{},
|
||||
}
|
||||
predecl = append(predecl, additionalPredeclared()...)
|
||||
})
|
||||
return predecl
|
||||
}
|
||||
|
||||
type anyType struct{}
|
||||
|
||||
func (t anyType) Underlying() types.Type { return t }
|
||||
func (t anyType) String() string { return "any" }
|
||||
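Editor's sketch, not part of the vendored diff: how a token.File whose lines are all one character wide turns a line number into a token.Pos, which is the trick fakeFileSet.pos and setLines above depend on.

package main

import (
	"fmt"
	"go/token"
)

func main() {
	const maxLines = 16
	fset := token.NewFileSet()
	f := fset.AddFile("fake.go", -1, maxLines)

	// Pretend the file is nothing but newlines: line i starts at offset i-1.
	lines := make([]int, maxLines)
	for i := range lines {
		lines[i] = i
	}
	f.SetLines(lines)

	pos := token.Pos(f.Base() + 5 - 1) // synthetic position for line 5
	fmt.Println(fset.Position(pos))    // fake.go:5:1
}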
99
vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
generated
vendored
Normal file
99
vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
|
||||
|
||||
// This file implements FindExportData.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
|
||||
// See $GOROOT/include/ar.h.
|
||||
hdr := make([]byte, 16+12+6+6+8+10+2)
|
||||
_, err = io.ReadFull(r, hdr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// leave for debugging
|
||||
if false {
|
||||
fmt.Printf("header: %s", hdr)
|
||||
}
|
||||
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
|
||||
length, err := strconv.Atoi(s)
|
||||
size = int64(length)
|
||||
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
|
||||
err = fmt.Errorf("invalid archive header")
|
||||
return
|
||||
}
|
||||
name = strings.TrimSpace(string(hdr[:16]))
|
||||
return
|
||||
}
|
||||
|
||||
// FindExportData positions the reader r at the beginning of the
|
||||
// export data section of an underlying GC-created object/archive
|
||||
// file by reading from it. The reader must be positioned at the
|
||||
// start of the file before calling this function. The hdr result
|
||||
// is the string before the export data, either "$$" or "$$B".
|
||||
// The size result is the length of the export data in bytes, or -1 if not known.
|
||||
func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
|
||||
// Read first line to make sure this is an object file.
|
||||
line, err := r.ReadSlice('\n')
|
||||
if err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
|
||||
if string(line) == "!<arch>\n" {
|
||||
// Archive file. Scan to __.PKGDEF.
|
||||
var name string
|
||||
if name, size, err = readGopackHeader(r); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// First entry should be __.PKGDEF.
|
||||
if name != "__.PKGDEF" {
|
||||
err = fmt.Errorf("go archive is missing __.PKGDEF")
|
||||
return
|
||||
}
|
||||
|
||||
// Read first line of __.PKGDEF data, so that line
|
||||
// is once again the first line of the input.
|
||||
if line, err = r.ReadSlice('\n'); err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
size -= int64(len(line))
|
||||
}
|
||||
|
||||
// Now at __.PKGDEF in archive or still at beginning of file.
|
||||
// Either way, line should begin with "go object ".
|
||||
if !strings.HasPrefix(string(line), "go object ") {
|
||||
err = fmt.Errorf("not a Go object file")
|
||||
return
|
||||
}
|
||||
|
||||
// Skip over object header to export data.
|
||||
// Begins after first line starting with $$.
|
||||
for line[0] != '$' {
|
||||
if line, err = r.ReadSlice('\n'); err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
size -= int64(len(line))
|
||||
}
|
||||
hdr = string(line)
|
||||
if size < 0 {
|
||||
size = -1
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
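Editor's sketch, not part of the vendored diff: the fixed-width ar(5) entry header that readGopackHeader above slices by offset (name 16, mtime 12, uid 6, gid 6, mode 8, size 10, terminator 2). The header bytes here are synthetic.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	hdr := []byte(fmt.Sprintf("%-16s%-12s%-6s%-6s%-8s%-10s`\n",
		"__.PKGDEF", "0", "0", "0", "644", "1234"))

	name := strings.TrimSpace(string(hdr[:16]))
	sizeField := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
	size, _ := strconv.Atoi(sizeField)
	fmt.Println(name, size) // __.PKGDEF 1234
}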
273
vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
generated
vendored
Normal file
273
vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
generated
vendored
Normal file
@@ -0,0 +1,273 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
|
||||
|
||||
// Package gcimporter provides various functions for reading
|
||||
// gc-generated object files that can be used to implement the
|
||||
// Importer interface defined by the Go 1.5 standard library package.
|
||||
//
|
||||
// The encoding is deterministic: if the encoder is applied twice to
|
||||
// the same types.Package data structure, both encodings are equal.
|
||||
// This property may be important to avoid spurious changes in
|
||||
// applications such as build systems.
|
||||
//
|
||||
// However, the encoder is not necessarily idempotent. Importing an
|
||||
// exported package may yield a types.Package that, while it
|
||||
// represents the same set of Go types as the original, may differ in
|
||||
// the details of its internal representation. Because of these
|
||||
// differences, re-encoding the imported package may yield a
|
||||
// different, but equally valid, encoding of the package.
|
||||
package gcimporter // import "golang.org/x/tools/internal/gcimporter"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Enable debug during development: it adds some additional checks, and
|
||||
// prevents errors from being recovered.
|
||||
debug = false
|
||||
|
||||
// If trace is set, debugging output is printed to std out.
|
||||
trace = false
|
||||
)
|
||||
|
||||
var exportMap sync.Map // package dir → func() (string, bool)
|
||||
|
||||
// lookupGorootExport returns the location of the export data
|
||||
// (normally found in the build cache, but located in GOROOT/pkg
|
||||
// in prior Go releases) for the package located in pkgDir.
|
||||
//
|
||||
// (We use the package's directory instead of its import path
|
||||
// mainly to simplify handling of the packages in src/vendor
|
||||
// and cmd/vendor.)
|
||||
func lookupGorootExport(pkgDir string) (string, bool) {
|
||||
f, ok := exportMap.Load(pkgDir)
|
||||
if !ok {
|
||||
var (
|
||||
listOnce sync.Once
|
||||
exportPath string
|
||||
)
|
||||
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
|
||||
listOnce.Do(func() {
|
||||
cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
|
||||
cmd.Dir = build.Default.GOROOT
|
||||
var output []byte
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
|
||||
if len(exports) != 1 {
|
||||
return
|
||||
}
|
||||
|
||||
exportPath = exports[0]
|
||||
})
|
||||
|
||||
return exportPath, exportPath != ""
|
||||
})
|
||||
}
|
||||
|
||||
return f.(func() (string, bool))()
|
||||
}
|
||||
|
||||
var pkgExts = [...]string{".a", ".o"}
|
||||
|
||||
// FindPkg returns the filename and unique package id for an import
|
||||
// path based on package information provided by build.Import (using
|
||||
// the build.Default build.Context). A relative srcDir is interpreted
|
||||
// relative to the current working directory.
|
||||
// If no file was found, an empty filename is returned.
|
||||
func FindPkg(path, srcDir string) (filename, id string) {
|
||||
if path == "" {
|
||||
return
|
||||
}
|
||||
|
||||
var noext string
|
||||
switch {
|
||||
default:
|
||||
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
|
||||
// Don't require the source files to be present.
|
||||
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
|
||||
srcDir = abs
|
||||
}
|
||||
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
|
||||
if bp.PkgObj == "" {
|
||||
var ok bool
|
||||
if bp.Goroot && bp.Dir != "" {
|
||||
filename, ok = lookupGorootExport(bp.Dir)
|
||||
}
|
||||
if !ok {
|
||||
id = path // make sure we have an id to print in error message
|
||||
return
|
||||
}
|
||||
} else {
|
||||
noext = strings.TrimSuffix(bp.PkgObj, ".a")
|
||||
id = bp.ImportPath
|
||||
}
|
||||
|
||||
case build.IsLocalImport(path):
|
||||
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
|
||||
noext = filepath.Join(srcDir, path)
|
||||
id = noext
|
||||
|
||||
case filepath.IsAbs(path):
|
||||
// for completeness only - go/build.Import
|
||||
// does not support absolute imports
|
||||
// "/x" -> "/x.ext", "/x"
|
||||
noext = path
|
||||
id = path
|
||||
}
|
||||
|
||||
if false { // for debugging
|
||||
if path != id {
|
||||
fmt.Printf("%s -> %s\n", path, id)
|
||||
}
|
||||
}
|
||||
|
||||
if filename != "" {
|
||||
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// try extensions
|
||||
for _, ext := range pkgExts {
|
||||
filename = noext + ext
|
||||
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
filename = "" // not found
|
||||
return
|
||||
}
|
||||
|
||||
// Import imports a gc-generated package given its import path and srcDir, adds
|
||||
// the corresponding package object to the packages map, and returns the object.
|
||||
// The packages map must contain all packages already imported.
|
||||
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
|
||||
var rc io.ReadCloser
|
||||
var filename, id string
|
||||
if lookup != nil {
|
||||
// With custom lookup specified, assume that caller has
|
||||
// converted path to a canonical import path for use in the map.
|
||||
if path == "unsafe" {
|
||||
return types.Unsafe, nil
|
||||
}
|
||||
id = path
|
||||
|
||||
// No need to re-import if the package was imported completely before.
|
||||
if pkg = packages[id]; pkg != nil && pkg.Complete() {
|
||||
return
|
||||
}
|
||||
f, err := lookup(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rc = f
|
||||
} else {
|
||||
filename, id = FindPkg(path, srcDir)
|
||||
if filename == "" {
|
||||
if path == "unsafe" {
|
||||
return types.Unsafe, nil
|
||||
}
|
||||
return nil, fmt.Errorf("can't find import: %q", id)
|
||||
}
|
||||
|
||||
// no need to re-import if the package was imported completely before
|
||||
if pkg = packages[id]; pkg != nil && pkg.Complete() {
|
||||
return
|
||||
}
|
||||
|
||||
// open file
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// add file name to error
|
||||
err = fmt.Errorf("%s: %v", filename, err)
|
||||
}
|
||||
}()
|
||||
rc = f
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
var hdr string
|
||||
var size int64
|
||||
buf := bufio.NewReader(rc)
|
||||
if hdr, size, err = FindExportData(buf); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch hdr {
|
||||
case "$$B\n":
|
||||
var data []byte
|
||||
data, err = io.ReadAll(buf)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// TODO(gri): allow clients of go/importer to provide a FileSet.
|
||||
// Or, define a new standard go/types/gcexportdata package.
|
||||
fset := token.NewFileSet()
|
||||
|
||||
// Select appropriate importer.
|
||||
if len(data) > 0 {
|
||||
switch data[0] {
|
||||
case 'v', 'c', 'd': // binary, till go1.10
|
||||
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
|
||||
|
||||
case 'i': // indexed, till go1.19
|
||||
_, pkg, err := IImportData(fset, packages, data[1:], id)
|
||||
return pkg, err
|
||||
|
||||
case 'u': // unified, from go1.20
|
||||
_, pkg, err := UImportData(fset, packages, data[1:size], id)
|
||||
return pkg, err
|
||||
|
||||
default:
|
||||
l := len(data)
|
||||
if l > 10 {
|
||||
l = 10
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("unknown export data header: %q", hdr)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func deref(typ types.Type) types.Type {
|
||||
if p, _ := typ.(*types.Pointer); p != nil {
|
||||
return p.Elem()
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
type byPath []*types.Package
|
||||
|
||||
func (a byPath) Len() int { return len(a) }
|
||||
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
|
||||
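Editor's sketch, not part of the vendored diff: this internal package cannot be imported directly, but the standard library's go/importer exposes the same locate-then-decode flow that FindPkg and Import above implement.

package main

import (
	"fmt"
	"go/importer"
	"go/token"
	"log"
)

func main() {
	fset := token.NewFileSet()
	imp := importer.ForCompiler(fset, "gc", nil) // nil lookup: locate export data on disk
	pkg, err := imp.Import("fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "complete:", pkg.Complete())
}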
1322
vendor/golang.org/x/tools/internal/gcimporter/iexport.go
generated
vendored
Normal file
1322
vendor/golang.org/x/tools/internal/gcimporter/iexport.go
generated
vendored
Normal file
File diff suppressed because it is too large
1083
vendor/golang.org/x/tools/internal/gcimporter/iimport.go
generated
vendored
Normal file
1083
vendor/golang.org/x/tools/internal/gcimporter/iimport.go
generated
vendored
Normal file
File diff suppressed because it is too large
22
vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
generated
vendored
Normal file
22
vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.11
|
||||
// +build !go1.11
|
||||
|
||||
package gcimporter
|
||||
|
||||
import "go/types"
|
||||
|
||||
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
|
||||
named := make([]*types.Named, len(embeddeds))
|
||||
for i, e := range embeddeds {
|
||||
var ok bool
|
||||
named[i], ok = e.(*types.Named)
|
||||
if !ok {
|
||||
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
|
||||
}
|
||||
}
|
||||
return types.NewInterface(methods, named)
|
||||
}
|
||||
14
vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
generated
vendored
Normal file
14
vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.11
|
||||
// +build go1.11
|
||||
|
||||
package gcimporter
|
||||
|
||||
import "go/types"
|
||||
|
||||
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
|
||||
return types.NewInterfaceType(methods, embeddeds)
|
||||
}
|
||||
16
vendor/golang.org/x/tools/internal/gcimporter/support_go117.go
generated
vendored
Normal file
16
vendor/golang.org/x/tools/internal/gcimporter/support_go117.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.18
|
||||
// +build !go1.18
|
||||
|
||||
package gcimporter
|
||||
|
||||
import "go/types"
|
||||
|
||||
const iexportVersion = iexportVersionGo1_11
|
||||
|
||||
func additionalPredeclared() []types.Type {
|
||||
return nil
|
||||
}
|
||||
37
vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
generated
vendored
Normal file
37
vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package gcimporter
|
||||
|
||||
import "go/types"
|
||||
|
||||
const iexportVersion = iexportVersionGenerics
|
||||
|
||||
// additionalPredeclared returns additional predeclared types in Go 1.18.
|
||||
func additionalPredeclared() []types.Type {
|
||||
return []types.Type{
|
||||
// comparable
|
||||
types.Universe.Lookup("comparable").Type(),
|
||||
|
||||
// any
|
||||
types.Universe.Lookup("any").Type(),
|
||||
}
|
||||
}
|
||||
|
||||
// See cmd/compile/internal/types.SplitVargenSuffix.
|
||||
func splitVargenSuffix(name string) (base, suffix string) {
|
||||
i := len(name)
|
||||
for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
|
||||
i--
|
||||
}
|
||||
const dot = "·"
|
||||
if i >= len(dot) && name[i-len(dot):i] == dot {
|
||||
i -= len(dot)
|
||||
return name[:i], name[i:]
|
||||
}
|
||||
return name, ""
|
||||
}
|
||||
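Editor's sketch, not part of the vendored diff: what splitVargenSuffix above returns for a name carrying a compiler-generated "·N" suffix versus a plain name (the function body is copied here so the example is self-contained).

package main

import "fmt"

func splitVargenSuffix(name string) (base, suffix string) {
	i := len(name)
	for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
		i--
	}
	const dot = "·"
	if i >= len(dot) && name[i-len(dot):i] == dot {
		i -= len(dot)
		return name[:i], name[i:]
	}
	return name, ""
}

func main() {
	fmt.Println(splitVargenSuffix("T·2")) // "T" and "·2"
	fmt.Println(splitVargenSuffix("T2"))  // "T2" and "" (digits alone are not a vargen suffix)
}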
10
vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
generated
vendored
Normal file
10
vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !(go1.18 && goexperiment.unified)
|
||||
// +build !go1.18 !goexperiment.unified
|
||||
|
||||
package gcimporter
|
||||
|
||||
const unifiedIR = false
|
||||
10
vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
generated
vendored
Normal file
10
vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.18 && goexperiment.unified
|
||||
// +build go1.18,goexperiment.unified
|
||||
|
||||
package gcimporter
|
||||
|
||||
const unifiedIR = true
|
||||
19
vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go
generated
vendored
Normal file
19
vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.18
|
||||
// +build !go1.18
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
||||
err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data")
|
||||
return
|
||||
}
|
||||
728
vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
generated
vendored
Normal file
728
vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
generated
vendored
Normal file
@@ -0,0 +1,728 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Derived from go/internal/gcimporter/ureader.go
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/internal/pkgbits"
|
||||
)
|
||||
|
||||
// A pkgReader holds the shared state for reading a unified IR package
|
||||
// description.
|
||||
type pkgReader struct {
|
||||
pkgbits.PkgDecoder
|
||||
|
||||
fake fakeFileSet
|
||||
|
||||
ctxt *types.Context
|
||||
imports map[string]*types.Package // previously imported packages, indexed by path
|
||||
|
||||
// lazily initialized arrays corresponding to the unified IR
|
||||
// PosBase, Pkg, and Type sections, respectively.
|
||||
posBases []string // position bases (i.e., file names)
|
||||
pkgs []*types.Package
|
||||
typs []types.Type
|
||||
|
||||
// laterFns holds functions that need to be invoked at the end of
|
||||
// import reading.
|
||||
laterFns []func()
|
||||
// laterFors is used in case of 'type A B' to ensure that B is processed before A.
|
||||
laterFors map[types.Type]int
|
||||
|
||||
// ifaces holds a list of constructed Interfaces, which need to have
|
||||
// Complete called after importing is done.
|
||||
ifaces []*types.Interface
|
||||
}
|
||||
|
||||
// later adds a function to be invoked at the end of import reading.
|
||||
func (pr *pkgReader) later(fn func()) {
|
||||
pr.laterFns = append(pr.laterFns, fn)
|
||||
}
|
||||
|
||||
// See cmd/compile/internal/noder.derivedInfo.
|
||||
type derivedInfo struct {
|
||||
idx pkgbits.Index
|
||||
needed bool
|
||||
}
|
||||
|
||||
// See cmd/compile/internal/noder.typeInfo.
|
||||
type typeInfo struct {
|
||||
idx pkgbits.Index
|
||||
derived bool
|
||||
}
|
||||
|
||||
func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
||||
if !debug {
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
s := string(data)
|
||||
s = s[:strings.LastIndex(s, "\n$$\n")]
|
||||
input := pkgbits.NewPkgDecoder(path, s)
|
||||
pkg = readUnifiedPackage(fset, nil, imports, input)
|
||||
return
|
||||
}
|
||||
|
||||
// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
|
||||
func (pr *pkgReader) laterFor(t types.Type, fn func()) {
|
||||
if pr.laterFors == nil {
|
||||
pr.laterFors = make(map[types.Type]int)
|
||||
}
|
||||
pr.laterFors[t] = len(pr.laterFns)
|
||||
pr.laterFns = append(pr.laterFns, fn)
|
||||
}
|
||||
|
||||
// readUnifiedPackage reads a package description from the given
|
||||
// unified IR export data decoder.
|
||||
func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
|
||||
pr := pkgReader{
|
||||
PkgDecoder: input,
|
||||
|
||||
fake: fakeFileSet{
|
||||
fset: fset,
|
||||
files: make(map[string]*fileInfo),
|
||||
},
|
||||
|
||||
ctxt: ctxt,
|
||||
imports: imports,
|
||||
|
||||
posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
|
||||
pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
|
||||
typs: make([]types.Type, input.NumElems(pkgbits.RelocType)),
|
||||
}
|
||||
defer pr.fake.setLines()
|
||||
|
||||
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
|
||||
pkg := r.pkg()
|
||||
r.Bool() // has init
|
||||
|
||||
for i, n := 0, r.Len(); i < n; i++ {
|
||||
// As if r.obj(), but avoiding the Scope.Lookup call,
|
||||
// to avoid eager loading of imports.
|
||||
r.Sync(pkgbits.SyncObject)
|
||||
assert(!r.Bool())
|
||||
r.p.objIdx(r.Reloc(pkgbits.RelocObj))
|
||||
assert(r.Len() == 0)
|
||||
}
|
||||
|
||||
r.Sync(pkgbits.SyncEOF)
|
||||
|
||||
for _, fn := range pr.laterFns {
|
||||
fn()
|
||||
}
|
||||
|
||||
for _, iface := range pr.ifaces {
|
||||
iface.Complete()
|
||||
}
|
||||
|
||||
// Imports() of pkg are all of the transitive packages that were loaded.
|
||||
var imps []*types.Package
|
||||
for _, imp := range pr.pkgs {
|
||||
if imp != nil && imp != pkg {
|
||||
imps = append(imps, imp)
|
||||
}
|
||||
}
|
||||
sort.Sort(byPath(imps))
|
||||
pkg.SetImports(imps)
|
||||
|
||||
pkg.MarkComplete()
|
||||
return pkg
|
||||
}
|
||||
|
||||
// A reader holds the state for reading a single unified IR element
|
||||
// within a package.
|
||||
type reader struct {
|
||||
pkgbits.Decoder
|
||||
|
||||
p *pkgReader
|
||||
|
||||
dict *readerDict
|
||||
}
|
||||
|
||||
// A readerDict holds the state for type parameters that parameterize
|
||||
// the current unified IR element.
|
||||
type readerDict struct {
|
||||
// bounds is a slice of typeInfos corresponding to the underlying
|
||||
// bounds of the element's type parameters.
|
||||
bounds []typeInfo
|
||||
|
||||
// tparams is a slice of the constructed TypeParams for the element.
|
||||
tparams []*types.TypeParam
|
||||
|
||||
// derived is a slice of types derived from tparams, which may be
|
||||
// instantiated while reading the current element.
|
||||
derived []derivedInfo
|
||||
derivedTypes []types.Type // lazily instantiated from derived
|
||||
}
|
||||
|
||||
func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
|
||||
return &reader{
|
||||
Decoder: pr.NewDecoder(k, idx, marker),
|
||||
p: pr,
|
||||
}
|
||||
}
|
||||
|
||||
func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
|
||||
return &reader{
|
||||
Decoder: pr.TempDecoder(k, idx, marker),
|
||||
p: pr,
|
||||
}
|
||||
}
|
||||
|
||||
func (pr *pkgReader) retireReader(r *reader) {
|
||||
pr.RetireDecoder(&r.Decoder)
|
||||
}
|
||||
|
||||
// @@@ Positions
|
||||
|
||||
func (r *reader) pos() token.Pos {
|
||||
r.Sync(pkgbits.SyncPos)
|
||||
if !r.Bool() {
|
||||
return token.NoPos
|
||||
}
|
||||
|
||||
// TODO(mdempsky): Delta encoding.
|
||||
posBase := r.posBase()
|
||||
line := r.Uint()
|
||||
col := r.Uint()
|
||||
return r.p.fake.pos(posBase, int(line), int(col))
|
||||
}
|
||||
|
||||
func (r *reader) posBase() string {
|
||||
return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
|
||||
}
|
||||
|
||||
func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
|
||||
if b := pr.posBases[idx]; b != "" {
|
||||
return b
|
||||
}
|
||||
|
||||
var filename string
|
||||
{
|
||||
r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
|
||||
|
||||
// Within types2, position bases have a lot more details (e.g.,
|
||||
// keeping track of where //line directives appeared exactly).
|
||||
//
|
||||
// For go/types, we just track the file name.
|
||||
|
||||
filename = r.String()
|
||||
|
||||
if r.Bool() { // file base
|
||||
// Was: "b = token.NewTrimmedFileBase(filename, true)"
|
||||
} else { // line base
|
||||
pos := r.pos()
|
||||
line := r.Uint()
|
||||
col := r.Uint()
|
||||
|
||||
// Was: "b = token.NewLineBase(pos, filename, true, line, col)"
|
||||
_, _, _ = pos, line, col
|
||||
}
|
||||
pr.retireReader(r)
|
||||
}
|
||||
b := filename
|
||||
pr.posBases[idx] = b
|
||||
return b
|
||||
}
|
||||
|
||||
// @@@ Packages
|
||||
|
||||
func (r *reader) pkg() *types.Package {
|
||||
r.Sync(pkgbits.SyncPkg)
|
||||
return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
|
||||
}
|
||||
|
||||
func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
|
||||
// TODO(mdempsky): Consider using some non-nil pointer to indicate
|
||||
// the universe scope, so we don't need to keep re-reading it.
|
||||
if pkg := pr.pkgs[idx]; pkg != nil {
|
||||
return pkg
|
||||
}
|
||||
|
||||
pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
|
||||
pr.pkgs[idx] = pkg
|
||||
return pkg
|
||||
}
|
||||
|
||||
func (r *reader) doPkg() *types.Package {
|
||||
path := r.String()
|
||||
switch path {
|
||||
case "":
|
||||
path = r.p.PkgPath()
|
||||
case "builtin":
|
||||
return nil // universe
|
||||
case "unsafe":
|
||||
return types.Unsafe
|
||||
}
|
||||
|
||||
if pkg := r.p.imports[path]; pkg != nil {
|
||||
return pkg
|
||||
}
|
||||
|
||||
name := r.String()
|
||||
|
||||
pkg := types.NewPackage(path, name)
|
||||
r.p.imports[path] = pkg
|
||||
|
||||
return pkg
|
||||
}
|
||||
|
||||
// @@@ Types
|
||||
|
||||
func (r *reader) typ() types.Type {
|
||||
return r.p.typIdx(r.typInfo(), r.dict)
|
||||
}
|
||||
|
||||
func (r *reader) typInfo() typeInfo {
|
||||
r.Sync(pkgbits.SyncType)
|
||||
if r.Bool() {
|
||||
return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
|
||||
}
|
||||
return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
|
||||
}
|
||||
|
||||
func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
|
||||
idx := info.idx
|
||||
var where *types.Type
|
||||
if info.derived {
|
||||
where = &dict.derivedTypes[idx]
|
||||
idx = dict.derived[idx].idx
|
||||
} else {
|
||||
where = &pr.typs[idx]
|
||||
}
|
||||
|
||||
if typ := *where; typ != nil {
|
||||
return typ
|
||||
}
|
||||
|
||||
var typ types.Type
|
||||
{
|
||||
r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
|
||||
r.dict = dict
|
||||
|
||||
typ = r.doTyp()
|
||||
assert(typ != nil)
|
||||
pr.retireReader(r)
|
||||
}
|
||||
// See comment in pkgReader.typIdx explaining how this happens.
|
||||
if prev := *where; prev != nil {
|
||||
return prev
|
||||
}
|
||||
|
||||
*where = typ
|
||||
return typ
|
||||
}
|
||||
|
||||
func (r *reader) doTyp() (res types.Type) {
|
||||
switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
|
||||
default:
|
||||
errorf("unhandled type tag: %v", tag)
|
||||
panic("unreachable")
|
||||
|
||||
case pkgbits.TypeBasic:
|
||||
return types.Typ[r.Len()]
|
||||
|
||||
case pkgbits.TypeNamed:
|
||||
obj, targs := r.obj()
|
||||
name := obj.(*types.TypeName)
|
||||
if len(targs) != 0 {
|
||||
t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false)
|
||||
return t
|
||||
}
|
||||
return name.Type()
|
||||
|
||||
case pkgbits.TypeTypeParam:
|
||||
return r.dict.tparams[r.Len()]
|
||||
|
||||
case pkgbits.TypeArray:
|
||||
len := int64(r.Uint64())
|
||||
return types.NewArray(r.typ(), len)
|
||||
case pkgbits.TypeChan:
|
||||
dir := types.ChanDir(r.Len())
|
||||
return types.NewChan(dir, r.typ())
|
||||
case pkgbits.TypeMap:
|
||||
return types.NewMap(r.typ(), r.typ())
|
||||
case pkgbits.TypePointer:
|
||||
return types.NewPointer(r.typ())
|
||||
case pkgbits.TypeSignature:
|
||||
return r.signature(nil, nil, nil)
|
||||
case pkgbits.TypeSlice:
|
||||
return types.NewSlice(r.typ())
|
||||
case pkgbits.TypeStruct:
|
||||
return r.structType()
|
||||
case pkgbits.TypeInterface:
|
||||
return r.interfaceType()
|
||||
case pkgbits.TypeUnion:
|
||||
return r.unionType()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *reader) structType() *types.Struct {
|
||||
fields := make([]*types.Var, r.Len())
|
||||
var tags []string
|
||||
for i := range fields {
|
||||
pos := r.pos()
|
||||
pkg, name := r.selector()
|
||||
ftyp := r.typ()
|
||||
tag := r.String()
|
||||
embedded := r.Bool()
|
||||
|
||||
fields[i] = types.NewField(pos, pkg, name, ftyp, embedded)
|
||||
if tag != "" {
|
||||
for len(tags) < i {
|
||||
tags = append(tags, "")
|
||||
}
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
}
|
||||
return types.NewStruct(fields, tags)
|
||||
}
|
||||
|
||||
func (r *reader) unionType() *types.Union {
|
||||
terms := make([]*types.Term, r.Len())
|
||||
for i := range terms {
|
||||
terms[i] = types.NewTerm(r.Bool(), r.typ())
|
||||
}
|
||||
return types.NewUnion(terms)
|
||||
}
|
||||
|
||||
func (r *reader) interfaceType() *types.Interface {
|
||||
methods := make([]*types.Func, r.Len())
|
||||
embeddeds := make([]types.Type, r.Len())
|
||||
implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
|
||||
|
||||
for i := range methods {
|
||||
pos := r.pos()
|
||||
pkg, name := r.selector()
|
||||
mtyp := r.signature(nil, nil, nil)
|
||||
methods[i] = types.NewFunc(pos, pkg, name, mtyp)
|
||||
}
|
||||
|
||||
for i := range embeddeds {
|
||||
embeddeds[i] = r.typ()
|
||||
}
|
||||
|
||||
iface := types.NewInterfaceType(methods, embeddeds)
|
||||
if implicit {
|
||||
iface.MarkImplicit()
|
||||
}
|
||||
|
||||
// We need to call iface.Complete(), but if there are any embedded
|
||||
// defined types, then we may not have set their underlying
|
||||
// interface type yet. So we need to defer calling Complete until
|
||||
// after we've called SetUnderlying everywhere.
|
||||
//
|
||||
// TODO(mdempsky): After CL 424876 lands, it should be safe to call
|
||||
// iface.Complete() immediately.
|
||||
r.p.ifaces = append(r.p.ifaces, iface)
|
||||
|
||||
return iface
|
||||
}
|
||||
|
||||
func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature {
|
||||
r.Sync(pkgbits.SyncSignature)
|
||||
|
||||
params := r.params()
|
||||
results := r.params()
|
||||
variadic := r.Bool()
|
||||
|
||||
return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
|
||||
}
|
||||
|
||||
func (r *reader) params() *types.Tuple {
|
||||
r.Sync(pkgbits.SyncParams)
|
||||
|
||||
params := make([]*types.Var, r.Len())
|
||||
for i := range params {
|
||||
params[i] = r.param()
|
||||
}
|
||||
|
||||
return types.NewTuple(params...)
|
||||
}
|
||||
|
||||
func (r *reader) param() *types.Var {
|
||||
r.Sync(pkgbits.SyncParam)
|
||||
|
||||
pos := r.pos()
|
||||
pkg, name := r.localIdent()
|
||||
typ := r.typ()
|
||||
|
||||
return types.NewParam(pos, pkg, name, typ)
|
||||
}
|
||||
|
||||
// @@@ Objects
|
||||
|
||||
func (r *reader) obj() (types.Object, []types.Type) {
|
||||
r.Sync(pkgbits.SyncObject)
|
||||
|
||||
assert(!r.Bool())
|
||||
|
||||
pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
|
||||
obj := pkgScope(pkg).Lookup(name)
|
||||
|
||||
targs := make([]types.Type, r.Len())
|
||||
for i := range targs {
|
||||
targs[i] = r.typ()
|
||||
}
|
||||
|
||||
return obj, targs
|
||||
}
|
||||
|
||||
func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
|
||||
|
||||
var objPkg *types.Package
|
||||
var objName string
|
||||
var tag pkgbits.CodeObj
|
||||
{
|
||||
rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
|
||||
|
||||
objPkg, objName = rname.qualifiedIdent()
|
||||
assert(objName != "")
|
||||
|
||||
tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
|
||||
pr.retireReader(rname)
|
||||
}
|
||||
|
||||
if tag == pkgbits.ObjStub {
|
||||
assert(objPkg == nil || objPkg == types.Unsafe)
|
||||
return objPkg, objName
|
||||
}
|
||||
|
||||
// Ignore local types promoted to global scope (#55110).
|
||||
if _, suffix := splitVargenSuffix(objName); suffix != "" {
|
||||
return objPkg, objName
|
||||
}
|
||||
|
||||
if objPkg.Scope().Lookup(objName) == nil {
|
||||
dict := pr.objDictIdx(idx)
|
||||
|
||||
r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
|
||||
r.dict = dict
|
||||
|
||||
declare := func(obj types.Object) {
|
||||
objPkg.Scope().Insert(obj)
|
||||
}
|
||||
|
||||
switch tag {
|
||||
default:
|
||||
panic("weird")
|
||||
|
||||
case pkgbits.ObjAlias:
|
||||
pos := r.pos()
|
||||
typ := r.typ()
|
||||
declare(types.NewTypeName(pos, objPkg, objName, typ))
|
||||
|
||||
case pkgbits.ObjConst:
|
||||
pos := r.pos()
|
||||
typ := r.typ()
|
||||
val := r.Value()
|
||||
declare(types.NewConst(pos, objPkg, objName, typ, val))
|
||||
|
||||
case pkgbits.ObjFunc:
|
||||
pos := r.pos()
|
||||
tparams := r.typeParamNames()
|
||||
sig := r.signature(nil, nil, tparams)
|
||||
declare(types.NewFunc(pos, objPkg, objName, sig))
|
||||
|
||||
case pkgbits.ObjType:
|
||||
pos := r.pos()
|
||||
|
||||
obj := types.NewTypeName(pos, objPkg, objName, nil)
|
||||
named := types.NewNamed(obj, nil, nil)
|
||||
declare(obj)
|
||||
|
||||
named.SetTypeParams(r.typeParamNames())
|
||||
|
||||
setUnderlying := func(underlying types.Type) {
|
||||
// If the underlying type is an interface, we need to
|
||||
// duplicate its methods so we can replace the receiver
|
||||
// parameter's type (#49906).
|
||||
if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
|
||||
methods := make([]*types.Func, iface.NumExplicitMethods())
|
||||
for i := range methods {
|
||||
fn := iface.ExplicitMethod(i)
|
||||
sig := fn.Type().(*types.Signature)
|
||||
|
||||
recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
|
||||
methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
|
||||
}
|
||||
|
||||
embeds := make([]types.Type, iface.NumEmbeddeds())
|
||||
for i := range embeds {
|
||||
embeds[i] = iface.EmbeddedType(i)
|
||||
}
|
||||
|
||||
newIface := types.NewInterfaceType(methods, embeds)
|
||||
r.p.ifaces = append(r.p.ifaces, newIface)
|
||||
underlying = newIface
|
||||
}
|
||||
|
||||
named.SetUnderlying(underlying)
|
||||
}
|
||||
|
||||
// Since go.dev/cl/455279, we can assume rhs.Underlying() will
|
||||
// always be non-nil. However, to temporarily support users of
|
||||
// older snapshot releases, we continue to fallback to the old
|
||||
// behavior for now.
|
||||
//
|
||||
// TODO(mdempsky): Remove fallback code and simplify after
|
||||
// allowing time for snapshot users to upgrade.
|
||||
rhs := r.typ()
|
||||
if underlying := rhs.Underlying(); underlying != nil {
|
||||
setUnderlying(underlying)
|
||||
} else {
|
||||
pk := r.p
|
||||
pk.laterFor(named, func() {
|
||||
// First be sure that the rhs is initialized, if it needs to be initialized.
|
||||
delete(pk.laterFors, named) // prevent cycles
|
||||
if i, ok := pk.laterFors[rhs]; ok {
|
||||
f := pk.laterFns[i]
|
||||
pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
|
||||
f() // initialize RHS
|
||||
}
|
||||
setUnderlying(rhs.Underlying())
|
||||
})
|
||||
}
|
||||
|
||||
for i, n := 0, r.Len(); i < n; i++ {
|
||||
named.AddMethod(r.method())
|
||||
}
|
||||
|
||||
case pkgbits.ObjVar:
|
||||
pos := r.pos()
|
||||
typ := r.typ()
|
||||
declare(types.NewVar(pos, objPkg, objName, typ))
|
||||
}
|
||||
}
|
||||
|
||||
return objPkg, objName
|
||||
}
|
||||
|
||||
func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
|
||||
|
||||
var dict readerDict
|
||||
|
||||
{
|
||||
r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
|
||||
if implicits := r.Len(); implicits != 0 {
|
||||
errorf("unexpected object with %v implicit type parameter(s)", implicits)
|
||||
}
|
||||
|
||||
dict.bounds = make([]typeInfo, r.Len())
|
||||
for i := range dict.bounds {
|
||||
dict.bounds[i] = r.typInfo()
|
||||
}
|
||||
|
||||
dict.derived = make([]derivedInfo, r.Len())
|
||||
dict.derivedTypes = make([]types.Type, len(dict.derived))
|
||||
for i := range dict.derived {
|
||||
dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
|
||||
}
|
||||
|
||||
pr.retireReader(r)
|
||||
}
|
||||
// function references follow, but reader doesn't need those
|
||||
|
||||
return &dict
|
||||
}
|
||||
|
||||
func (r *reader) typeParamNames() []*types.TypeParam {
|
||||
r.Sync(pkgbits.SyncTypeParamNames)
|
||||
|
||||
// Note: This code assumes it only processes objects without
|
||||
// implicit type parameters. This is currently fine, because
|
||||
// reader is only used to read in exported declarations, which are
|
||||
// always package scoped.
|
||||
|
||||
if len(r.dict.bounds) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Careful: Type parameter lists may have cycles. To allow for this,
|
||||
// we construct the type parameter list in two passes: first we
|
||||
// create all the TypeNames and TypeParams, then we construct and
|
||||
// set the bound type.
|
||||
|
||||
r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
|
||||
for i := range r.dict.bounds {
|
||||
pos := r.pos()
|
||||
pkg, name := r.localIdent()
|
||||
|
||||
tname := types.NewTypeName(pos, pkg, name, nil)
|
||||
r.dict.tparams[i] = types.NewTypeParam(tname, nil)
|
||||
}
|
||||
|
||||
typs := make([]types.Type, len(r.dict.bounds))
|
||||
for i, bound := range r.dict.bounds {
|
||||
typs[i] = r.p.typIdx(bound, r.dict)
|
||||
}
|
||||
|
||||
// TODO(mdempsky): This is subtle, elaborate further.
|
||||
//
|
||||
// We have to save tparams outside of the closure, because
|
||||
// typeParamNames() can be called multiple times with the same
|
||||
// dictionary instance.
|
||||
//
|
||||
// Also, this needs to happen later to make sure SetUnderlying has
|
||||
// been called.
|
||||
//
|
||||
// TODO(mdempsky): Is it safe to have a single "later" slice or do
|
||||
// we need to have multiple passes? See comments on CL 386002 and
|
||||
// go.dev/issue/52104.
|
||||
tparams := r.dict.tparams
|
||||
r.p.later(func() {
|
||||
for i, typ := range typs {
|
||||
tparams[i].SetConstraint(typ)
|
||||
}
|
||||
})
|
||||
|
||||
return r.dict.tparams
|
||||
}
|
||||
|
||||
func (r *reader) method() *types.Func {
|
||||
r.Sync(pkgbits.SyncMethod)
|
||||
pos := r.pos()
|
||||
pkg, name := r.selector()
|
||||
|
||||
rparams := r.typeParamNames()
|
||||
sig := r.signature(r.param(), rparams, nil)
|
||||
|
||||
_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
|
||||
return types.NewFunc(pos, pkg, name, sig)
|
||||
}
|
||||
|
||||
func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
|
||||
func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
|
||||
func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) }
|
||||
|
||||
func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
|
||||
r.Sync(marker)
|
||||
return r.pkg(), r.String()
|
||||
}
|
||||
|
||||
// pkgScope returns pkg.Scope().
|
||||
// If pkg is nil, it returns types.Universe instead.
|
||||
//
|
||||
// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
|
||||
func pkgScope(pkg *types.Package) *types.Scope {
|
||||
if pkg != nil {
|
||||
return pkg.Scope()
|
||||
}
|
||||
return types.Universe
|
||||
}
|
||||
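Editor's sketch, not part of the vendored diff: the record-now-run-later pattern behind pkgReader.later and laterFns above, reduced to its essentials. A reference decoded early may point at something that only becomes resolvable once the whole package has been read.

package main

import "fmt"

type loader struct {
	laterFns []func()
}

// later defers fn until the end of decoding, like pkgReader.later above.
func (l *loader) later(fn func()) { l.laterFns = append(l.laterFns, fn) }

func main() {
	var l loader
	resolved := map[string]string{}

	// While decoding, defer anything that depends on not-yet-read data.
	l.later(func() { fmt.Println("B resolves to:", resolved["B"]) })

	resolved["B"] = "type B = int" // becomes known later in the stream

	for _, fn := range l.laterFns { // run all deferred work at the end
		fn()
	}
}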
462
vendor/golang.org/x/tools/internal/gocommand/invoke.go
generated
vendored
Normal file
462
vendor/golang.org/x/tools/internal/gocommand/invoke.go
generated
vendored
Normal file
@@ -0,0 +1,462 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gocommand is a helper for calling the go command.
|
||||
package gocommand
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
exec "golang.org/x/sys/execabs"
|
||||
|
||||
"golang.org/x/tools/internal/event"
|
||||
"golang.org/x/tools/internal/event/keys"
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
"golang.org/x/tools/internal/event/tag"
|
||||
)
|
||||
|
||||
// A Runner will run go command invocations and serialize
|
||||
// them if it sees a concurrency error.
|
||||
type Runner struct {
|
||||
// once guards the runner initialization.
|
||||
once sync.Once
|
||||
|
||||
// inFlight tracks available workers.
|
||||
inFlight chan struct{}
|
||||
|
||||
// serialized guards the ability to run a go command serially,
|
||||
// to avoid deadlocks when claiming workers.
|
||||
serialized chan struct{}
|
||||
}
|
||||
|
||||
const maxInFlight = 10
|
||||
|
||||
func (runner *Runner) initialize() {
|
||||
runner.once.Do(func() {
|
||||
runner.inFlight = make(chan struct{}, maxInFlight)
|
||||
runner.serialized = make(chan struct{}, 1)
|
||||
})
|
||||
}
|
||||
|
||||
// 1.13: go: updates to go.mod needed, but contents have changed
|
||||
// 1.14: go: updating go.mod: existing contents have changed since last read
|
||||
var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
|
||||
|
||||
// verb is an event label for the go command verb.
|
||||
var verb = keys.NewString("verb", "go command verb")
|
||||
|
||||
func invLabels(inv Invocation) []label.Label {
|
||||
return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)}
|
||||
}
|
||||
|
||||
// Run is a convenience wrapper around RunRaw.
|
||||
// It returns only stdout and a "friendly" error.
|
||||
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
|
||||
ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
|
||||
defer done()
|
||||
|
||||
stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
|
||||
return stdout, friendly
|
||||
}
|
||||
|
||||
// RunPiped runs the invocation serially, always waiting for any concurrent
|
||||
// invocations to complete first.
|
||||
func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
|
||||
ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
|
||||
defer done()
|
||||
|
||||
_, err := runner.runPiped(ctx, inv, stdout, stderr)
|
||||
return err
|
||||
}
|
||||
|
||||
// RunRaw runs the invocation, serializing requests only if they fight over
|
||||
// go.mod changes.
|
||||
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
|
||||
ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
|
||||
defer done()
|
||||
// Make sure the runner is always initialized.
|
||||
runner.initialize()
|
||||
|
||||
// First, try to run the go command concurrently.
|
||||
stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
|
||||
|
||||
// If we encounter a load concurrency error, we need to retry serially.
|
||||
if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) {
|
||||
return stdout, stderr, friendlyErr, err
|
||||
}
|
||||
event.Error(ctx, "Load concurrency error, will retry serially", err)
|
||||
|
||||
// Run serially by calling runPiped.
|
||||
stdout.Reset()
|
||||
stderr.Reset()
|
||||
friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
|
||||
return stdout, stderr, friendlyErr, err
|
||||
}
|
||||
|
||||
func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
|
||||
// Wait for 1 worker to become available.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, nil, nil, ctx.Err()
|
||||
case runner.inFlight <- struct{}{}:
|
||||
defer func() { <-runner.inFlight }()
|
||||
}
|
||||
|
||||
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
|
||||
friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
|
||||
return stdout, stderr, friendlyErr, err
|
||||
}
|
||||
|
||||
func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
|
||||
// Make sure the runner is always initialized.
|
||||
runner.initialize()
|
||||
|
||||
// Acquire the serialization lock. This avoids deadlocks between two
|
||||
// runPiped commands.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case runner.serialized <- struct{}{}:
|
||||
defer func() { <-runner.serialized }()
|
||||
}
|
||||
|
||||
// Wait for all in-progress go commands to return before proceeding,
|
||||
// to avoid load concurrency errors.
|
||||
for i := 0; i < maxInFlight; i++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case runner.inFlight <- struct{}{}:
|
||||
// Make sure we always "return" any workers we took.
|
||||
defer func() { <-runner.inFlight }()
|
||||
}
|
||||
}
|
||||
|
||||
return inv.runWithFriendlyError(ctx, stdout, stderr)
|
||||
}
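
// exampleExclusiveSlot is an illustrative sketch, not part of the upstream
// file: it shows in isolation the serialization idiom used by runPiped above,
// where a caller that fills every slot of a buffered channel knows that no
// concurrent worker still holds one. All names and the slot count are
// illustrative assumptions.
func exampleExclusiveSlot(run func()) {
	const slots = 10 // mirrors maxInFlight; the value here is illustrative
	sem := make(chan struct{}, slots)

	// A concurrent caller takes a single slot for the duration of its work.
	sem <- struct{}{}
	// ... concurrent work would happen here ...
	<-sem

	// An exclusive caller takes every slot, so nothing else can be running.
	for i := 0; i < slots; i++ {
		sem <- struct{}{}
	}
	run() // serialized work
	for i := 0; i < slots; i++ {
		<-sem
	}
}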
|
||||
|
||||
// An Invocation represents a call to the go command.
|
||||
type Invocation struct {
|
||||
Verb string
|
||||
Args []string
|
||||
BuildFlags []string
|
||||
|
||||
// If ModFlag is set, the go command is invoked with -mod=ModFlag.
|
||||
ModFlag string
|
||||
|
||||
// If ModFile is set, the go command is invoked with -modfile=ModFile.
|
||||
ModFile string
|
||||
|
||||
// If Overlay is set, the go command is invoked with -overlay=Overlay.
|
||||
Overlay string
|
||||
|
||||
// If CleanEnv is set, the invocation will run only with the environment
|
||||
// in Env, not starting with os.Environ.
|
||||
CleanEnv bool
|
||||
Env []string
|
||||
WorkingDir string
|
||||
Logf func(format string, args ...interface{})
|
||||
}
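
// exampleGoEnv is an illustrative sketch, not part of the upstream file,
// showing how a caller inside x/tools might run `go env GOMODCACHE` through a
// Runner and an Invocation. The working directory is a made-up assumption;
// real callers keep a long-lived *Runner so concurrency errors can be retried
// serially across invocations.
func exampleGoEnv(ctx context.Context) (string, error) {
	runner := &Runner{}
	inv := Invocation{
		Verb:       "env",
		Args:       []string{"GOMODCACHE"},
		WorkingDir: ".", // hypothetical working directory
	}
	stdout, err := runner.Run(ctx, inv)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(stdout.String()), nil
}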
|
||||
|
||||
func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
|
||||
rawError = i.run(ctx, stdout, stderr)
|
||||
if rawError != nil {
|
||||
friendlyError = rawError
|
||||
// Check for 'go' executable not being found.
|
||||
if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
|
||||
friendlyError = fmt.Errorf("go command required, not found: %v", ee)
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
friendlyError = ctx.Err()
|
||||
}
|
||||
friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
|
||||
log := i.Logf
|
||||
if log == nil {
|
||||
log = func(string, ...interface{}) {}
|
||||
}
|
||||
|
||||
goArgs := []string{i.Verb}
|
||||
|
||||
appendModFile := func() {
|
||||
if i.ModFile != "" {
|
||||
goArgs = append(goArgs, "-modfile="+i.ModFile)
|
||||
}
|
||||
}
|
||||
appendModFlag := func() {
|
||||
if i.ModFlag != "" {
|
||||
goArgs = append(goArgs, "-mod="+i.ModFlag)
|
||||
}
|
||||
}
|
||||
appendOverlayFlag := func() {
|
||||
if i.Overlay != "" {
|
||||
goArgs = append(goArgs, "-overlay="+i.Overlay)
|
||||
}
|
||||
}
|
||||
|
||||
switch i.Verb {
|
||||
case "env", "version":
|
||||
goArgs = append(goArgs, i.Args...)
|
||||
case "mod":
|
||||
// mod needs the sub-verb before flags.
|
||||
goArgs = append(goArgs, i.Args[0])
|
||||
appendModFile()
|
||||
goArgs = append(goArgs, i.Args[1:]...)
|
||||
case "get":
|
||||
goArgs = append(goArgs, i.BuildFlags...)
|
||||
appendModFile()
|
||||
goArgs = append(goArgs, i.Args...)
|
||||
|
||||
default: // notably list and build.
|
||||
goArgs = append(goArgs, i.BuildFlags...)
|
||||
appendModFile()
|
||||
appendModFlag()
|
||||
appendOverlayFlag()
|
||||
goArgs = append(goArgs, i.Args...)
|
||||
}
|
||||
cmd := exec.Command("go", goArgs...)
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
|
||||
// cmd.WaitDelay was added only in go1.20 (see #50436).
|
||||
if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
|
||||
// https://go.dev/issue/59541: don't wait forever copying stderr
|
||||
// after the command has exited.
|
||||
// After CL 484741 we copy stdout manually, so we'll stop reading that as
|
||||
// soon as ctx is done. However, we also don't want to wait around forever
|
||||
// for stderr. Give a much-longer-than-reasonable delay and then assume that
|
||||
// something has wedged in the kernel or runtime.
|
||||
waitDelay.Set(reflect.ValueOf(30 * time.Second))
|
||||
}
|
||||
|
||||
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||
// expects the working directory to keep the original path, including the
|
||||
// go command when dealing with modules.
|
||||
// The Go stdlib has a special feature where if the cwd and the PWD are the
|
||||
// same node then it trusts the PWD, so by setting it in the env for the child
|
||||
// process we fix up all the paths returned by the go command.
|
||||
if !i.CleanEnv {
|
||||
cmd.Env = os.Environ()
|
||||
}
|
||||
cmd.Env = append(cmd.Env, i.Env...)
|
||||
if i.WorkingDir != "" {
|
||||
cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
|
||||
cmd.Dir = i.WorkingDir
|
||||
}
|
||||
|
||||
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
|
||||
|
||||
return runCmdContext(ctx, cmd)
|
||||
}
|
||||
|
||||
// DebugHangingGoCommands may be set by tests to enable additional
|
||||
// instrumentation (including panics) for debugging hanging Go commands.
|
||||
//
|
||||
// See golang/go#54461 for details.
|
||||
var DebugHangingGoCommands = false
|
||||
|
||||
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
|
||||
// before os.Kill.
|
||||
func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
|
||||
// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
|
||||
// copy it to the Writer in a goroutine until the process has finished and
|
||||
// either the pipe reaches EOF or command's WaitDelay expires.
|
||||
//
|
||||
// However, the output from 'go list' can be quite large, and we don't want to
|
||||
// keep reading (and allocating buffers) if we've already decided we don't
|
||||
// care about the output. We don't want to wait for the process to finish, and
|
||||
// we don't want to wait for the WaitDelay to expire either.
|
||||
//
|
||||
// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
|
||||
// it with a pipe (which is an *os.File), which we can close in order to stop
|
||||
// copying output as soon as we realize we don't care about it.
|
||||
var stdoutW *os.File
|
||||
if cmd.Stdout != nil {
|
||||
if _, ok := cmd.Stdout.(*os.File); !ok {
|
||||
var stdoutR *os.File
|
||||
stdoutR, stdoutW, err = os.Pipe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
prevStdout := cmd.Stdout
|
||||
cmd.Stdout = stdoutW
|
||||
|
||||
stdoutErr := make(chan error, 1)
|
||||
go func() {
|
||||
_, err := io.Copy(prevStdout, stdoutR)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("copying stdout: %w", err)
|
||||
}
|
||||
stdoutErr <- err
|
||||
}()
|
||||
defer func() {
|
||||
// We started a goroutine to copy a stdout pipe.
|
||||
// Wait for it to finish, or terminate it if need be.
|
||||
var err2 error
|
||||
select {
|
||||
case err2 = <-stdoutErr:
|
||||
stdoutR.Close()
|
||||
case <-ctx.Done():
|
||||
stdoutR.Close()
|
||||
// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
|
||||
// should cause the Read call in io.Copy to unblock and return
|
||||
// immediately, but we still need to receive from stdoutErr to confirm
|
||||
// that it has happened.
|
||||
<-stdoutErr
|
||||
err2 = ctx.Err()
|
||||
}
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
}()
|
||||
|
||||
// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
|
||||
// same writer, and have a type that can be compared with ==, at most
|
||||
// one goroutine at a time will call Write.”
|
||||
//
|
||||
// Since we're starting a goroutine that writes to cmd.Stdout, we must
|
||||
// also update cmd.Stderr so that it still holds.
|
||||
func() {
|
||||
defer func() { recover() }()
|
||||
if cmd.Stderr == prevStdout {
|
||||
cmd.Stderr = cmd.Stdout
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
err = cmd.Start()
|
||||
if stdoutW != nil {
|
||||
// The child process has inherited the pipe file,
|
||||
// so close the copy held in this process.
|
||||
stdoutW.Close()
|
||||
stdoutW = nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resChan := make(chan error, 1)
|
||||
go func() {
|
||||
resChan <- cmd.Wait()
|
||||
}()
|
||||
|
||||
// If we're interested in debugging hanging Go commands, stop waiting after a
|
||||
// minute and panic with interesting information.
|
||||
debug := DebugHangingGoCommands
|
||||
if debug {
|
||||
timer := time.NewTimer(1 * time.Minute)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case err := <-resChan:
|
||||
return err
|
||||
case <-timer.C:
|
||||
HandleHangingGoCommand(cmd.Process)
|
||||
case <-ctx.Done():
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case err := <-resChan:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
// Cancelled. Interrupt and see if it ends voluntarily.
|
||||
if err := cmd.Process.Signal(os.Interrupt); err == nil {
|
||||
// (We used to wait only 1s but this proved
|
||||
// fragile on loaded builder machines.)
|
||||
timer := time.NewTimer(5 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case err := <-resChan:
|
||||
return err
|
||||
case <-timer.C:
|
||||
}
|
||||
}
|
||||
|
||||
// Didn't shut down in response to interrupt. Kill it hard.
|
||||
// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
|
||||
// on certain platforms, such as unix.
|
||||
if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
|
||||
log.Printf("error killing the Go command: %v", err)
|
||||
}
|
||||
|
||||
return <-resChan
|
||||
}
|
||||
|
||||
func HandleHangingGoCommand(proc *os.Process) {
|
||||
switch runtime.GOOS {
|
||||
case "linux", "darwin", "freebsd", "netbsd":
|
||||
fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
|
||||
|
||||
The gopls test runner has detected a hanging go command. In order to debug
|
||||
this, the output of ps and lsof/fstat is printed below.
|
||||
|
||||
See golang/go#54461 for more details.`)
|
||||
|
||||
fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
|
||||
fmt.Fprintln(os.Stderr, "-------------------------")
|
||||
psCmd := exec.Command("ps", "axo", "ppid,pid,command")
|
||||
psCmd.Stdout = os.Stderr
|
||||
psCmd.Stderr = os.Stderr
|
||||
if err := psCmd.Run(); err != nil {
|
||||
panic(fmt.Sprintf("running ps: %v", err))
|
||||
}
|
||||
|
||||
listFiles := "lsof"
|
||||
if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
|
||||
listFiles = "fstat"
|
||||
}
|
||||
|
||||
fmt.Fprintln(os.Stderr, "\n"+listFiles+":")
|
||||
fmt.Fprintln(os.Stderr, "-----")
|
||||
listFilesCmd := exec.Command(listFiles)
|
||||
listFilesCmd.Stdout = os.Stderr
|
||||
listFilesCmd.Stderr = os.Stderr
|
||||
if err := listFilesCmd.Run(); err != nil {
|
||||
panic(fmt.Sprintf("running %s: %v", listFiles, err))
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid))
|
||||
}
|
||||
|
||||
func cmdDebugStr(cmd *exec.Cmd) string {
|
||||
env := make(map[string]string)
|
||||
for _, kv := range cmd.Env {
|
||||
split := strings.SplitN(kv, "=", 2)
|
||||
if len(split) == 2 {
|
||||
k, v := split[0], split[1]
|
||||
env[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
var args []string
|
||||
for _, arg := range cmd.Args {
|
||||
quoted := strconv.Quote(arg)
|
||||
if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
|
||||
args = append(args, quoted)
|
||||
} else {
|
||||
args = append(args, arg)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
|
||||
}
|
||||
109
vendor/golang.org/x/tools/internal/gocommand/vendor.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocommand

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"golang.org/x/mod/semver"
)

// ModuleJSON holds information about a module.
type ModuleJSON struct {
	Path      string      // module path
	Version   string      // module version
	Versions  []string    // available module versions (with -versions)
	Replace   *ModuleJSON // replaced by this module
	Time      *time.Time  // time version was created
	Update    *ModuleJSON // available update, if any (with -u)
	Main      bool        // is this the main module?
	Indirect  bool        // is this module only an indirect dependency of main module?
	Dir       string      // directory holding files for this module, if any
	GoMod     string      // path to go.mod file used when loading this module, if any
	GoVersion string      // go version used in module
}

var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)

// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands
// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
// of which only Verb and Args are modified to run the appropriate Go command.
// Inspired by setDefaultBuildMod in modload/init.go
func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) {
	mainMod, go114, err := getMainModuleAnd114(ctx, inv, r)
	if err != nil {
		return false, nil, err
	}

	// We check the GOFLAGS to see if there is anything overridden or not.
	inv.Verb = "env"
	inv.Args = []string{"GOFLAGS"}
	stdout, err := r.Run(ctx, inv)
	if err != nil {
		return false, nil, err
	}
	goflags := string(bytes.TrimSpace(stdout.Bytes()))
	matches := modFlagRegexp.FindStringSubmatch(goflags)
	var modFlag string
	if len(matches) != 0 {
		modFlag = matches[1]
	}
	// Don't override an explicit '-mod=' argument.
	if modFlag == "vendor" {
		return true, mainMod, nil
	} else if modFlag != "" {
		return false, nil, nil
	}
	if mainMod == nil || !go114 {
		return false, nil, nil
	}
	// Check 1.14's automatic vendor mode.
	if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
		if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
			// The Go version is at least 1.14, and a vendor directory exists.
			// Set -mod=vendor by default.
			return true, mainMod, nil
		}
	}
	return false, nil, nil
}
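
// exampleModFlagFromGOFLAGS is an illustrative sketch, not part of the
// upstream file, showing how the -mod flag is extracted from a GOFLAGS value
// in VendorEnabled above. For GOFLAGS="-trimpath -mod=vendor" it yields
// "vendor"; the function name is a made-up example.
func exampleModFlagFromGOFLAGS(goflags string) string {
	matches := modFlagRegexp.FindStringSubmatch(goflags)
	if len(matches) == 2 {
		return matches[1] // the captured value after -mod= or "-mod "
	}
	return ""
}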

// getMainModuleAnd114 gets one of the main modules' information and whether the
// go command in use is 1.14+. This is the information needed to figure out
// if vendoring should be enabled.
func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
	const format = `{{.Path}}
{{.Dir}}
{{.GoMod}}
{{.GoVersion}}
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
`
	inv.Verb = "list"
	inv.Args = []string{"-m", "-f", format}
	stdout, err := r.Run(ctx, inv)
	if err != nil {
		return nil, false, err
	}

	lines := strings.Split(stdout.String(), "\n")
	if len(lines) < 5 {
		return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String())
	}
	mod := &ModuleJSON{
		Path:      lines[0],
		Dir:       lines[1],
		GoMod:     lines[2],
		GoVersion: lines[3],
		Main:      true,
	}
	return mod, lines[4] == "go1.14", nil
}
71
vendor/golang.org/x/tools/internal/gocommand/version.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocommand

import (
	"context"
	"fmt"
	"regexp"
	"strings"
)

// GoVersion reports the minor version number of the highest release
// tag built into the go command on the PATH.
//
// Note that this may be higher than the version of the go tool used
// to build this application, and thus the versions of the standard
// go/{scanner,parser,ast,types} packages that are linked into it.
// In that case, callers should either downgrade to the version of
// go used to build the application, or report an error that the
// application is too old to use the go command on the PATH.
func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
	inv.Verb = "list"
	inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
	inv.BuildFlags = nil // This is not a build command.
	inv.ModFlag = ""
	inv.ModFile = ""
	inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off")

	stdoutBytes, err := r.Run(ctx, inv)
	if err != nil {
		return 0, err
	}
	stdout := stdoutBytes.String()
	if len(stdout) < 3 {
		return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
	}
	// Split up "[go1.1 go1.15]" and return highest go1.X value.
	tags := strings.Fields(stdout[1 : len(stdout)-2])
	for i := len(tags) - 1; i >= 0; i-- {
		var version int
		if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil {
			continue
		}
		return version, nil
	}
	return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
}

// GoVersionOutput returns the complete output of the go version command.
func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
	inv.Verb = "version"
	goVersion, err := r.Run(ctx, inv)
	if err != nil {
		return "", err
	}
	return goVersion.String(), nil
}

// ParseGoVersionOutput extracts the Go version string
// from the output of the "go version" command.
// Given an unrecognized form, it returns an empty string.
func ParseGoVersionOutput(data string) string {
	re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
	m := re.FindStringSubmatch(data)
	if len(m) != 2 {
		return "" // unrecognized version
	}
	return m[1]
}
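
// The following sketch is not part of the upstream file; it shows the expected
// behavior of ParseGoVersionOutput on a few representative inputs. The version
// strings are made-up examples.
func exampleParseGoVersion() {
	fmt.Println(ParseGoVersionOutput("go version go1.21.3 linux/amd64"))            // go1.21.3
	fmt.Println(ParseGoVersionOutput("go version devel go1.22-abc123 linux/amd64")) // devel go1.22-abc123
	fmt.Println(ParseGoVersionOutput("unrelated output"))                           // (empty string)
}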
260
vendor/golang.org/x/tools/internal/gopathwalk/walk.go
generated
vendored
Normal file
@@ -0,0 +1,260 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gopathwalk is like filepath.Walk but specialized for finding Go
|
||||
// packages, particularly in $GOPATH and $GOROOT.
|
||||
package gopathwalk
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/fastwalk"
|
||||
)
|
||||
|
||||
// Options controls the behavior of a Walk call.
|
||||
type Options struct {
|
||||
// If Logf is non-nil, debug logging is enabled through this function.
|
||||
Logf func(format string, args ...interface{})
|
||||
// Search module caches. Also disables legacy goimports ignore rules.
|
||||
ModulesEnabled bool
|
||||
}
|
||||
|
||||
// RootType indicates the type of a Root.
|
||||
type RootType int
|
||||
|
||||
const (
|
||||
RootUnknown RootType = iota
|
||||
RootGOROOT
|
||||
RootGOPATH
|
||||
RootCurrentModule
|
||||
RootModuleCache
|
||||
RootOther
|
||||
)
|
||||
|
||||
// A Root is a starting point for a Walk.
|
||||
type Root struct {
|
||||
Path string
|
||||
Type RootType
|
||||
}
|
||||
|
||||
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||
// For each package found, add will be called (concurrently) with the absolute
|
||||
// paths of the containing source directory and the package directory.
|
||||
// add will be called concurrently.
|
||||
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
|
||||
WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
|
||||
}
|
||||
|
||||
// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||
// For each package found, add will be called (concurrently) with the absolute
|
||||
// paths of the containing source directory and the package directory.
|
||||
// For each directory that will be scanned, skip will be called (concurrently)
|
||||
// with the absolute paths of the containing source directory and the directory.
|
||||
// If skip returns false on a directory it will be processed.
|
||||
// add will be called concurrently.
|
||||
// skip will be called concurrently.
|
||||
func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
|
||||
for _, root := range roots {
|
||||
walkDir(root, add, skip, opts)
|
||||
}
|
||||
}
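
// exampleWalkGOPATH is an illustrative sketch, not part of the upstream file,
// of how a caller might scan a single GOPATH source root with Walk and collect
// candidate package directories. The root path is a made-up example, and the
// sketch assumes "sync" were imported (it is not imported by this file); the
// mutex is needed because add is called concurrently.
func exampleWalkGOPATH() []string {
	var mu sync.Mutex
	var dirs []string
	root := Root{Path: "/home/user/go/src", Type: RootGOPATH} // hypothetical path
	add := func(root Root, dir string) {
		mu.Lock()
		defer mu.Unlock()
		dirs = append(dirs, dir)
	}
	Walk([]Root{root}, add, Options{ModulesEnabled: false})
	return dirs
}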
|
||||
|
||||
// walkDir creates a walker and starts fastwalk with this walker.
|
||||
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
|
||||
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("skipping nonexistent directory: %v", root.Path)
|
||||
}
|
||||
return
|
||||
}
|
||||
start := time.Now()
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("scanning %s", root.Path)
|
||||
}
|
||||
w := &walker{
|
||||
root: root,
|
||||
add: add,
|
||||
skip: skip,
|
||||
opts: opts,
|
||||
}
|
||||
w.init()
|
||||
if err := fastwalk.Walk(root.Path, w.walk); err != nil {
|
||||
logf := opts.Logf
|
||||
if logf == nil {
|
||||
logf = log.Printf
|
||||
}
|
||||
logf("scanning directory %v: %v", root.Path, err)
|
||||
}
|
||||
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("scanned %s in %v", root.Path, time.Since(start))
|
||||
}
|
||||
}
|
||||
|
||||
// walker is the callback for fastwalk.Walk.
|
||||
type walker struct {
|
||||
root Root // The source directory to scan.
|
||||
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
|
||||
skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
|
||||
opts Options // Options passed to Walk by the user.
|
||||
|
||||
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
|
||||
}
|
||||
|
||||
// init initializes the walker based on its Options
|
||||
func (w *walker) init() {
|
||||
var ignoredPaths []string
|
||||
if w.root.Type == RootModuleCache {
|
||||
ignoredPaths = []string{"cache"}
|
||||
}
|
||||
if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
|
||||
ignoredPaths = w.getIgnoredDirs(w.root.Path)
|
||||
ignoredPaths = append(ignoredPaths, "v", "mod")
|
||||
}
|
||||
|
||||
for _, p := range ignoredPaths {
|
||||
full := filepath.Join(w.root.Path, p)
|
||||
if fi, err := os.Stat(full); err == nil {
|
||||
w.ignoredDirs = append(w.ignoredDirs, fi)
|
||||
if w.opts.Logf != nil {
|
||||
w.opts.Logf("Directory added to ignore list: %s", full)
|
||||
}
|
||||
} else if w.opts.Logf != nil {
|
||||
w.opts.Logf("Error statting ignored directory: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
|
||||
// of relative directories to ignore when scanning for go files.
|
||||
// The provided path is one of the $GOPATH entries with "src" appended.
|
||||
func (w *walker) getIgnoredDirs(path string) []string {
|
||||
file := filepath.Join(path, ".goimportsignore")
|
||||
slurp, err := os.ReadFile(file)
|
||||
if w.opts.Logf != nil {
|
||||
if err != nil {
|
||||
w.opts.Logf("%v", err)
|
||||
} else {
|
||||
w.opts.Logf("Read %s", file)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var ignoredDirs []string
|
||||
bs := bufio.NewScanner(bytes.NewReader(slurp))
|
||||
for bs.Scan() {
|
||||
line := strings.TrimSpace(bs.Text())
|
||||
if line == "" || strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
ignoredDirs = append(ignoredDirs, line)
|
||||
}
|
||||
return ignoredDirs
|
||||
}
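
// A .goimportsignore file, as read above, is a plain list of directories
// relative to $GOPATH/src, one per line, with '#' comment lines. The entries
// below are made-up examples for illustration only:
//
//	# generated trees we never want goimports to scan
//	github.com/example/huge-generated-repo
//	internal/testdata/fixtures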
|
||||
|
||||
// shouldSkipDir reports whether the file should be skipped or not.
|
||||
func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
|
||||
for _, ignoredDir := range w.ignoredDirs {
|
||||
if os.SameFile(fi, ignoredDir) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if w.skip != nil {
|
||||
// Check with the user specified callback.
|
||||
return w.skip(w.root, dir)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// walk walks through the given path.
|
||||
func (w *walker) walk(path string, typ os.FileMode) error {
|
||||
if typ.IsRegular() {
|
||||
dir := filepath.Dir(path)
|
||||
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
|
||||
// Doesn't make sense to have regular files
|
||||
// directly in your $GOPATH/src or $GOROOT/src.
|
||||
return fastwalk.ErrSkipFiles
|
||||
}
|
||||
if !strings.HasSuffix(path, ".go") {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.add(w.root, dir)
|
||||
return fastwalk.ErrSkipFiles
|
||||
}
|
||||
if typ == os.ModeDir {
|
||||
base := filepath.Base(path)
|
||||
if base == "" || base[0] == '.' || base[0] == '_' ||
|
||||
base == "testdata" ||
|
||||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
|
||||
(!w.opts.ModulesEnabled && base == "node_modules") {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
fi, err := os.Lstat(path)
|
||||
if err == nil && w.shouldSkipDir(fi, path) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if typ == os.ModeSymlink {
|
||||
base := filepath.Base(path)
|
||||
if strings.HasPrefix(base, ".#") {
|
||||
// Emacs noise.
|
||||
return nil
|
||||
}
|
||||
if w.shouldTraverse(path) {
|
||||
return fastwalk.ErrTraverseLink
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldTraverse reports whether the symlink fi, found in dir,
|
||||
// should be followed. It makes sure symlinks were never visited
|
||||
// before to avoid symlink loops.
|
||||
func (w *walker) shouldTraverse(path string) bool {
|
||||
ts, err := os.Stat(path)
|
||||
if err != nil {
|
||||
logf := w.opts.Logf
|
||||
if logf == nil {
|
||||
logf = log.Printf
|
||||
}
|
||||
logf("%v", err)
|
||||
return false
|
||||
}
|
||||
if !ts.IsDir() {
|
||||
return false
|
||||
}
|
||||
if w.shouldSkipDir(ts, filepath.Dir(path)) {
|
||||
return false
|
||||
}
|
||||
// Check for symlink loops by statting each directory component
|
||||
// and seeing if any are the same file as ts.
|
||||
for {
|
||||
parent := filepath.Dir(path)
|
||||
if parent == path {
|
||||
// Made it to the root without seeing a cycle.
|
||||
// Use this symlink.
|
||||
return true
|
||||
}
|
||||
parentInfo, err := os.Stat(parent)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if os.SameFile(ts, parentInfo) {
|
||||
// Cycle. Don't traverse.
|
||||
return false
|
||||
}
|
||||
path = parent
|
||||
}
|
||||
|
||||
}
|
||||
1767
vendor/golang.org/x/tools/internal/imports/fix.go
generated
vendored
Normal file
File diff suppressed because it is too large
356
vendor/golang.org/x/tools/internal/imports/imports.go
generated
vendored
Normal file
@@ -0,0 +1,356 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run mkstdlib.go
|
||||
|
||||
// Package imports implements a Go pretty-printer (like package "go/format")
|
||||
// that also adds or removes import statements as necessary.
|
||||
package imports
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
"golang.org/x/tools/internal/event"
|
||||
)
|
||||
|
||||
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
|
||||
type Options struct {
|
||||
Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state.
|
||||
|
||||
// LocalPrefix is a comma-separated string of import path prefixes, which, if
|
||||
// set, instructs Process to sort the import paths with the given prefixes
|
||||
// into another group after 3rd-party packages.
|
||||
LocalPrefix string
|
||||
|
||||
Fragment bool // Accept fragment of a source file (no package statement)
|
||||
AllErrors bool // Report all errors (not just the first 10 on different lines)
|
||||
|
||||
Comments bool // Print comments (true if nil *Options provided)
|
||||
TabIndent bool // Use tabs for indent (true if nil *Options provided)
|
||||
TabWidth int // Tab width (8 if nil *Options provided)
|
||||
|
||||
FormatOnly bool // Disable the insertion and deletion of imports
|
||||
}
|
||||
|
||||
// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
|
||||
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
|
||||
fileSet := token.NewFileSet()
|
||||
file, adjust, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !opt.FormatOnly {
|
||||
if err := fixImports(fileSet, file, filename, opt.Env); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return formatFile(fileSet, file, src, adjust, opt)
|
||||
}
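
// exampleFormatOnly is an illustrative sketch, not part of the upstream file.
// With FormatOnly set, Process behaves like a formatter and does not need a
// populated Env; clearing FormatOnly and supplying Env additionally fixes the
// import list. The function name and option values are assumptions for
// illustration.
func exampleFormatOnly(src []byte) ([]byte, error) {
	opt := &Options{
		Comments:   true,
		TabIndent:  true,
		TabWidth:   8,
		FormatOnly: true, // format only; leave the import list untouched
	}
	return Process("example.go", src, opt)
}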
|
||||
|
||||
// FixImports returns a list of fixes to the imports that, when applied,
|
||||
// will leave the imports in the same state as Process. src and opt must
|
||||
// be specified.
|
||||
//
|
||||
// Note that filename's directory influences which imports can be chosen,
|
||||
// so it is important that filename be accurate.
|
||||
func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
|
||||
ctx, done := event.Start(ctx, "imports.FixImports")
|
||||
defer done()
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
file, _, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getFixes(ctx, fileSet, file, filename, opt.Env)
|
||||
}
|
||||
|
||||
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
|
||||
// is added in when parsing the file. src and opts must be specified, but no
|
||||
// env is needed.
|
||||
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
|
||||
// Don't use parse() -- we don't care about fragments or statement lists
|
||||
// here, and we need to work with unparseable files.
|
||||
fileSet := token.NewFileSet()
|
||||
parserMode := parser.Mode(0)
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
if opt.AllErrors {
|
||||
parserMode |= parser.AllErrors
|
||||
}
|
||||
parserMode |= extraMode
|
||||
|
||||
file, err := parser.ParseFile(fileSet, filename, src, parserMode)
|
||||
if file == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Apply the fixes to the file.
|
||||
apply(fileSet, file, fixes)
|
||||
|
||||
return formatFile(fileSet, file, src, nil, opt)
|
||||
}
|
||||
|
||||
// formatFile formats the file syntax tree.
|
||||
// It may mutate the token.FileSet.
|
||||
//
|
||||
// If an adjust function is provided, it is called after formatting
|
||||
// with the original source (formatFile's src parameter) and the
|
||||
// formatted file, and returns the postprocessed result.
|
||||
func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
|
||||
mergeImports(file)
|
||||
sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
|
||||
var spacesBefore []string // import paths we need spaces before
|
||||
for _, impSection := range astutil.Imports(fset, file) {
|
||||
// Within each block of contiguous imports, see if any
|
||||
// import lines are in different group numbers. If so,
|
||||
// we'll need to put a space between them so it's
|
||||
// compatible with gofmt.
|
||||
lastGroup := -1
|
||||
for _, importSpec := range impSection {
|
||||
importPath, _ := strconv.Unquote(importSpec.Path.Value)
|
||||
groupNum := importGroup(opt.LocalPrefix, importPath)
|
||||
if groupNum != lastGroup && lastGroup != -1 {
|
||||
spacesBefore = append(spacesBefore, importPath)
|
||||
}
|
||||
lastGroup = groupNum
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
printerMode := printer.UseSpaces
|
||||
if opt.TabIndent {
|
||||
printerMode |= printer.TabIndent
|
||||
}
|
||||
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := printConfig.Fprint(&buf, fset, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := buf.Bytes()
|
||||
if adjust != nil {
|
||||
out = adjust(src, out)
|
||||
}
|
||||
if len(spacesBefore) > 0 {
|
||||
out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
out, err = format.Source(out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// parse parses src, which was read from filename,
|
||||
// as a Go source file or statement list.
|
||||
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
|
||||
parserMode := parser.Mode(0)
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
if opt.AllErrors {
|
||||
parserMode |= parser.AllErrors
|
||||
}
|
||||
|
||||
// Try as whole source file.
|
||||
file, err := parser.ParseFile(fset, filename, src, parserMode)
|
||||
if err == nil {
|
||||
return file, nil, nil
|
||||
}
|
||||
// If the error is that the source file didn't begin with a
|
||||
// package line and we accept fragmented input, fall through to
|
||||
// try as a source fragment. Stop and return on any other error.
|
||||
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// If this is a declaration list, make it a source file
|
||||
// by inserting a package clause.
|
||||
// Insert using a ;, not a newline, so that parse errors are on
|
||||
// the correct line.
|
||||
const prefix = "package main;"
|
||||
psrc := append([]byte(prefix), src...)
|
||||
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
|
||||
if err == nil {
|
||||
// Gofmt will turn the ; into a \n.
|
||||
// Do that ourselves now and update the file contents,
|
||||
// so that positions and line numbers are correct going forward.
|
||||
psrc[len(prefix)-1] = '\n'
|
||||
fset.File(file.Package).SetLinesForContent(psrc)
|
||||
|
||||
// If a main function exists, we will assume this is a main
|
||||
// package and leave the file.
|
||||
if containsMainFunc(file) {
|
||||
return file, nil, nil
|
||||
}
|
||||
|
||||
adjust := func(orig, src []byte) []byte {
|
||||
// Remove the package clause.
|
||||
src = src[len(prefix):]
|
||||
return matchSpace(orig, src)
|
||||
}
|
||||
return file, adjust, nil
|
||||
}
|
||||
// If the error is that the source file didn't begin with a
|
||||
// declaration, fall through to try as a statement list.
|
||||
// Stop and return on any other error.
|
||||
if !strings.Contains(err.Error(), "expected declaration") {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// If this is a statement list, make it a source file
|
||||
// by inserting a package clause and turning the list
|
||||
// into a function body. This handles expressions too.
|
||||
// Insert using a ;, not a newline, so that the line numbers
|
||||
// in fsrc match the ones in src.
|
||||
fsrc := append(append([]byte("package p; func _() {"), src...), '}')
|
||||
file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
|
||||
if err == nil {
|
||||
adjust := func(orig, src []byte) []byte {
|
||||
// Remove the wrapping.
|
||||
// Gofmt has turned the ; into a \n\n.
|
||||
src = src[len("package p\n\nfunc _() {"):]
|
||||
src = src[:len(src)-len("}\n")]
|
||||
// Gofmt has also indented the function body one level.
|
||||
// Remove that indent.
|
||||
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
|
||||
return matchSpace(orig, src)
|
||||
}
|
||||
return file, adjust, nil
|
||||
}
|
||||
|
||||
// Failed, and out of options.
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// containsMainFunc checks if a file contains a function declaration with the
|
||||
// function signature 'func main()'
|
||||
func containsMainFunc(file *ast.File) bool {
|
||||
for _, decl := range file.Decls {
|
||||
if f, ok := decl.(*ast.FuncDecl); ok {
|
||||
if f.Name.Name != "main" {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(f.Type.Params.List) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func cutSpace(b []byte) (before, middle, after []byte) {
|
||||
i := 0
|
||||
for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
|
||||
i++
|
||||
}
|
||||
j := len(b)
|
||||
for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
|
||||
j--
|
||||
}
|
||||
if i <= j {
|
||||
return b[:i], b[i:j], b[j:]
|
||||
}
|
||||
return nil, nil, b[j:]
|
||||
}
|
||||
|
||||
// matchSpace reformats src to use the same space context as orig.
|
||||
// 1. If orig begins with blank lines, matchSpace inserts them at the beginning of src.
|
||||
// 2. matchSpace copies the indentation of the first non-blank line in orig
|
||||
// to every non-blank line in src.
|
||||
// 3. matchSpace copies the trailing space from orig and uses it in place
|
||||
// of src's trailing space.
|
||||
func matchSpace(orig []byte, src []byte) []byte {
|
||||
before, _, after := cutSpace(orig)
|
||||
i := bytes.LastIndex(before, []byte{'\n'})
|
||||
before, indent := before[:i+1], before[i+1:]
|
||||
|
||||
_, src, _ = cutSpace(src)
|
||||
|
||||
var b bytes.Buffer
|
||||
b.Write(before)
|
||||
for len(src) > 0 {
|
||||
line := src
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, src = line[:i+1], line[i+1:]
|
||||
} else {
|
||||
src = nil
|
||||
}
|
||||
if len(line) > 0 && line[0] != '\n' { // not blank
|
||||
b.Write(indent)
|
||||
}
|
||||
b.Write(line)
|
||||
}
|
||||
b.Write(after)
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+?)"`)
|
||||
|
||||
func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
|
||||
var out bytes.Buffer
|
||||
in := bufio.NewReader(r)
|
||||
inImports := false
|
||||
done := false
|
||||
for {
|
||||
s, err := in.ReadString('\n')
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !inImports && !done && strings.HasPrefix(s, "import") {
|
||||
inImports = true
|
||||
}
|
||||
if inImports && (strings.HasPrefix(s, "var") ||
|
||||
strings.HasPrefix(s, "func") ||
|
||||
strings.HasPrefix(s, "const") ||
|
||||
strings.HasPrefix(s, "type")) {
|
||||
done = true
|
||||
inImports = false
|
||||
}
|
||||
if inImports && len(breaks) > 0 {
|
||||
if m := impLine.FindStringSubmatch(s); m != nil {
|
||||
if m[1] == breaks[0] {
|
||||
out.WriteByte('\n')
|
||||
breaks = breaks[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprint(&out, s)
|
||||
}
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
723
vendor/golang.org/x/tools/internal/imports/mod.go
generated
vendored
Normal file
@@ -0,0 +1,723 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package imports
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
"golang.org/x/tools/internal/event"
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
// ModuleResolver implements resolver for modules using the go command as little
|
||||
// as feasible.
|
||||
type ModuleResolver struct {
|
||||
env *ProcessEnv
|
||||
moduleCacheDir string
|
||||
dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
|
||||
roots []gopathwalk.Root
|
||||
scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
|
||||
scannedRoots map[gopathwalk.Root]bool
|
||||
|
||||
initialized bool
|
||||
mains []*gocommand.ModuleJSON
|
||||
mainByDir map[string]*gocommand.ModuleJSON
|
||||
modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path...
|
||||
modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir.
|
||||
|
||||
// moduleCacheCache stores information about the module cache.
|
||||
moduleCacheCache *dirInfoCache
|
||||
otherCache *dirInfoCache
|
||||
}
|
||||
|
||||
func newModuleResolver(e *ProcessEnv) *ModuleResolver {
|
||||
r := &ModuleResolver{
|
||||
env: e,
|
||||
scanSema: make(chan struct{}, 1),
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) init() error {
|
||||
if r.initialized {
|
||||
return nil
|
||||
}
|
||||
|
||||
goenv, err := r.env.goEnv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
inv := gocommand.Invocation{
|
||||
BuildFlags: r.env.BuildFlags,
|
||||
ModFlag: r.env.ModFlag,
|
||||
ModFile: r.env.ModFile,
|
||||
Env: r.env.env(),
|
||||
Logf: r.env.Logf,
|
||||
WorkingDir: r.env.WorkingDir,
|
||||
}
|
||||
|
||||
vendorEnabled := false
|
||||
var mainModVendor *gocommand.ModuleJSON
|
||||
|
||||
// Module vendor directories are ignored in workspace mode:
|
||||
// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md
|
||||
if len(r.env.Env["GOWORK"]) == 0 {
|
||||
vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if mainModVendor != nil && vendorEnabled {
|
||||
// Vendor mode is on, so all the non-Main modules are irrelevant,
|
||||
// and we need to search /vendor for everything.
|
||||
r.mains = []*gocommand.ModuleJSON{mainModVendor}
|
||||
r.dummyVendorMod = &gocommand.ModuleJSON{
|
||||
Path: "",
|
||||
Dir: filepath.Join(mainModVendor.Dir, "vendor"),
|
||||
}
|
||||
r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
|
||||
r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
|
||||
} else {
|
||||
// Vendor mode is off, so run go list -m ... to find everything.
|
||||
err := r.initAllMods()
|
||||
// We expect an error when running outside of a module with
|
||||
// GO111MODULE=on. Other errors are fatal.
|
||||
if err != nil {
|
||||
if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if gmc := r.env.Env["GOMODCACHE"]; gmc != "" {
|
||||
r.moduleCacheDir = gmc
|
||||
} else {
|
||||
gopaths := filepath.SplitList(goenv["GOPATH"])
|
||||
if len(gopaths) == 0 {
|
||||
return fmt.Errorf("empty GOPATH")
|
||||
}
|
||||
r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod")
|
||||
}
|
||||
|
||||
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.modsByModPath[x].Path, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
sort.Slice(r.modsByDir, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator))
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
|
||||
r.roots = []gopathwalk.Root{
|
||||
{Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT},
|
||||
}
|
||||
r.mainByDir = make(map[string]*gocommand.ModuleJSON)
|
||||
for _, main := range r.mains {
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: main.Dir, Type: gopathwalk.RootCurrentModule})
|
||||
r.mainByDir[main.Dir] = main
|
||||
}
|
||||
if vendorEnabled {
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: r.dummyVendorMod.Dir, Type: gopathwalk.RootOther})
|
||||
} else {
|
||||
addDep := func(mod *gocommand.ModuleJSON) {
|
||||
if mod.Replace == nil {
|
||||
// This is redundant with the cache, but we'll skip it cheaply enough.
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache})
|
||||
} else {
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther})
|
||||
}
|
||||
}
|
||||
// Walk dependent modules before scanning the full mod cache, direct deps first.
|
||||
for _, mod := range r.modsByModPath {
|
||||
if !mod.Indirect && !mod.Main {
|
||||
addDep(mod)
|
||||
}
|
||||
}
|
||||
for _, mod := range r.modsByModPath {
|
||||
if mod.Indirect && !mod.Main {
|
||||
addDep(mod)
|
||||
}
|
||||
}
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
|
||||
}
|
||||
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
if r.moduleCacheCache == nil {
|
||||
r.moduleCacheCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
}
|
||||
if r.otherCache == nil {
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
}
|
||||
r.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) initAllMods() error {
|
||||
stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-e", "-json", "...")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for dec := json.NewDecoder(stdout); dec.More(); {
|
||||
mod := &gocommand.ModuleJSON{}
|
||||
if err := dec.Decode(mod); err != nil {
|
||||
return err
|
||||
}
|
||||
if mod.Dir == "" {
|
||||
if r.env.Logf != nil {
|
||||
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
|
||||
}
|
||||
// Can't do anything with a module that's not downloaded.
|
||||
continue
|
||||
}
|
||||
// golang/go#36193: the go command doesn't always clean paths.
|
||||
mod.Dir = filepath.Clean(mod.Dir)
|
||||
r.modsByModPath = append(r.modsByModPath, mod)
|
||||
r.modsByDir = append(r.modsByDir, mod)
|
||||
if mod.Main {
|
||||
r.mains = append(r.mains, mod)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewScan() {
|
||||
<-r.scanSema
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewMod() {
|
||||
<-r.scanSema
|
||||
*r = ModuleResolver{
|
||||
env: r.env,
|
||||
moduleCacheCache: r.moduleCacheCache,
|
||||
otherCache: r.otherCache,
|
||||
scanSema: r.scanSema,
|
||||
}
|
||||
r.init()
|
||||
r.scanSema <- struct{}{}
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory that contains the package at
|
||||
// the given import path, or returns nil, "" if no module is in scope.
|
||||
func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) {
|
||||
// This can't find packages in the stdlib, but that's harmless for all
|
||||
// the existing code paths.
|
||||
for _, m := range r.modsByModPath {
|
||||
if !strings.HasPrefix(importPath, m.Path) {
|
||||
continue
|
||||
}
|
||||
pathInModule := importPath[len(m.Path):]
|
||||
pkgDir := filepath.Join(m.Dir, pathInModule)
|
||||
if r.dirIsNestedModule(pkgDir, m) {
|
||||
continue
|
||||
}
|
||||
|
||||
if info, ok := r.cacheLoad(pkgDir); ok {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
if err != nil {
|
||||
continue // No package in this dir.
|
||||
}
|
||||
return m, pkgDir
|
||||
}
|
||||
if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil {
|
||||
continue // Dir is unreadable, etc.
|
||||
}
|
||||
// This is slightly wrong: a directory doesn't have to have an
|
||||
// importable package to count as a package for package-to-module
|
||||
// resolution. package main or _test files should count but
|
||||
// don't.
|
||||
// TODO(heschi): fix this.
|
||||
if _, err := r.cachePackageName(info); err == nil {
|
||||
return m, pkgDir
|
||||
}
|
||||
}
|
||||
|
||||
// Not cached. Read the filesystem.
|
||||
pkgFiles, err := os.ReadDir(pkgDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// A module only contains a package if it has buildable go
|
||||
// files in that directory. If not, it could be provided by an
|
||||
// outer module. See #29736.
|
||||
for _, fi := range pkgFiles {
|
||||
if ok, _ := r.env.matchFile(pkgDir, fi.Name()); ok {
|
||||
return m, pkgDir
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) {
|
||||
if info, ok := r.moduleCacheCache.Load(dir); ok {
|
||||
return info, ok
|
||||
}
|
||||
return r.otherCache.Load(dir)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
r.moduleCacheCache.Store(info.dir, info)
|
||||
} else {
|
||||
r.otherCache.Store(info.dir, info)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheKeys() []string {
|
||||
return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...)
|
||||
}
|
||||
|
||||
// cachePackageName caches the package name for a dir already in the cache.
|
||||
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
return r.moduleCacheCache.CachePackageName(info)
|
||||
}
|
||||
return r.otherCache.CachePackageName(info)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
return r.moduleCacheCache.CacheExports(ctx, env, info)
|
||||
}
|
||||
return r.otherCache.CacheExports(ctx, env, info)
|
||||
}
|
||||
|
||||
// findModuleByDir returns the module that contains dir, or nil if no such
|
||||
// module is in scope.
|
||||
func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON {
|
||||
// This is quite tricky and may not be correct. dir could be:
|
||||
// - a package in the main module.
|
||||
// - a replace target underneath the main module's directory.
|
||||
// - a nested module in the above.
|
||||
// - a replace target somewhere totally random.
|
||||
// - a nested module in the above.
|
||||
// - in the mod cache.
|
||||
// - in /vendor/ in -mod=vendor mode.
|
||||
// - nested module? Dunno.
|
||||
// Rumor has it that replace targets cannot contain other replace targets.
|
||||
//
|
||||
// Note that it is critical here that modsByDir is sorted to have deeper dirs
|
||||
// first. This ensures that findModuleByDir finds the innermost module.
|
||||
// See also golang/go#56291.
|
||||
for _, m := range r.modsByDir {
|
||||
if !strings.HasPrefix(dir, m.Dir) {
|
||||
continue
|
||||
}
|
||||
|
||||
if r.dirIsNestedModule(dir, m) {
|
||||
continue
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dirIsNestedModule reports if dir is contained in a nested module underneath
|
||||
// mod, not actually in mod.
|
||||
func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool {
|
||||
if !strings.HasPrefix(dir, mod.Dir) {
|
||||
return false
|
||||
}
|
||||
if r.dirInModuleCache(dir) {
|
||||
// Nested modules in the module cache are pruned,
|
||||
// so it cannot be a nested module.
|
||||
return false
|
||||
}
|
||||
if mod != nil && mod == r.dummyVendorMod {
|
||||
// The /vendor pseudomodule is flattened and doesn't actually count.
|
||||
return false
|
||||
}
|
||||
modDir, _ := r.modInfo(dir)
|
||||
if modDir == "" {
|
||||
return false
|
||||
}
|
||||
return modDir != mod.Dir
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) {
|
||||
readModName := func(modFile string) string {
|
||||
modBytes, err := os.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
}
|
||||
|
||||
if r.dirInModuleCache(dir) {
|
||||
if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 {
|
||||
index := strings.Index(dir, matches[1]+"@"+matches[2])
|
||||
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
|
||||
return modDir, readModName(filepath.Join(modDir, "go.mod"))
|
||||
}
|
||||
}
|
||||
for {
|
||||
if info, ok := r.cacheLoad(dir); ok {
|
||||
return info.moduleDir, info.moduleName
|
||||
}
|
||||
f := filepath.Join(dir, "go.mod")
|
||||
info, err := os.Stat(f)
|
||||
if err == nil && !info.IsDir() {
|
||||
return dir, readModName(f)
|
||||
}
|
||||
|
||||
d := filepath.Dir(dir)
|
||||
if len(d) >= len(dir) {
|
||||
return "", "" // reached top of file system, no go.mod
|
||||
}
|
||||
dir = d
|
||||
}
|
||||
}
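The ascend-until-go.mod loop in modInfo is a standard walk-up. A minimal standalone sketch of just that loop, with no caching and no module-cache special case:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findGoMod walks up from dir until it finds a go.mod file, returning the
// containing directory, or "" if it reaches the filesystem root first.
// The len(parent) >= len(dir) check mirrors the termination test above:
// filepath.Dir stops shortening the path once we hit the root.
func findGoMod(dir string) string {
	for {
		if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
			return dir
		}
		parent := filepath.Dir(dir)
		if len(parent) >= len(dir) {
			return ""
		}
		dir = parent
	}
}

func main() {
	if wd, err := os.Getwd(); err == nil {
		fmt.Println(findGoMod(wd))
	}
}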
|
||||
|
||||
func (r *ModuleResolver) dirInModuleCache(dir string) bool {
|
||||
if r.moduleCacheDir == "" {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(dir, r.moduleCacheDir)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := map[string]string{}
|
||||
for _, path := range importPaths {
|
||||
_, packageDir := r.findPackage(path)
|
||||
if packageDir == "" {
|
||||
continue
|
||||
}
|
||||
name, err := packageDirToName(packageDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
names[path] = name
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error {
|
||||
ctx, done := event.Start(ctx, "imports.ModuleResolver.scan")
|
||||
defer done()
|
||||
|
||||
if err := r.init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
processDir := func(info directoryPackageInfo) {
|
||||
// Skip this directory if we were not able to get the package information successfully.
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
return
|
||||
}
|
||||
pkg, err := r.canonicalize(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.dirFound(pkg) {
|
||||
return
|
||||
}
|
||||
pkg.packageName, err = r.cachePackageName(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.packageNameLoaded(pkg) {
|
||||
return
|
||||
}
|
||||
_, exports, err := r.loadExports(ctx, pkg, false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
callback.exportsLoaded(pkg, exports)
|
||||
}
|
||||
|
||||
// Start processing everything in the cache, and listen for the new stuff
|
||||
// we discover in the walk below.
|
||||
stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir)
|
||||
defer stop1()
|
||||
stop2 := r.otherCache.ScanAndListen(ctx, processDir)
|
||||
defer stop2()
|
||||
|
||||
// We assume cached directories are fully cached, including all their
|
||||
// children, and have not changed. We can skip them.
|
||||
skip := func(root gopathwalk.Root, dir string) bool {
|
||||
if r.env.SkipPathInScan != nil && root.Type == gopathwalk.RootCurrentModule {
|
||||
if root.Path == dir {
|
||||
return false
|
||||
}
|
||||
|
||||
if r.env.SkipPathInScan(filepath.Clean(dir)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
info, ok := r.cacheLoad(dir)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// This directory can be skipped as long as we have already scanned it.
|
||||
// Packages with errors will continue to have errors, so there is no need
|
||||
// to rescan them.
|
||||
packageScanned, _ := info.reachedStatus(directoryScanned)
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
// Add anything new to the cache, and process it if we're still listening.
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
r.cacheStore(r.scanDirForPackage(root, dir))
|
||||
}
|
||||
|
||||
// r.roots and the callback are not necessarily safe to use in the
|
||||
// goroutine below. Process them eagerly.
|
||||
roots := filterRoots(r.roots, callback.rootFound)
|
||||
// We can't cancel walks, because we need them to finish to have a usable
|
||||
// cache. Instead, run them in a separate goroutine and detach.
|
||||
scanDone := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-r.scanSema:
|
||||
}
|
||||
defer func() { r.scanSema <- struct{}{} }()
|
||||
// We have the lock on r.scannedRoots, and no other scans can run.
|
||||
for _, root := range roots {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.scannedRoots[root] {
|
||||
continue
|
||||
}
|
||||
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true})
|
||||
r.scannedRoots[root] = true
|
||||
}
|
||||
close(scanDone)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-scanDone:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 {
|
||||
if _, ok := stdlib[path]; ok {
|
||||
return MaxRelevance
|
||||
}
|
||||
mod, _ := r.findPackage(path)
|
||||
return modRelevance(mod)
|
||||
}
|
||||
|
||||
func modRelevance(mod *gocommand.ModuleJSON) float64 {
|
||||
var relevance float64
|
||||
switch {
|
||||
case mod == nil: // out of scope
|
||||
return MaxRelevance - 4
|
||||
case mod.Indirect:
|
||||
relevance = MaxRelevance - 3
|
||||
case !mod.Main:
|
||||
relevance = MaxRelevance - 2
|
||||
default:
|
||||
relevance = MaxRelevance - 1 // main module ties with stdlib
|
||||
}
|
||||
|
||||
_, versionString, ok := module.SplitPathVersion(mod.Path)
|
||||
if ok {
|
||||
index := strings.Index(versionString, "v")
|
||||
if index == -1 {
|
||||
return relevance
|
||||
}
|
||||
if versionNumber, err := strconv.ParseFloat(versionString[index+1:], 64); err == nil {
|
||||
relevance += versionNumber / 1000
|
||||
}
|
||||
}
|
||||
|
||||
return relevance
|
||||
}
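For reference, the tiers produced by modRelevance can be shown with concrete numbers. A standalone sketch that mirrors the switch above; maxRelevance here is an assumed stand-in constant, not the package's MaxRelevance:

package main

import "fmt"

const maxRelevance = 7.0 // assumed stand-in for MaxRelevance

// relevanceFor mirrors modRelevance's tiers: stdlib > main module > direct
// deps > indirect deps > out-of-scope modules, with /vN major versions
// nudged upward by N/1000 so newer majors sort first within a tier.
func relevanceFor(main, indirect, inScope bool, major int) float64 {
	var rel float64
	switch {
	case !inScope:
		return maxRelevance - 4
	case indirect:
		rel = maxRelevance - 3
	case !main:
		rel = maxRelevance - 2
	default:
		rel = maxRelevance - 1
	}
	return rel + float64(major)/1000
}

func main() {
	fmt.Println(relevanceFor(true, false, true, 0))  // main module: 6
	fmt.Println(relevanceFor(false, false, true, 2)) // direct dep at /v2: 5.002
	fmt.Println(relevanceFor(false, true, true, 0))  // indirect dep: 4
}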
|
||||
|
||||
// canonicalize gets the result of canonicalizing the packages using the results
|
||||
// of initializing the resolver from 'go list -m'.
|
||||
func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
// Packages in GOROOT are already canonical, regardless of the std/cmd modules.
|
||||
if info.rootType == gopathwalk.RootGOROOT {
|
||||
return &pkg{
|
||||
importPathShort: info.nonCanonicalImportPath,
|
||||
dir: info.dir,
|
||||
packageName: path.Base(info.nonCanonicalImportPath),
|
||||
relevance: MaxRelevance,
|
||||
}, nil
|
||||
}
|
||||
|
||||
importPath := info.nonCanonicalImportPath
|
||||
mod := r.findModuleByDir(info.dir)
|
||||
// Check if the directory is underneath a module that's in scope.
|
||||
if mod != nil {
|
||||
// It is. If dir is the target of a replace directive,
|
||||
// our guessed import path is wrong. Use the real one.
|
||||
if mod.Dir == info.dir {
|
||||
importPath = mod.Path
|
||||
} else {
|
||||
dirInMod := info.dir[len(mod.Dir)+len("/"):]
|
||||
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
|
||||
}
|
||||
} else if !strings.HasPrefix(importPath, info.moduleName) {
|
||||
// The module's name doesn't match the package's import path. It
|
||||
// probably needs a replace directive we don't have.
|
||||
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir)
|
||||
}
|
||||
|
||||
res := &pkg{
|
||||
importPathShort: importPath,
|
||||
dir: info.dir,
|
||||
relevance: modRelevance(mod),
|
||||
}
|
||||
// We may have discovered a package that has a different version
|
||||
// in scope already. Canonicalize to that one if possible.
|
||||
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
|
||||
res.dir = canonicalDir
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
|
||||
return r.cacheExports(ctx, r.env, info)
|
||||
}
|
||||
return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
|
||||
subdir := ""
|
||||
if dir != root.Path {
|
||||
subdir = dir[len(root.Path)+len("/"):]
|
||||
}
|
||||
importPath := filepath.ToSlash(subdir)
|
||||
if strings.HasPrefix(importPath, "vendor/") {
|
||||
// Only enter vendor directories if they're explicitly requested as a root.
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("unwanted vendor directory"),
|
||||
}
|
||||
}
|
||||
switch root.Type {
|
||||
case gopathwalk.RootCurrentModule:
|
||||
importPath = path.Join(r.mainByDir[root.Path].Path, filepath.ToSlash(subdir))
|
||||
case gopathwalk.RootModuleCache:
|
||||
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||
if len(matches) == 0 {
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("invalid module cache path: %v", subdir),
|
||||
}
|
||||
}
|
||||
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
|
||||
if err != nil {
|
||||
if r.env.Logf != nil {
|
||||
r.env.Logf("decoding module cache path %q: %v", subdir, err)
|
||||
}
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
|
||||
}
|
||||
}
|
||||
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
|
||||
}
|
||||
|
||||
modDir, modName := r.modInfo(dir)
|
||||
result := directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
dir: dir,
|
||||
rootType: root.Type,
|
||||
nonCanonicalImportPath: importPath,
|
||||
moduleDir: modDir,
|
||||
moduleName: modName,
|
||||
}
|
||||
if root.Type == gopathwalk.RootGOROOT {
|
||||
// stdlib packages are always in scope, despite the confusing go.mod
|
||||
return result
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// modCacheRegexp splits a path in a module cache into module, module version, and package.
|
||||
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
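A quick standalone check of what the three capture groups yield on a typical (made-up) module cache path:

package main

import (
	"fmt"
	"regexp"
)

var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)

func main() {
	m := modCacheRegexp.FindStringSubmatch("github.com/foo/bar@v1.2.3/baz")
	// m[1] = "github.com/foo/bar", m[2] = "v1.2.3", m[3] = "/baz"
	fmt.Println(m[1], m[2], m[3])
}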
|
||||
|
||||
var (
|
||||
slashSlash = []byte("//")
|
||||
moduleStr = []byte("module")
|
||||
)
|
||||
|
||||
// modulePath returns the module path from the gomod file text.
|
||||
// If it cannot find a module path, it returns an empty string.
|
||||
// It is tolerant of unrelated problems in the go.mod file.
|
||||
//
|
||||
// Copied from cmd/go/internal/modfile.
|
||||
func modulePath(mod []byte) string {
|
||||
for len(mod) > 0 {
|
||||
line := mod
|
||||
mod = nil
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, mod = line[:i], line[i+1:]
|
||||
}
|
||||
if i := bytes.Index(line, slashSlash); i >= 0 {
|
||||
line = line[:i]
|
||||
}
|
||||
line = bytes.TrimSpace(line)
|
||||
if !bytes.HasPrefix(line, moduleStr) {
|
||||
continue
|
||||
}
|
||||
line = line[len(moduleStr):]
|
||||
n := len(line)
|
||||
line = bytes.TrimSpace(line)
|
||||
if len(line) == n || len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if line[0] == '"' || line[0] == '`' {
|
||||
p, err := strconv.Unquote(string(line))
|
||||
if err != nil {
|
||||
return "" // malformed quoted string or multiline module path
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
return string(line)
|
||||
}
|
||||
return "" // missing module path
|
||||
}
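A hypothetical test-style sketch (not part of the vendored file) of what modulePath extracts from raw go.mod text, assuming it sits alongside this code in package imports:

package imports

import "testing"

// TestModulePathSketch is illustrative only: it exercises modulePath on a
// few literal go.mod bodies, including a quoted module path.
func TestModulePathSketch(t *testing.T) {
	cases := map[string]string{
		"// deps\nmodule example.com/m\n": "example.com/m",
		"module \"example.com/quoted\"\n": "example.com/quoted",
		"go 1.21\n":                       "", // no module directive
	}
	for in, want := range cases {
		if got := modulePath([]byte(in)); got != want {
			t.Errorf("modulePath(%q) = %q, want %q", in, got, want)
		}
	}
}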
|
||||
236
vendor/golang.org/x/tools/internal/imports/mod_cache.go
generated
vendored
Normal file
@@ -0,0 +1,236 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package imports
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
// To find packages to import, the resolver needs to know about all of
|
||||
// the packages that could be imported. This includes packages that are
|
||||
// already in modules that are in (1) the current module, (2) replace targets,
|
||||
// and (3) packages in the module cache. Packages in (1) and (2) may change over
|
||||
// time, as the client may edit the current module and locally replaced modules.
|
||||
// The module cache (which includes all of the packages in (3)) can only
|
||||
// ever be added to.
|
||||
//
|
||||
// The resolver can thus save state about packages in the module cache
|
||||
// and guarantee that this will not change over time. To obtain information
|
||||
// about new modules added to the module cache, the module cache should be
|
||||
// rescanned.
|
||||
//
|
||||
// It is OK to serve information about modules that have been deleted,
|
||||
// as they do still exist.
|
||||
// TODO(suzmue): can we share information with the caller about
|
||||
// what module needs to be downloaded to import this package?
|
||||
|
||||
type directoryPackageStatus int
|
||||
|
||||
const (
|
||||
_ directoryPackageStatus = iota
|
||||
directoryScanned
|
||||
nameLoaded
|
||||
exportsLoaded
|
||||
)
|
||||
|
||||
type directoryPackageInfo struct {
|
||||
// status indicates the extent to which this struct has been filled in.
|
||||
status directoryPackageStatus
|
||||
// err is non-nil when there was an error trying to reach status.
|
||||
err error
|
||||
|
||||
// Set when status >= directoryScanned.
|
||||
|
||||
// dir is the absolute directory of this package.
|
||||
dir string
|
||||
rootType gopathwalk.RootType
|
||||
// nonCanonicalImportPath is the package's expected import path. It may
|
||||
// not actually be importable at that path.
|
||||
nonCanonicalImportPath string
|
||||
|
||||
// Module-related information.
|
||||
moduleDir string // The directory that is the module root of this dir.
|
||||
moduleName string // The module name that contains this dir.
|
||||
|
||||
// Set when status >= nameLoaded.
|
||||
|
||||
packageName string // the package name, as declared in the source.
|
||||
|
||||
// Set when status >= exportsLoaded.
|
||||
|
||||
exports []string
|
||||
}
|
||||
|
||||
// reachedStatus returns true when info has a status at least target and any error associated with
|
||||
// an attempt to reach target.
|
||||
func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (bool, error) {
|
||||
if info.err == nil {
|
||||
return info.status >= target, nil
|
||||
}
|
||||
if info.status == target {
|
||||
return true, info.err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// dirInfoCache is a concurrency safe map for storing information about
|
||||
// directories that may contain packages.
|
||||
//
|
||||
// The information in this cache is built incrementally. Entries are initialized in scan.
|
||||
// No new keys should be added in any other functions, as all directories containing
|
||||
// packages are identified in scan.
|
||||
//
|
||||
// Other functions, including loadExports and findPackage, may update entries in this cache
|
||||
// as they discover new things about the directory.
|
||||
//
|
||||
// The information in the cache is not expected to change for the cache's
|
||||
// lifetime, so there is no protection against competing writes. Users should
|
||||
// take care not to hold the cache across changes to the underlying files.
|
||||
//
|
||||
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
|
||||
type dirInfoCache struct {
|
||||
mu sync.Mutex
|
||||
// dirs stores information about packages in directories, keyed by absolute path.
|
||||
dirs map[string]*directoryPackageInfo
|
||||
listeners map[*int]cacheListener
|
||||
}
|
||||
|
||||
type cacheListener func(directoryPackageInfo)
|
||||
|
||||
// ScanAndListen calls listener on all the items in the cache, and on anything
|
||||
// newly added. The returned stop function waits for all in-flight callbacks to
|
||||
// finish and blocks new ones.
|
||||
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Flushing out all the callbacks is tricky without knowing how many there
|
||||
// are going to be. Setting an arbitrary limit makes it much easier.
|
||||
const maxInFlight = 10
|
||||
sema := make(chan struct{}, maxInFlight)
|
||||
for i := 0; i < maxInFlight; i++ {
|
||||
sema <- struct{}{}
|
||||
}
|
||||
|
||||
cookie := new(int) // A unique ID we can use for the listener.
|
||||
|
||||
// We can't hold mu while calling the listener.
|
||||
d.mu.Lock()
|
||||
var keys []string
|
||||
for key := range d.dirs {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
d.listeners[cookie] = func(info directoryPackageInfo) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-sema:
|
||||
}
|
||||
listener(info)
|
||||
sema <- struct{}{}
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
stop := func() {
|
||||
cancel()
|
||||
d.mu.Lock()
|
||||
delete(d.listeners, cookie)
|
||||
d.mu.Unlock()
|
||||
for i := 0; i < maxInFlight; i++ {
|
||||
<-sema
|
||||
}
|
||||
}
|
||||
|
||||
// Process the pre-existing keys.
|
||||
for _, k := range keys {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return stop
|
||||
default:
|
||||
}
|
||||
if v, ok := d.Load(k); ok {
|
||||
listener(v)
|
||||
}
|
||||
}
|
||||
|
||||
return stop
|
||||
}
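The stop function above drains callbacks by reacquiring every semaphore token. The same bounded in-flight callback pattern in a minimal standalone sketch with made-up names:

package main

import (
	"context"
	"fmt"
)

// listenWithDrain runs fn for each value passed to emit, allowing at most
// max callbacks in flight, and returns a stop func that blocks until every
// in-flight callback has finished (the ScanAndListen drain trick).
func listenWithDrain(ctx context.Context, max int, fn func(int)) (emit func(int), stop func()) {
	ctx, cancel := context.WithCancel(ctx)
	sema := make(chan struct{}, max)
	for i := 0; i < max; i++ {
		sema <- struct{}{}
	}
	emit = func(v int) {
		select {
		case <-ctx.Done():
			return
		case <-sema: // take a token; stop() cannot finish until it is returned
		}
		fn(v)
		sema <- struct{}{}
	}
	stop = func() {
		cancel()
		for i := 0; i < max; i++ {
			<-sema // reacquire every token: all callbacks are done
		}
	}
	return emit, stop
}

func main() {
	n := 0
	emit, stop := listenWithDrain(context.Background(), 4, func(int) { n++ })
	for i := 0; i < 10; i++ {
		emit(i)
	}
	stop()
	fmt.Println(n) // 10
}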
|
||||
|
||||
// Store stores the package info for dir.
|
||||
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
d.mu.Lock()
|
||||
_, old := d.dirs[dir]
|
||||
d.dirs[dir] = &info
|
||||
var listeners []cacheListener
|
||||
for _, l := range d.listeners {
|
||||
listeners = append(listeners, l)
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
if !old {
|
||||
for _, l := range listeners {
|
||||
l(info)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
|
||||
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
info, ok := d.dirs[dir]
|
||||
if !ok {
|
||||
return directoryPackageInfo{}, false
|
||||
}
|
||||
return *info, true
|
||||
}
|
||||
|
||||
// Keys returns the keys currently present in d.
|
||||
func (d *dirInfoCache) Keys() (keys []string) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
for key := range d.dirs {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
return info.packageName, err
|
||||
}
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
return "", fmt.Errorf("cannot read package name, scan error: %v", err)
|
||||
}
|
||||
info.packageName, info.err = packageDirToName(info.dir)
|
||||
info.status = nameLoaded
|
||||
d.Store(info.dir, info)
|
||||
return info.packageName, info.err
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
if reached, _ := info.reachedStatus(exportsLoaded); reached {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
|
||||
if info.err == context.Canceled || info.err == context.DeadlineExceeded {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
// The cache structure wants things to proceed linearly. We can skip a
|
||||
// step here, but only if we succeed.
|
||||
if info.status == nameLoaded || info.err == nil {
|
||||
info.status = exportsLoaded
|
||||
} else {
|
||||
info.status = nameLoaded
|
||||
}
|
||||
d.Store(info.dir, info)
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
297
vendor/golang.org/x/tools/internal/imports/sortimports.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Hacked up copy of go/ast/import.go
|
||||
// Modified to use a single token.File in preference to a FileSet.
|
||||
|
||||
package imports
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"log"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// sortImports sorts runs of consecutive import lines in import blocks in f.
|
||||
// It also removes duplicate imports when it is possible to do so without data loss.
|
||||
//
|
||||
// It may mutate the token.File.
|
||||
func sortImports(localPrefix string, tokFile *token.File, f *ast.File) {
|
||||
for i, d := range f.Decls {
|
||||
d, ok := d.(*ast.GenDecl)
|
||||
if !ok || d.Tok != token.IMPORT {
|
||||
// Not an import declaration, so we're done.
|
||||
// Imports are always first.
|
||||
break
|
||||
}
|
||||
|
||||
if len(d.Specs) == 0 {
|
||||
// Empty import block, remove it.
|
||||
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
|
||||
}
|
||||
|
||||
if !d.Lparen.IsValid() {
|
||||
// Not a block: sorted by default.
|
||||
continue
|
||||
}
|
||||
|
||||
// Identify and sort runs of specs on successive lines.
|
||||
i := 0
|
||||
specs := d.Specs[:0]
|
||||
for j, s := range d.Specs {
|
||||
if j > i && tokFile.Line(s.Pos()) > 1+tokFile.Line(d.Specs[j-1].End()) {
|
||||
// j begins a new run. End this one.
|
||||
specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:j])...)
|
||||
i = j
|
||||
}
|
||||
}
|
||||
specs = append(specs, sortSpecs(localPrefix, tokFile, f, d.Specs[i:])...)
|
||||
d.Specs = specs
|
||||
|
||||
// Deduping can leave a blank line before the rparen; clean that up.
|
||||
// Ignore line directives.
|
||||
if len(d.Specs) > 0 {
|
||||
lastSpec := d.Specs[len(d.Specs)-1]
|
||||
lastLine := tokFile.PositionFor(lastSpec.Pos(), false).Line
|
||||
if rParenLine := tokFile.PositionFor(d.Rparen, false).Line; rParenLine > lastLine+1 {
|
||||
tokFile.MergeLine(rParenLine - 1) // has side effects!
|
||||
}
|
||||
}
|
||||
}
|
||||
}
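How run boundaries are detected (a gap of more than one line between consecutive specs) can be reproduced standalone with go/parser. This sketch uses a FileSet position lookup in place of the token.File used above:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package p

import (
	"bytes"
	"fmt"

	"os"
)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	decl := f.Decls[0].(*ast.GenDecl)
	for j, s := range decl.Specs {
		// A spec starts a new run when it sits more than one line below the
		// previous spec, exactly the test used by sortImports.
		if j > 0 && fset.Position(s.Pos()).Line > fset.Position(decl.Specs[j-1].End()).Line+1 {
			fmt.Printf("%s starts a new run\n", s.(*ast.ImportSpec).Path.Value)
		}
	}
}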
|
||||
|
||||
// mergeImports merges all the import declarations into the first one.
|
||||
// Taken from golang.org/x/tools/ast/astutil.
|
||||
// This does not adjust line numbers properly.
|
||||
func mergeImports(f *ast.File) {
|
||||
if len(f.Decls) <= 1 {
|
||||
return
|
||||
}
|
||||
|
||||
// Merge all the import declarations into the first one.
|
||||
var first *ast.GenDecl
|
||||
for i := 0; i < len(f.Decls); i++ {
|
||||
decl := f.Decls[i]
|
||||
gen, ok := decl.(*ast.GenDecl)
|
||||
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
|
||||
continue
|
||||
}
|
||||
if first == nil {
|
||||
first = gen
|
||||
continue // Don't touch the first one.
|
||||
}
|
||||
// We now know there is more than one package in this import
|
||||
// declaration. Ensure that it ends up parenthesized.
|
||||
first.Lparen = first.Pos()
|
||||
// Move the imports of the other import declaration to the first one.
|
||||
for _, spec := range gen.Specs {
|
||||
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
|
||||
first.Specs = append(first.Specs, spec)
|
||||
}
|
||||
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
|
||||
// declImports reports whether gen contains an import of path.
|
||||
// Taken from golang.org/x/tools/ast/astutil.
|
||||
func declImports(gen *ast.GenDecl, path string) bool {
|
||||
if gen.Tok != token.IMPORT {
|
||||
return false
|
||||
}
|
||||
for _, spec := range gen.Specs {
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
if importPath(impspec) == path {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func importPath(s ast.Spec) string {
|
||||
t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
|
||||
if err == nil {
|
||||
return t
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func importName(s ast.Spec) string {
|
||||
n := s.(*ast.ImportSpec).Name
|
||||
if n == nil {
|
||||
return ""
|
||||
}
|
||||
return n.Name
|
||||
}
|
||||
|
||||
func importComment(s ast.Spec) string {
|
||||
c := s.(*ast.ImportSpec).Comment
|
||||
if c == nil {
|
||||
return ""
|
||||
}
|
||||
return c.Text()
|
||||
}
|
||||
|
||||
// collapse indicates whether prev may be removed, leaving only next.
|
||||
func collapse(prev, next ast.Spec) bool {
|
||||
if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
|
||||
return false
|
||||
}
|
||||
return prev.(*ast.ImportSpec).Comment == nil
|
||||
}
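Concretely, a duplicate spec is only collapsible when its path and name match and the earlier spec has no comment to lose. A standalone sketch of that rule; it compares Path.Value directly instead of unquoting, which is a simplification:

package main

import (
	"fmt"
	"go/ast"
)

// collapsible mirrors collapse: prev may be dropped only if it imports the
// same path under the same name and carries no trailing comment.
func collapsible(prev, next *ast.ImportSpec) bool {
	if prev.Path.Value != next.Path.Value || specName(prev) != specName(next) {
		return false
	}
	return prev.Comment == nil
}

func specName(s *ast.ImportSpec) string {
	if s.Name == nil {
		return ""
	}
	return s.Name.Name
}

func main() {
	a := &ast.ImportSpec{Path: &ast.BasicLit{Value: `"fmt"`}}
	b := &ast.ImportSpec{Path: &ast.BasicLit{Value: `"fmt"`}}
	c := &ast.ImportSpec{Path: &ast.BasicLit{Value: `"fmt"`}, Name: &ast.Ident{Name: "f"}}
	fmt.Println(collapsible(a, b)) // true: identical, nothing to lose
	fmt.Println(collapsible(c, b)) // false: named import differs from unnamed one
}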
|
||||
|
||||
type posSpan struct {
|
||||
Start token.Pos
|
||||
End token.Pos
|
||||
}
|
||||
|
||||
// sortSpecs sorts the import specs within each import decl.
|
||||
// It may mutate the token.File.
|
||||
func sortSpecs(localPrefix string, tokFile *token.File, f *ast.File, specs []ast.Spec) []ast.Spec {
|
||||
// Can't short-circuit here even if specs are already sorted,
|
||||
// since they might yet need deduplication.
|
||||
// A lone import, however, may be safely ignored.
|
||||
if len(specs) <= 1 {
|
||||
return specs
|
||||
}
|
||||
|
||||
// Record positions for specs.
|
||||
pos := make([]posSpan, len(specs))
|
||||
for i, s := range specs {
|
||||
pos[i] = posSpan{s.Pos(), s.End()}
|
||||
}
|
||||
|
||||
// Identify comments in this range.
|
||||
// Any comment from pos[0].Start to the final line counts.
|
||||
lastLine := tokFile.Line(pos[len(pos)-1].End)
|
||||
cstart := len(f.Comments)
|
||||
cend := len(f.Comments)
|
||||
for i, g := range f.Comments {
|
||||
if g.Pos() < pos[0].Start {
|
||||
continue
|
||||
}
|
||||
if i < cstart {
|
||||
cstart = i
|
||||
}
|
||||
if tokFile.Line(g.End()) > lastLine {
|
||||
cend = i
|
||||
break
|
||||
}
|
||||
}
|
||||
comments := f.Comments[cstart:cend]
|
||||
|
||||
// Assign each comment to the import spec preceding it.
|
||||
importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
|
||||
specIndex := 0
|
||||
for _, g := range comments {
|
||||
for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
|
||||
specIndex++
|
||||
}
|
||||
s := specs[specIndex].(*ast.ImportSpec)
|
||||
importComment[s] = append(importComment[s], g)
|
||||
}
|
||||
|
||||
// Sort the import specs by import path.
|
||||
// Remove duplicates, when possible without data loss.
|
||||
// Reassign the import paths to have the same position sequence.
|
||||
// Reassign each comment to abut the end of its spec.
|
||||
// Sort the comments by new position.
|
||||
sort.Sort(byImportSpec{localPrefix, specs})
|
||||
|
||||
// Dedup. Thanks to our sorting, we can just consider
|
||||
// adjacent pairs of imports.
|
||||
deduped := specs[:0]
|
||||
for i, s := range specs {
|
||||
if i == len(specs)-1 || !collapse(s, specs[i+1]) {
|
||||
deduped = append(deduped, s)
|
||||
} else {
|
||||
p := s.Pos()
|
||||
tokFile.MergeLine(tokFile.Line(p)) // has side effects!
|
||||
}
|
||||
}
|
||||
specs = deduped
|
||||
|
||||
// Fix up comment positions
|
||||
for i, s := range specs {
|
||||
s := s.(*ast.ImportSpec)
|
||||
if s.Name != nil {
|
||||
s.Name.NamePos = pos[i].Start
|
||||
}
|
||||
s.Path.ValuePos = pos[i].Start
|
||||
s.EndPos = pos[i].End
|
||||
nextSpecPos := pos[i].End
|
||||
|
||||
for _, g := range importComment[s] {
|
||||
for _, c := range g.List {
|
||||
c.Slash = pos[i].End
|
||||
nextSpecPos = c.End()
|
||||
}
|
||||
}
|
||||
if i < len(specs)-1 {
|
||||
pos[i+1].Start = nextSpecPos
|
||||
pos[i+1].End = nextSpecPos
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byCommentPos(comments))
|
||||
|
||||
// Fixing up comments can insert blank lines, because import specs end up on different lines.
|
||||
// We remove those blank lines here by merging each import spec onto the first import spec's line.
|
||||
firstSpecLine := tokFile.Line(specs[0].Pos())
|
||||
for _, s := range specs[1:] {
|
||||
p := s.Pos()
|
||||
line := tokFile.Line(p)
|
||||
for previousLine := line - 1; previousLine >= firstSpecLine; {
|
||||
// MergeLine can panic. Avoid the panic at the cost of not removing the blank line
|
||||
// golang/go#50329
|
||||
if previousLine > 0 && previousLine < tokFile.LineCount() {
|
||||
tokFile.MergeLine(previousLine) // has side effects!
|
||||
previousLine--
|
||||
} else {
|
||||
// try to gather some data to diagnose how this could happen
|
||||
req := "Please report what the imports section of your go file looked like."
|
||||
log.Printf("panic avoided: first:%d line:%d previous:%d max:%d. %s",
|
||||
firstSpecLine, line, previousLine, tokFile.LineCount(), req)
|
||||
}
|
||||
}
|
||||
}
|
||||
return specs
|
||||
}
|
||||
|
||||
type byImportSpec struct {
|
||||
localPrefix string
|
||||
specs []ast.Spec // slice of *ast.ImportSpec
|
||||
}
|
||||
|
||||
func (x byImportSpec) Len() int { return len(x.specs) }
|
||||
func (x byImportSpec) Swap(i, j int) { x.specs[i], x.specs[j] = x.specs[j], x.specs[i] }
|
||||
func (x byImportSpec) Less(i, j int) bool {
|
||||
ipath := importPath(x.specs[i])
|
||||
jpath := importPath(x.specs[j])
|
||||
|
||||
igroup := importGroup(x.localPrefix, ipath)
|
||||
jgroup := importGroup(x.localPrefix, jpath)
|
||||
if igroup != jgroup {
|
||||
return igroup < jgroup
|
||||
}
|
||||
|
||||
if ipath != jpath {
|
||||
return ipath < jpath
|
||||
}
|
||||
iname := importName(x.specs[i])
|
||||
jname := importName(x.specs[j])
|
||||
|
||||
if iname != jname {
|
||||
return iname < jname
|
||||
}
|
||||
return importComment(x.specs[i]) < importComment(x.specs[j])
|
||||
}
|
||||
|
||||
type byCommentPos []*ast.CommentGroup
|
||||
|
||||
func (x byCommentPos) Len() int { return len(x) }
|
||||
func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
|
||||
11345
vendor/golang.org/x/tools/internal/imports/zstdlib.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff

30
vendor/golang.org/x/tools/internal/packagesinternal/packages.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package packagesinternal exposes internal-only fields from go/packages.
|
||||
package packagesinternal
|
||||
|
||||
import (
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
)
|
||||
|
||||
var GetForTest = func(p interface{}) string { return "" }
|
||||
var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
|
||||
|
||||
type PackageError struct {
|
||||
ImportStack []string // shortest path from package named on command line to this one
|
||||
Pos string // position of error (if present, file:line:col)
|
||||
Err string // the error itself
|
||||
}
|
||||
|
||||
var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil }
|
||||
|
||||
var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {}
|
||||
|
||||
var TypecheckCgo int
|
||||
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
|
||||
var ForTest int // must be set as a LoadMode to call GetForTest
|
||||
|
||||
var SetModFlag = func(config interface{}, value string) {}
|
||||
var SetModFile = func(config interface{}, value string) {}
|
||||
77
vendor/golang.org/x/tools/internal/pkgbits/codes.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkgbits
|
||||
|
||||
// A Code is an enum value that can be encoded into bitstreams.
|
||||
//
|
||||
// Code types are preferable for enum types, because they allow
|
||||
// Decoder to detect desyncs.
|
||||
type Code interface {
|
||||
// Marker returns the SyncMarker for the Code's dynamic type.
|
||||
Marker() SyncMarker
|
||||
|
||||
// Value returns the Code's ordinal value.
|
||||
Value() int
|
||||
}
|
||||
|
||||
// A CodeVal distinguishes among go/constant.Value encodings.
|
||||
type CodeVal int
|
||||
|
||||
func (c CodeVal) Marker() SyncMarker { return SyncVal }
|
||||
func (c CodeVal) Value() int { return int(c) }
|
||||
|
||||
// Note: These values are public and cannot be changed without
|
||||
// updating the go/types importers.
|
||||
|
||||
const (
|
||||
ValBool CodeVal = iota
|
||||
ValString
|
||||
ValInt64
|
||||
ValBigInt
|
||||
ValBigRat
|
||||
ValBigFloat
|
||||
)
|
||||
|
||||
// A CodeType distinguishes among go/types.Type encodings.
|
||||
type CodeType int
|
||||
|
||||
func (c CodeType) Marker() SyncMarker { return SyncType }
|
||||
func (c CodeType) Value() int { return int(c) }
|
||||
|
||||
// Note: These values are public and cannot be changed without
|
||||
// updating the go/types importers.
|
||||
|
||||
const (
|
||||
TypeBasic CodeType = iota
|
||||
TypeNamed
|
||||
TypePointer
|
||||
TypeSlice
|
||||
TypeArray
|
||||
TypeChan
|
||||
TypeMap
|
||||
TypeSignature
|
||||
TypeStruct
|
||||
TypeInterface
|
||||
TypeUnion
|
||||
TypeTypeParam
|
||||
)
|
||||
|
||||
// A CodeObj distinguishes among go/types.Object encodings.
|
||||
type CodeObj int
|
||||
|
||||
func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
|
||||
func (c CodeObj) Value() int { return int(c) }
|
||||
|
||||
// Note: These values are public and cannot be changed without
|
||||
// updating the go/types importers.
|
||||
|
||||
const (
|
||||
ObjAlias CodeObj = iota
|
||||
ObjConst
|
||||
ObjType
|
||||
ObjFunc
|
||||
ObjVar
|
||||
ObjStub
|
||||
)
|
||||
517
vendor/golang.org/x/tools/internal/pkgbits/decoder.go
generated
vendored
Normal file
@@ -0,0 +1,517 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkgbits
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"io"
|
||||
"math/big"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A PkgDecoder provides methods for decoding a package's Unified IR
|
||||
// export data.
|
||||
type PkgDecoder struct {
|
||||
// version is the file format version.
|
||||
version uint32
|
||||
|
||||
// sync indicates whether the file uses sync markers.
|
||||
sync bool
|
||||
|
||||
// pkgPath is the package path for the package to be decoded.
|
||||
//
|
||||
// TODO(mdempsky): Remove; unneeded since CL 391014.
|
||||
pkgPath string
|
||||
|
||||
// elemData is the full data payload of the encoded package.
|
||||
// Elements are densely and contiguously packed together.
|
||||
//
|
||||
// The last 8 bytes of elemData are the package fingerprint.
|
||||
elemData string
|
||||
|
||||
// elemEnds stores the byte-offset end positions of element
|
||||
// bitstreams within elemData.
|
||||
//
|
||||
// For example, element I's bitstream data starts at elemEnds[I-1]
|
||||
// (or 0, if I==0) and ends at elemEnds[I].
|
||||
//
|
||||
// Note: elemEnds is indexed by absolute indices, not
|
||||
// section-relative indices.
|
||||
elemEnds []uint32
|
||||
|
||||
// elemEndsEnds stores the index-offset end positions of relocation
|
||||
// sections within elemEnds.
|
||||
//
|
||||
// For example, section K's end positions start at elemEndsEnds[K-1]
|
||||
// (or 0, if K==0) and end at elemEndsEnds[K].
|
||||
elemEndsEnds [numRelocs]uint32
|
||||
|
||||
scratchRelocEnt []RelocEnt
|
||||
}
|
||||
|
||||
// PkgPath returns the package path for the package
|
||||
//
|
||||
// TODO(mdempsky): Remove; unneeded since CL 391014.
|
||||
func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
|
||||
|
||||
// SyncMarkers reports whether pr uses sync markers.
|
||||
func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
|
||||
|
||||
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
|
||||
// IR export data from input. pkgPath is the package path for the
|
||||
// compilation unit that produced the export data.
|
||||
//
|
||||
// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
|
||||
func NewPkgDecoder(pkgPath, input string) PkgDecoder {
|
||||
pr := PkgDecoder{
|
||||
pkgPath: pkgPath,
|
||||
}
|
||||
|
||||
// TODO(mdempsky): Implement direct indexing of input string to
|
||||
// avoid copying the position information.
|
||||
|
||||
r := strings.NewReader(input)
|
||||
|
||||
assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
|
||||
|
||||
switch pr.version {
|
||||
default:
|
||||
panic(fmt.Errorf("unsupported version: %v", pr.version))
|
||||
case 0:
|
||||
// no flags
|
||||
case 1:
|
||||
var flags uint32
|
||||
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
|
||||
pr.sync = flags&flagSyncMarkers != 0
|
||||
}
|
||||
|
||||
assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
|
||||
|
||||
pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
|
||||
assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
|
||||
|
||||
pos, err := r.Seek(0, io.SeekCurrent)
|
||||
assert(err == nil)
|
||||
|
||||
pr.elemData = input[pos:]
|
||||
assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
|
||||
|
||||
return pr
|
||||
}
|
||||
|
||||
// NumElems returns the number of elements in section k.
|
||||
func (pr *PkgDecoder) NumElems(k RelocKind) int {
|
||||
count := int(pr.elemEndsEnds[k])
|
||||
if k > 0 {
|
||||
count -= int(pr.elemEndsEnds[k-1])
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// TotalElems returns the total number of elements across all sections.
|
||||
func (pr *PkgDecoder) TotalElems() int {
|
||||
return len(pr.elemEnds)
|
||||
}
|
||||
|
||||
// Fingerprint returns the package fingerprint.
|
||||
func (pr *PkgDecoder) Fingerprint() [8]byte {
|
||||
var fp [8]byte
|
||||
copy(fp[:], pr.elemData[len(pr.elemData)-8:])
|
||||
return fp
|
||||
}
|
||||
|
||||
// AbsIdx returns the absolute index for the given (section, index)
|
||||
// pair.
|
||||
func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
|
||||
absIdx := int(idx)
|
||||
if k > 0 {
|
||||
absIdx += int(pr.elemEndsEnds[k-1])
|
||||
}
|
||||
if absIdx >= int(pr.elemEndsEnds[k]) {
|
||||
errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
|
||||
}
|
||||
return absIdx
|
||||
}
|
||||
|
||||
// DataIdx returns the raw element bitstream for the given (section,
|
||||
// index) pair.
|
||||
func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string {
|
||||
absIdx := pr.AbsIdx(k, idx)
|
||||
|
||||
var start uint32
|
||||
if absIdx > 0 {
|
||||
start = pr.elemEnds[absIdx-1]
|
||||
}
|
||||
end := pr.elemEnds[absIdx]
|
||||
|
||||
return pr.elemData[start:end]
|
||||
}
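The elemEnds bookkeeping is plain cumulative offsets; the same slicing arithmetic in a tiny standalone sketch with made-up data:

package main

import "fmt"

// Elements are concatenated in elemData; elemEnds[i] is the end offset of
// element i, so element i spans [elemEnds[i-1], elemEnds[i]) (start 0 when
// i == 0), which is exactly the lookup DataIdx performs above.
func dataIdx(elemData string, elemEnds []uint32, i int) string {
	var start uint32
	if i > 0 {
		start = elemEnds[i-1]
	}
	return elemData[start:elemEnds[i]]
}

func main() {
	elemData := "foobarbaz"
	elemEnds := []uint32{3, 6, 9} // "foo", "bar", "baz"
	fmt.Println(dataIdx(elemData, elemEnds, 1)) // bar
}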
|
||||
|
||||
// StringIdx returns the string value for the given string index.
|
||||
func (pr *PkgDecoder) StringIdx(idx Index) string {
|
||||
return pr.DataIdx(RelocString, idx)
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder for the given (section, index) pair,
|
||||
// and decodes the given SyncMarker from the element bitstream.
|
||||
func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
|
||||
r := pr.NewDecoderRaw(k, idx)
|
||||
r.Sync(marker)
|
||||
return r
|
||||
}
|
||||
|
||||
// TempDecoder returns a Decoder for the given (section, index) pair,
|
||||
// and decodes the given SyncMarker from the element bitstream.
|
||||
// If possible the Decoder should be RetireDecoder'd when it is no longer
|
||||
// needed, this will avoid heap allocations.
|
||||
func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
|
||||
r := pr.TempDecoderRaw(k, idx)
|
||||
r.Sync(marker)
|
||||
return r
|
||||
}
|
||||
|
||||
func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
|
||||
pr.scratchRelocEnt = d.Relocs
|
||||
d.Relocs = nil
|
||||
}
|
||||
|
||||
// NewDecoderRaw returns a Decoder for the given (section, index) pair.
|
||||
//
|
||||
// Most callers should use NewDecoder instead.
|
||||
func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
|
||||
r := Decoder{
|
||||
common: pr,
|
||||
k: k,
|
||||
Idx: idx,
|
||||
}
|
||||
|
||||
// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
|
||||
r.Data = *strings.NewReader(pr.DataIdx(k, idx))
|
||||
|
||||
r.Sync(SyncRelocs)
|
||||
r.Relocs = make([]RelocEnt, r.Len())
|
||||
for i := range r.Relocs {
|
||||
r.Sync(SyncReloc)
|
||||
r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
|
||||
r := Decoder{
|
||||
common: pr,
|
||||
k: k,
|
||||
Idx: idx,
|
||||
}
|
||||
|
||||
r.Data.Reset(pr.DataIdx(k, idx))
|
||||
r.Sync(SyncRelocs)
|
||||
l := r.Len()
|
||||
if cap(pr.scratchRelocEnt) >= l {
|
||||
r.Relocs = pr.scratchRelocEnt[:l]
|
||||
pr.scratchRelocEnt = nil
|
||||
} else {
|
||||
r.Relocs = make([]RelocEnt, l)
|
||||
}
|
||||
for i := range r.Relocs {
|
||||
r.Sync(SyncReloc)
|
||||
r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// A Decoder provides methods for decoding an individual element's
|
||||
// bitstream data.
|
||||
type Decoder struct {
|
||||
common *PkgDecoder
|
||||
|
||||
Relocs []RelocEnt
|
||||
Data strings.Reader
|
||||
|
||||
k RelocKind
|
||||
Idx Index
|
||||
}
|
||||
|
||||
func (r *Decoder) checkErr(err error) {
|
||||
if err != nil {
|
||||
errorf("unexpected decoding error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Decoder) rawUvarint() uint64 {
|
||||
x, err := readUvarint(&r.Data)
|
||||
r.checkErr(err)
|
||||
return x
|
||||
}
|
||||
|
||||
// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
|
||||
// This avoids the interface conversion and thus has better escape properties,
|
||||
// which flows up the stack.
|
||||
func readUvarint(r *strings.Reader) (uint64, error) {
|
||||
var x uint64
|
||||
var s uint
|
||||
for i := 0; i < binary.MaxVarintLen64; i++ {
|
||||
b, err := r.ReadByte()
|
||||
if err != nil {
|
||||
if i > 0 && err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return x, err
|
||||
}
|
||||
if b < 0x80 {
|
||||
if i == binary.MaxVarintLen64-1 && b > 1 {
|
||||
return x, overflow
|
||||
}
|
||||
return x | uint64(b)<<s, nil
|
||||
}
|
||||
x |= uint64(b&0x7f) << s
|
||||
s += 7
|
||||
}
|
||||
return x, overflow
|
||||
}
|
||||
|
||||
var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
|
||||
|
||||
func (r *Decoder) rawVarint() int64 {
|
||||
ux := r.rawUvarint()
|
||||
|
||||
// Zig-zag decode.
|
||||
x := int64(ux >> 1)
|
||||
if ux&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x
|
||||
}
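The zig-zag step here pairs with the encoder's rawVarint; a standalone round-trip of just the zig-zag mapping, with the varint framing omitted:

package main

import "fmt"

// Zig-zag maps signed integers to unsigned ones so that small magnitudes
// stay small: 0, -1, 1, -2, 2, ... become 0, 1, 2, 3, 4, ...
func zigzagEncode(x int64) uint64 {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return ux
}

func zigzagDecode(ux uint64) int64 {
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x
}

func main() {
	for _, v := range []int64{0, -1, 1, -64, 63} {
		u := zigzagEncode(v)
		fmt.Println(v, "->", u, "->", zigzagDecode(u)) // round-trips to v
	}
}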
|
||||
|
||||
func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
|
||||
e := r.Relocs[idx]
|
||||
assert(e.Kind == k)
|
||||
return e.Idx
|
||||
}
|
||||
|
||||
// Sync decodes a sync marker from the element bitstream and asserts
|
||||
// that it matches the expected marker.
|
||||
//
|
||||
// If r.common.sync is false, then Sync is a no-op.
|
||||
func (r *Decoder) Sync(mWant SyncMarker) {
|
||||
if !r.common.sync {
|
||||
return
|
||||
}
|
||||
|
||||
pos, _ := r.Data.Seek(0, io.SeekCurrent)
|
||||
mHave := SyncMarker(r.rawUvarint())
|
||||
writerPCs := make([]int, r.rawUvarint())
|
||||
for i := range writerPCs {
|
||||
writerPCs[i] = int(r.rawUvarint())
|
||||
}
|
||||
|
||||
if mHave == mWant {
|
||||
return
|
||||
}
|
||||
|
||||
// There's some tension here between printing:
|
||||
//
|
||||
// (1) full file paths that tools can recognize (e.g., so emacs
|
||||
// hyperlinks the "file:line" text for easy navigation), or
|
||||
//
|
||||
// (2) short file paths that are easier for humans to read (e.g., by
|
||||
// omitting redundant or irrelevant details, so it's easier to
|
||||
// focus on the useful bits that remain).
|
||||
//
|
||||
// The current formatting favors the former, as it seems more
|
||||
// helpful in practice. But perhaps the formatting could be improved
|
||||
// to better address both concerns. For example, use relative file
|
||||
// paths if they would be shorter, or rewrite file paths to contain
|
||||
// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
|
||||
// to reliably expand that again.
|
||||
|
||||
fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
|
||||
|
||||
fmt.Printf("\nfound %v, written at:\n", mHave)
|
||||
if len(writerPCs) == 0 {
|
||||
fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
|
||||
}
|
||||
for _, pc := range writerPCs {
|
||||
fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
|
||||
}
|
||||
|
||||
fmt.Printf("\nexpected %v, reading at:\n", mWant)
|
||||
var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
|
||||
n := runtime.Callers(2, readerPCs[:])
|
||||
for _, pc := range fmtFrames(readerPCs[:n]...) {
|
||||
fmt.Printf("\t%s\n", pc)
|
||||
}
|
||||
|
||||
// We already printed a stack trace for the reader, so now we can
|
||||
// simply exit. Printing a second one with panic or base.Fatalf
|
||||
// would just be noise.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Bool decodes and returns a bool value from the element bitstream.
|
||||
func (r *Decoder) Bool() bool {
|
||||
r.Sync(SyncBool)
|
||||
x, err := r.Data.ReadByte()
|
||||
r.checkErr(err)
|
||||
assert(x < 2)
|
||||
return x != 0
|
||||
}
|
||||
|
||||
// Int64 decodes and returns an int64 value from the element bitstream.
|
||||
func (r *Decoder) Int64() int64 {
|
||||
r.Sync(SyncInt64)
|
||||
return r.rawVarint()
|
||||
}
|
||||
|
||||
// Uint64 decodes and returns a uint64 value from the element bitstream.
|
||||
func (r *Decoder) Uint64() uint64 {
|
||||
r.Sync(SyncUint64)
|
||||
return r.rawUvarint()
|
||||
}
|
||||
|
||||
// Len decodes and returns a non-negative int value from the element bitstream.
|
||||
func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
|
||||
|
||||
// Int decodes and returns an int value from the element bitstream.
|
||||
func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
|
||||
|
||||
// Uint decodes and returns a uint value from the element bitstream.
|
||||
func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
|
||||
|
||||
// Code decodes a Code value from the element bitstream and returns
|
||||
// its ordinal value. It's the caller's responsibility to convert the
|
||||
// result to an appropriate Code type.
|
||||
//
|
||||
// TODO(mdempsky): Ideally this method would have signature "Code[T
|
||||
// Code] T" instead, but we don't allow generic methods and the
|
||||
// compiler can't depend on generics yet anyway.
|
||||
func (r *Decoder) Code(mark SyncMarker) int {
|
||||
r.Sync(mark)
|
||||
return r.Len()
|
||||
}
|
||||
|
||||
// Reloc decodes a relocation of expected section k from the element
|
||||
// bitstream and returns an index to the referenced element.
|
||||
func (r *Decoder) Reloc(k RelocKind) Index {
|
||||
r.Sync(SyncUseReloc)
|
||||
return r.rawReloc(k, r.Len())
|
||||
}
|
||||
|
||||
// String decodes and returns a string value from the element
|
||||
// bitstream.
|
||||
func (r *Decoder) String() string {
|
||||
r.Sync(SyncString)
|
||||
return r.common.StringIdx(r.Reloc(RelocString))
|
||||
}
|
||||
|
||||
// Strings decodes and returns a variable-length slice of strings from
|
||||
// the element bitstream.
|
||||
func (r *Decoder) Strings() []string {
|
||||
res := make([]string, r.Len())
|
||||
for i := range res {
|
||||
res[i] = r.String()
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Value decodes and returns a constant.Value from the element
|
||||
// bitstream.
|
||||
func (r *Decoder) Value() constant.Value {
|
||||
r.Sync(SyncValue)
|
||||
isComplex := r.Bool()
|
||||
val := r.scalar()
|
||||
if isComplex {
|
||||
val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
func (r *Decoder) scalar() constant.Value {
|
||||
switch tag := CodeVal(r.Code(SyncVal)); tag {
|
||||
default:
|
||||
panic(fmt.Errorf("unexpected scalar tag: %v", tag))
|
||||
|
||||
case ValBool:
|
||||
return constant.MakeBool(r.Bool())
|
||||
case ValString:
|
||||
return constant.MakeString(r.String())
|
||||
case ValInt64:
|
||||
return constant.MakeInt64(r.Int64())
|
||||
case ValBigInt:
|
||||
return constant.Make(r.bigInt())
|
||||
case ValBigRat:
|
||||
num := r.bigInt()
|
||||
denom := r.bigInt()
|
||||
return constant.Make(new(big.Rat).SetFrac(num, denom))
|
||||
case ValBigFloat:
|
||||
return constant.Make(r.bigFloat())
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Decoder) bigInt() *big.Int {
|
||||
v := new(big.Int).SetBytes([]byte(r.String()))
|
||||
if r.Bool() {
|
||||
v.Neg(v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (r *Decoder) bigFloat() *big.Float {
|
||||
v := new(big.Float).SetPrec(512)
|
||||
assert(v.UnmarshalText([]byte(r.String())) == nil)
|
||||
return v
|
||||
}
|
||||
|
||||
// @@@ Helpers
|
||||
|
||||
// TODO(mdempsky): These should probably be removed. I think they're a
|
||||
// smell that the export data format is not yet quite right.
|
||||
|
||||
// PeekPkgPath returns the package path for the specified package
|
||||
// index.
|
||||
func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
|
||||
var path string
|
||||
{
|
||||
r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
|
||||
path = r.String()
|
||||
pr.RetireDecoder(&r)
|
||||
}
|
||||
if path == "" {
|
||||
path = pr.pkgPath
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
// PeekObj returns the package path, object name, and CodeObj for the
|
||||
// specified object index.
|
||||
func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
|
||||
var ridx Index
|
||||
var name string
|
||||
var rcode int
|
||||
{
|
||||
r := pr.TempDecoder(RelocName, idx, SyncObject1)
|
||||
r.Sync(SyncSym)
|
||||
r.Sync(SyncPkg)
|
||||
ridx = r.Reloc(RelocPkg)
|
||||
name = r.String()
|
||||
rcode = r.Code(SyncCodeObj)
|
||||
pr.RetireDecoder(&r)
|
||||
}
|
||||
|
||||
path := pr.PeekPkgPath(ridx)
|
||||
assert(name != "")
|
||||
|
||||
tag := CodeObj(rcode)
|
||||
|
||||
return path, name, tag
|
||||
}
|
||||
32
vendor/golang.org/x/tools/internal/pkgbits/doc.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package pkgbits implements low-level coding abstractions for
|
||||
// Unified IR's export data format.
|
||||
//
|
||||
// At a low-level, a package is a collection of bitstream elements.
|
||||
// Each element has a "kind" and a dense, non-negative index.
|
||||
// Elements can be randomly accessed given their kind and index.
|
||||
//
|
||||
// Individual elements are sequences of variable-length values (e.g.,
|
||||
// integers, booleans, strings, go/constant values, cross-references
|
||||
// to other elements). Package pkgbits provides APIs for encoding and
|
||||
// decoding these low-level values, but the details of mapping
|
||||
// higher-level Go constructs into elements is left to higher-level
|
||||
// abstractions.
|
||||
//
|
||||
// Elements may cross-reference each other with "relocations." For
|
||||
// example, an element representing a pointer type has a relocation
|
||||
// referring to the element type.
|
||||
//
|
||||
// Go constructs may be composed as a constellation of multiple
|
||||
// elements. For example, a declared function may have one element to
|
||||
// describe the object (e.g., its name, type, position), and a
|
||||
// separate element to describe its function body. This allows readers
|
||||
// some flexibility in efficiently seeking or re-reading data (e.g.,
|
||||
// inlining requires re-reading the function body for each inlined
|
||||
// call, without needing to re-read the object-level details).
|
||||
//
|
||||
// This is a copy of internal/pkgbits in the Go implementation.
|
||||
package pkgbits
|
||||
383
vendor/golang.org/x/tools/internal/pkgbits/encoder.go
generated
vendored
Normal file
@@ -0,0 +1,383 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkgbits
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/binary"
|
||||
"go/constant"
|
||||
"io"
|
||||
"math/big"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// currentVersion is the current version number.
|
||||
//
|
||||
// - v0: initial prototype
|
||||
//
|
||||
// - v1: adds the flags uint32 word
|
||||
const currentVersion uint32 = 1
|
||||
|
||||
// A PkgEncoder provides methods for encoding a package's Unified IR
|
||||
// export data.
|
||||
type PkgEncoder struct {
|
||||
// elems holds the bitstream for previously encoded elements.
|
||||
elems [numRelocs][]string
|
||||
|
||||
// stringsIdx maps previously encoded strings to their index within
|
||||
// the RelocString section, to allow deduplication. That is,
|
||||
// elems[RelocString][stringsIdx[s]] == s (if present).
|
||||
stringsIdx map[string]Index
|
||||
|
||||
// syncFrames is the number of frames to write at each sync
|
||||
// marker. A negative value means sync markers are omitted.
|
||||
syncFrames int
|
||||
}
|
||||
|
||||
// SyncMarkers reports whether pw uses sync markers.
|
||||
func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
|
||||
|
||||
// NewPkgEncoder returns an initialized PkgEncoder.
|
||||
//
|
||||
// syncFrames is the number of caller frames that should be serialized
|
||||
// at Sync points. Serializing additional frames results in larger
|
||||
// export data files, but can help diagnosing desync errors in
|
||||
// higher-level Unified IR reader/writer code. If syncFrames is
|
||||
// negative, then sync markers are omitted entirely.
|
||||
func NewPkgEncoder(syncFrames int) PkgEncoder {
|
||||
return PkgEncoder{
|
||||
stringsIdx: make(map[string]Index),
|
||||
syncFrames: syncFrames,
|
||||
}
|
||||
}
|
||||
|
||||
// DumpTo writes the package's encoded data to out0 and returns the
|
||||
// package fingerprint.
|
||||
func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
|
||||
h := md5.New()
|
||||
out := io.MultiWriter(out0, h)
|
||||
|
||||
writeUint32 := func(x uint32) {
|
||||
assert(binary.Write(out, binary.LittleEndian, x) == nil)
|
||||
}
|
||||
|
||||
writeUint32(currentVersion)
|
||||
|
||||
var flags uint32
|
||||
if pw.SyncMarkers() {
|
||||
flags |= flagSyncMarkers
|
||||
}
|
||||
writeUint32(flags)
|
||||
|
||||
// Write elemEndsEnds.
|
||||
var sum uint32
|
||||
for _, elems := range &pw.elems {
|
||||
sum += uint32(len(elems))
|
||||
writeUint32(sum)
|
||||
}
|
||||
|
||||
// Write elemEnds.
|
||||
sum = 0
|
||||
for _, elems := range &pw.elems {
|
||||
for _, elem := range elems {
|
||||
sum += uint32(len(elem))
|
||||
writeUint32(sum)
|
||||
}
|
||||
}
|
||||
|
||||
// Write elemData.
|
||||
for _, elems := range &pw.elems {
|
||||
for _, elem := range elems {
|
||||
_, err := io.WriteString(out, elem)
|
||||
assert(err == nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Write fingerprint.
|
||||
copy(fingerprint[:], h.Sum(nil))
|
||||
_, err := out0.Write(fingerprint[:])
|
||||
assert(err == nil)
|
||||
|
||||
return
|
||||
}
|
||||
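// Illustrative note, not part of the vendored file: the stream DumpTo writes
// is laid out as the version word, the flags word, cumulative per-section
// element counts, cumulative per-element byte offsets, the concatenated
// element data, and finally the 8-byte fingerprint (the first half of the
// MD5 sum of everything written before it).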
|
||||
// StringIdx adds a string value to the strings section, if not
|
||||
// already present, and returns its index.
|
||||
func (pw *PkgEncoder) StringIdx(s string) Index {
|
||||
if idx, ok := pw.stringsIdx[s]; ok {
|
||||
assert(pw.elems[RelocString][idx] == s)
|
||||
return idx
|
||||
}
|
||||
|
||||
idx := Index(len(pw.elems[RelocString]))
|
||||
pw.elems[RelocString] = append(pw.elems[RelocString], s)
|
||||
pw.stringsIdx[s] = idx
|
||||
return idx
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder for a new element within the given
|
||||
// section, and encodes the given SyncMarker as the start of the
|
||||
// element bitstream.
|
||||
func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
|
||||
e := pw.NewEncoderRaw(k)
|
||||
e.Sync(marker)
|
||||
return e
|
||||
}
|
||||
|
||||
// NewEncoderRaw returns an Encoder for a new element within the given
|
||||
// section.
|
||||
//
|
||||
// Most callers should use NewEncoder instead.
|
||||
func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
|
||||
idx := Index(len(pw.elems[k]))
|
||||
pw.elems[k] = append(pw.elems[k], "") // placeholder
|
||||
|
||||
return Encoder{
|
||||
p: pw,
|
||||
k: k,
|
||||
Idx: idx,
|
||||
}
|
||||
}
|
||||
|
||||
// An Encoder provides methods for encoding an individual element's
|
||||
// bitstream data.
|
||||
type Encoder struct {
|
||||
p *PkgEncoder
|
||||
|
||||
Relocs []RelocEnt
|
||||
RelocMap map[RelocEnt]uint32
|
||||
Data bytes.Buffer // accumulated element bitstream data
|
||||
|
||||
encodingRelocHeader bool
|
||||
|
||||
k RelocKind
|
||||
Idx Index // index within relocation section
|
||||
}
|
||||
|
||||
// Flush finalizes the element's bitstream and returns its Index.
|
||||
func (w *Encoder) Flush() Index {
|
||||
var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
|
||||
|
||||
// Backup the data so we write the relocations at the front.
|
||||
var tmp bytes.Buffer
|
||||
io.Copy(&tmp, &w.Data)
|
||||
|
||||
// TODO(mdempsky): Consider writing these out separately so they're
|
||||
// easier to strip, along with function bodies, so that we can prune
|
||||
// down to just the data that's relevant to go/types.
|
||||
if w.encodingRelocHeader {
|
||||
panic("encodingRelocHeader already true; recursive flush?")
|
||||
}
|
||||
w.encodingRelocHeader = true
|
||||
w.Sync(SyncRelocs)
|
||||
w.Len(len(w.Relocs))
|
||||
for _, rEnt := range w.Relocs {
|
||||
w.Sync(SyncReloc)
|
||||
w.Len(int(rEnt.Kind))
|
||||
w.Len(int(rEnt.Idx))
|
||||
}
|
||||
|
||||
io.Copy(&sb, &w.Data)
|
||||
io.Copy(&sb, &tmp)
|
||||
w.p.elems[w.k][w.Idx] = sb.String()
|
||||
|
||||
return w.Idx
|
||||
}
|
||||
|
||||
func (w *Encoder) checkErr(err error) {
|
||||
if err != nil {
|
||||
errorf("unexpected encoding error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Encoder) rawUvarint(x uint64) {
|
||||
var buf [binary.MaxVarintLen64]byte
|
||||
n := binary.PutUvarint(buf[:], x)
|
||||
_, err := w.Data.Write(buf[:n])
|
||||
w.checkErr(err)
|
||||
}
|
||||
|
||||
func (w *Encoder) rawVarint(x int64) {
|
||||
// Zig-zag encode.
|
||||
ux := uint64(x) << 1
|
||||
if x < 0 {
|
||||
ux = ^ux
|
||||
}
|
||||
|
||||
w.rawUvarint(ux)
|
||||
}
|
||||
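// Illustrative note, not part of the vendored file: rawVarint's zig-zag step
// maps small magnitudes of either sign to small unsigned values before the
// uvarint encoding, e.g. 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, so values
// near zero stay one byte long.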
|
||||
func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
|
||||
e := RelocEnt{r, idx}
|
||||
if w.RelocMap != nil {
|
||||
if i, ok := w.RelocMap[e]; ok {
|
||||
return int(i)
|
||||
}
|
||||
} else {
|
||||
w.RelocMap = make(map[RelocEnt]uint32)
|
||||
}
|
||||
|
||||
i := len(w.Relocs)
|
||||
w.RelocMap[e] = uint32(i)
|
||||
w.Relocs = append(w.Relocs, e)
|
||||
return i
|
||||
}
|
||||
|
||||
func (w *Encoder) Sync(m SyncMarker) {
|
||||
if !w.p.SyncMarkers() {
|
||||
return
|
||||
}
|
||||
|
||||
// Writing out stack frame string references requires working
|
||||
// relocations, but writing out the relocations themselves involves
|
||||
// sync markers. To prevent infinite recursion, we simply trim the
|
||||
// stack frame for sync markers within the relocation header.
|
||||
var frames []string
|
||||
if !w.encodingRelocHeader && w.p.syncFrames > 0 {
|
||||
pcs := make([]uintptr, w.p.syncFrames)
|
||||
n := runtime.Callers(2, pcs)
|
||||
frames = fmtFrames(pcs[:n]...)
|
||||
}
|
||||
|
||||
// TODO(mdempsky): Save space by writing out stack frames as a
|
||||
// linked list so we can share common stack frames.
|
||||
w.rawUvarint(uint64(m))
|
||||
w.rawUvarint(uint64(len(frames)))
|
||||
for _, frame := range frames {
|
||||
w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
|
||||
}
|
||||
}
|
||||
|
||||
// Bool encodes and writes a bool value into the element bitstream,
|
||||
// and then returns the bool value.
|
||||
//
|
||||
// For simple, 2-alternative encodings, the idiomatic way to call Bool
|
||||
// is something like:
|
||||
//
|
||||
// if w.Bool(x != 0) {
|
||||
// // alternative #1
|
||||
// } else {
|
||||
// // alternative #2
|
||||
// }
|
||||
//
|
||||
// For multi-alternative encodings, use Code instead.
|
||||
func (w *Encoder) Bool(b bool) bool {
|
||||
w.Sync(SyncBool)
|
||||
var x byte
|
||||
if b {
|
||||
x = 1
|
||||
}
|
||||
err := w.Data.WriteByte(x)
|
||||
w.checkErr(err)
|
||||
return b
|
||||
}
|
||||
|
||||
// Int64 encodes and writes an int64 value into the element bitstream.
|
||||
func (w *Encoder) Int64(x int64) {
|
||||
w.Sync(SyncInt64)
|
||||
w.rawVarint(x)
|
||||
}
|
||||
|
||||
// Uint64 encodes and writes a uint64 value into the element bitstream.
|
||||
func (w *Encoder) Uint64(x uint64) {
|
||||
w.Sync(SyncUint64)
|
||||
w.rawUvarint(x)
|
||||
}
|
||||
|
||||
// Len encodes and writes a non-negative int value into the element bitstream.
|
||||
func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
|
||||
|
||||
// Int encodes and writes an int value into the element bitstream.
|
||||
func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
|
||||
|
||||
// Uint encodes and writes a uint value into the element bitstream.
|
||||
func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
|
||||
|
||||
// Reloc encodes and writes a relocation for the given (section,
|
||||
// index) pair into the element bitstream.
|
||||
//
|
||||
// Note: Only the index is formally written into the element
|
||||
// bitstream, so bitstream decoders must know from context which
|
||||
// section an encoded relocation refers to.
|
||||
func (w *Encoder) Reloc(r RelocKind, idx Index) {
|
||||
w.Sync(SyncUseReloc)
|
||||
w.Len(w.rawReloc(r, idx))
|
||||
}
|
||||
|
||||
// Code encodes and writes a Code value into the element bitstream.
|
||||
func (w *Encoder) Code(c Code) {
|
||||
w.Sync(c.Marker())
|
||||
w.Len(c.Value())
|
||||
}
|
||||
|
||||
// String encodes and writes a string value into the element
|
||||
// bitstream.
|
||||
//
|
||||
// Internally, strings are deduplicated by adding them to the strings
|
||||
// section (if not already present), and then writing a relocation
|
||||
// into the element bitstream.
|
||||
func (w *Encoder) String(s string) {
|
||||
w.Sync(SyncString)
|
||||
w.Reloc(RelocString, w.p.StringIdx(s))
|
||||
}
|
||||
|
||||
// Strings encodes and writes a variable-length slice of strings into
|
||||
// the element bitstream.
|
||||
func (w *Encoder) Strings(ss []string) {
|
||||
w.Len(len(ss))
|
||||
for _, s := range ss {
|
||||
w.String(s)
|
||||
}
|
||||
}
|
||||
|
||||
// Value encodes and writes a constant.Value into the element
|
||||
// bitstream.
|
||||
func (w *Encoder) Value(val constant.Value) {
|
||||
w.Sync(SyncValue)
|
||||
if w.Bool(val.Kind() == constant.Complex) {
|
||||
w.scalar(constant.Real(val))
|
||||
w.scalar(constant.Imag(val))
|
||||
} else {
|
||||
w.scalar(val)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Encoder) scalar(val constant.Value) {
|
||||
switch v := constant.Val(val).(type) {
|
||||
default:
|
||||
errorf("unhandled %v (%v)", val, val.Kind())
|
||||
case bool:
|
||||
w.Code(ValBool)
|
||||
w.Bool(v)
|
||||
case string:
|
||||
w.Code(ValString)
|
||||
w.String(v)
|
||||
case int64:
|
||||
w.Code(ValInt64)
|
||||
w.Int64(v)
|
||||
case *big.Int:
|
||||
w.Code(ValBigInt)
|
||||
w.bigInt(v)
|
||||
case *big.Rat:
|
||||
w.Code(ValBigRat)
|
||||
w.bigInt(v.Num())
|
||||
w.bigInt(v.Denom())
|
||||
case *big.Float:
|
||||
w.Code(ValBigFloat)
|
||||
w.bigFloat(v)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Encoder) bigInt(v *big.Int) {
|
||||
b := v.Bytes()
|
||||
w.String(string(b)) // TODO: More efficient encoding.
|
||||
w.Bool(v.Sign() < 0)
|
||||
}
|
||||
|
||||
func (w *Encoder) bigFloat(v *big.Float) {
|
||||
b := v.Append(nil, 'p', -1)
|
||||
w.String(string(b)) // TODO: More efficient encoding.
|
||||
}
|
||||
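As a rough usage sketch, not part of the vendored file: the write side of this API is driven by creating a PkgEncoder, filling elements section by section, and dumping the stream. The helper name sketchRoundTrip below is hypothetical, and the code assumes it lives inside the pkgbits package itself, since the import path is internal to x/tools.

// sketchRoundTrip encodes one element into the meta section and dumps the
// whole stream, returning the package fingerprint.
func sketchRoundTrip(out io.Writer) [8]byte {
	pw := NewPkgEncoder(-1) // negative syncFrames: sync markers are omitted

	w := pw.NewEncoder(RelocMeta, SyncPublic)
	w.Bool(true)      // single byte
	w.Int64(-42)      // zig-zag + uvarint
	w.String("hello") // deduplicated via the strings section
	w.Flush()         // finalize this element's bitstream

	return pw.DumpTo(out)
}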
9
vendor/golang.org/x/tools/internal/pkgbits/flags.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkgbits

const (
	flagSyncMarkers = 1 << iota // file format contains sync markers
)
21
vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.7
// +build !go1.7

// TODO(mdempsky): Remove after #44505 is resolved

package pkgbits

import "runtime"

func walkFrames(pcs []uintptr, visit frameVisitor) {
	for _, pc := range pcs {
		fn := runtime.FuncForPC(pc)
		file, line := fn.FileLine(pc)

		visit(file, line, fn.Name(), pc-fn.Entry())
	}
}
||||
28
vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.7
// +build go1.7

package pkgbits

import "runtime"

// walkFrames calls visit for each call frame represented by pcs.
//
// pcs should be a slice of PCs, as returned by runtime.Callers.
func walkFrames(pcs []uintptr, visit frameVisitor) {
	if len(pcs) == 0 {
		return
	}

	frames := runtime.CallersFrames(pcs)
	for {
		frame, more := frames.Next()
		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
		if !more {
			return
		}
	}
}
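// Illustrative note, not part of the vendored file: callers of walkFrames
// obtain pcs via runtime.Callers, e.g.
//
//	pcs := make([]uintptr, 8)
//	n := runtime.Callers(2, pcs)
//	walkFrames(pcs[:n], func(file string, line int, name string, offset uintptr) { ... })
//
// Each visit receives the frame's file, line, function name, and the PC
// offset within that function, which fmtFrames (in sync.go below) formats
// into human-readable backtrace strings.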
42
vendor/golang.org/x/tools/internal/pkgbits/reloc.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkgbits

// A RelocKind indicates a particular section within a unified IR export.
type RelocKind int32

// An Index represents a bitstream element index within a particular
// section.
type Index int32

// A relocEnt (relocation entry) is an entry in an element's local
// reference table.
//
// TODO(mdempsky): Rename this too.
type RelocEnt struct {
	Kind RelocKind
	Idx  Index
}

// Reserved indices within the meta relocation section.
const (
	PublicRootIdx  Index = 0
	PrivateRootIdx Index = 1
)

const (
	RelocString RelocKind = iota
	RelocMeta
	RelocPosBase
	RelocPkg
	RelocName
	RelocType
	RelocObj
	RelocObjExt
	RelocObjDict
	RelocBody

	numRelocs = iota
)
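// Illustrative note, not part of the vendored file: an element is addressed
// by (section, index). For example, RelocEnt{Kind: RelocString, Idx: 3} in an
// element's local reference table points at the fourth string in the string
// section; Encoder.Reloc (in encoder.go above) writes only the position of
// that entry within the local table into the bitstream.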
17
vendor/golang.org/x/tools/internal/pkgbits/support.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkgbits

import "fmt"

func assert(b bool) {
	if !b {
		panic("assertion failed")
	}
}

func errorf(format string, args ...interface{}) {
	panic(fmt.Errorf(format, args...))
}
113
vendor/golang.org/x/tools/internal/pkgbits/sync.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkgbits

import (
	"fmt"
	"strings"
)

// fmtFrames formats a backtrace for reporting reader/writer desyncs.
func fmtFrames(pcs ...uintptr) []string {
	res := make([]string, 0, len(pcs))
	walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
		// Trim package from function name. It's just redundant noise.
		name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")

		res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
	})
	return res
}

type frameVisitor func(file string, line int, name string, offset uintptr)

// SyncMarker is an enum type that represents markers that may be
// written to export data to ensure the reader and writer stay
// synchronized.
type SyncMarker int

//go:generate stringer -type=SyncMarker -trimprefix=Sync

const (
	_ SyncMarker = iota

	// Public markers (known to go/types importers).

	// Low-level coding markers.
	SyncEOF
	SyncBool
	SyncInt64
	SyncUint64
	SyncString
	SyncValue
	SyncVal
	SyncRelocs
	SyncReloc
	SyncUseReloc

	// Higher-level object and type markers.
	SyncPublic
	SyncPos
	SyncPosBase
	SyncObject
	SyncObject1
	SyncPkg
	SyncPkgDef
	SyncMethod
	SyncType
	SyncTypeIdx
	SyncTypeParamNames
	SyncSignature
	SyncParams
	SyncParam
	SyncCodeObj
	SyncSym
	SyncLocalIdent
	SyncSelector

	// Private markers (only known to cmd/compile).
	SyncPrivate

	SyncFuncExt
	SyncVarExt
	SyncTypeExt
	SyncPragma

	SyncExprList
	SyncExprs
	SyncExpr
	SyncExprType
	SyncAssign
	SyncOp
	SyncFuncLit
	SyncCompLit

	SyncDecl
	SyncFuncBody
	SyncOpenScope
	SyncCloseScope
	SyncCloseAnotherScope
	SyncDeclNames
	SyncDeclName

	SyncStmts
	SyncBlockStmt
	SyncIfStmt
	SyncForStmt
	SyncSwitchStmt
	SyncRangeStmt
	SyncCaseClause
	SyncCommClause
	SyncSelectStmt
	SyncDecls
	SyncLabeledStmt
	SyncUseObjLocal
	SyncAddLocal
	SyncLinkname
	SyncStmt1
	SyncStmtsEnd
	SyncLabel
	SyncOptLabel
)
89
vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
|
||||
|
||||
package pkgbits
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[SyncEOF-1]
|
||||
_ = x[SyncBool-2]
|
||||
_ = x[SyncInt64-3]
|
||||
_ = x[SyncUint64-4]
|
||||
_ = x[SyncString-5]
|
||||
_ = x[SyncValue-6]
|
||||
_ = x[SyncVal-7]
|
||||
_ = x[SyncRelocs-8]
|
||||
_ = x[SyncReloc-9]
|
||||
_ = x[SyncUseReloc-10]
|
||||
_ = x[SyncPublic-11]
|
||||
_ = x[SyncPos-12]
|
||||
_ = x[SyncPosBase-13]
|
||||
_ = x[SyncObject-14]
|
||||
_ = x[SyncObject1-15]
|
||||
_ = x[SyncPkg-16]
|
||||
_ = x[SyncPkgDef-17]
|
||||
_ = x[SyncMethod-18]
|
||||
_ = x[SyncType-19]
|
||||
_ = x[SyncTypeIdx-20]
|
||||
_ = x[SyncTypeParamNames-21]
|
||||
_ = x[SyncSignature-22]
|
||||
_ = x[SyncParams-23]
|
||||
_ = x[SyncParam-24]
|
||||
_ = x[SyncCodeObj-25]
|
||||
_ = x[SyncSym-26]
|
||||
_ = x[SyncLocalIdent-27]
|
||||
_ = x[SyncSelector-28]
|
||||
_ = x[SyncPrivate-29]
|
||||
_ = x[SyncFuncExt-30]
|
||||
_ = x[SyncVarExt-31]
|
||||
_ = x[SyncTypeExt-32]
|
||||
_ = x[SyncPragma-33]
|
||||
_ = x[SyncExprList-34]
|
||||
_ = x[SyncExprs-35]
|
||||
_ = x[SyncExpr-36]
|
||||
_ = x[SyncExprType-37]
|
||||
_ = x[SyncAssign-38]
|
||||
_ = x[SyncOp-39]
|
||||
_ = x[SyncFuncLit-40]
|
||||
_ = x[SyncCompLit-41]
|
||||
_ = x[SyncDecl-42]
|
||||
_ = x[SyncFuncBody-43]
|
||||
_ = x[SyncOpenScope-44]
|
||||
_ = x[SyncCloseScope-45]
|
||||
_ = x[SyncCloseAnotherScope-46]
|
||||
_ = x[SyncDeclNames-47]
|
||||
_ = x[SyncDeclName-48]
|
||||
_ = x[SyncStmts-49]
|
||||
_ = x[SyncBlockStmt-50]
|
||||
_ = x[SyncIfStmt-51]
|
||||
_ = x[SyncForStmt-52]
|
||||
_ = x[SyncSwitchStmt-53]
|
||||
_ = x[SyncRangeStmt-54]
|
||||
_ = x[SyncCaseClause-55]
|
||||
_ = x[SyncCommClause-56]
|
||||
_ = x[SyncSelectStmt-57]
|
||||
_ = x[SyncDecls-58]
|
||||
_ = x[SyncLabeledStmt-59]
|
||||
_ = x[SyncUseObjLocal-60]
|
||||
_ = x[SyncAddLocal-61]
|
||||
_ = x[SyncLinkname-62]
|
||||
_ = x[SyncStmt1-63]
|
||||
_ = x[SyncStmtsEnd-64]
|
||||
_ = x[SyncLabel-65]
|
||||
_ = x[SyncOptLabel-66]
|
||||
}
|
||||
|
||||
const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
|
||||
|
||||
var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
|
||||
|
||||
func (i SyncMarker) String() string {
|
||||
i -= 1
|
||||
if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
|
||||
return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
|
||||
}
|
||||
return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
|
||||
}
|
||||
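// Illustrative note, not part of the vendored file: String slices the packed
// _SyncMarker_name string using _SyncMarker_index. For example SyncBool (2)
// becomes i = 1, and _SyncMarker_name[_SyncMarker_index[1]:_SyncMarker_index[2]]
// is _SyncMarker_name[3:7] == "Bool".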
151
vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// package tokeninternal provides access to some internal features of the token
|
||||
// package.
|
||||
package tokeninternal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"sort"
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// GetLines returns the table of line-start offsets from a token.File.
|
||||
func GetLines(file *token.File) []int {
|
||||
// token.File has a Lines method on Go 1.21 and later.
|
||||
if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
|
||||
return file.Lines()
|
||||
}
|
||||
|
||||
// This declaration must match that of token.File.
|
||||
// This creates a risk of dependency skew.
|
||||
// For now we check that the size of the two
|
||||
// declarations is the same, on the (fragile) assumption
|
||||
// that future changes would add fields.
|
||||
type tokenFile119 struct {
|
||||
_ string
|
||||
_ int
|
||||
_ int
|
||||
mu sync.Mutex // we're not complete monsters
|
||||
lines []int
|
||||
_ []struct{}
|
||||
}
|
||||
type tokenFile118 struct {
|
||||
_ *token.FileSet // deleted in go1.19
|
||||
tokenFile119
|
||||
}
|
||||
|
||||
type uP = unsafe.Pointer
|
||||
switch unsafe.Sizeof(*file) {
|
||||
case unsafe.Sizeof(tokenFile118{}):
|
||||
var ptr *tokenFile118
|
||||
*(*uP)(uP(&ptr)) = uP(file)
|
||||
ptr.mu.Lock()
|
||||
defer ptr.mu.Unlock()
|
||||
return ptr.lines
|
||||
|
||||
case unsafe.Sizeof(tokenFile119{}):
|
||||
var ptr *tokenFile119
|
||||
*(*uP)(uP(&ptr)) = uP(file)
|
||||
ptr.mu.Lock()
|
||||
defer ptr.mu.Unlock()
|
||||
return ptr.lines
|
||||
|
||||
default:
|
||||
panic("unexpected token.File size")
|
||||
}
|
||||
}
|
||||
|
||||
// AddExistingFiles adds the specified files to the FileSet if they
|
||||
// are not already present. It panics if any pair of files in the
|
||||
// resulting FileSet would overlap.
|
||||
func AddExistingFiles(fset *token.FileSet, files []*token.File) {
|
||||
// Punch through the FileSet encapsulation.
|
||||
type tokenFileSet struct {
|
||||
// This type remained essentially consistent from go1.16 to go1.21.
|
||||
mutex sync.RWMutex
|
||||
base int
|
||||
files []*token.File
|
||||
_ *token.File // changed to atomic.Pointer[token.File] in go1.19
|
||||
}
|
||||
|
||||
// If the size of token.FileSet changes, this will fail to compile.
|
||||
const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
|
||||
var _ [-delta * delta]int
|
||||
|
||||
type uP = unsafe.Pointer
|
||||
var ptr *tokenFileSet
|
||||
*(*uP)(uP(&ptr)) = uP(fset)
|
||||
ptr.mutex.Lock()
|
||||
defer ptr.mutex.Unlock()
|
||||
|
||||
// Merge and sort.
|
||||
newFiles := append(ptr.files, files...)
|
||||
sort.Slice(newFiles, func(i, j int) bool {
|
||||
return newFiles[i].Base() < newFiles[j].Base()
|
||||
})
|
||||
|
||||
// Reject overlapping files.
|
||||
// Discard adjacent identical files.
|
||||
out := newFiles[:0]
|
||||
for i, file := range newFiles {
|
||||
if i > 0 {
|
||||
prev := newFiles[i-1]
|
||||
if file == prev {
|
||||
continue
|
||||
}
|
||||
if prev.Base()+prev.Size()+1 > file.Base() {
|
||||
panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
|
||||
prev.Name(), prev.Base(), prev.Base()+prev.Size(),
|
||||
file.Name(), file.Base(), file.Base()+file.Size()))
|
||||
}
|
||||
}
|
||||
out = append(out, file)
|
||||
}
|
||||
newFiles = out
|
||||
|
||||
ptr.files = newFiles
|
||||
|
||||
// Advance FileSet.Base().
|
||||
if len(newFiles) > 0 {
|
||||
last := newFiles[len(newFiles)-1]
|
||||
newBase := last.Base() + last.Size() + 1
|
||||
if ptr.base < newBase {
|
||||
ptr.base = newBase
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FileSetFor returns a new FileSet containing a sequence of new Files with
|
||||
// the same base, size, and line as the input files, for use in APIs that
|
||||
// require a FileSet.
|
||||
//
|
||||
// Precondition: the input files must be non-overlapping, and sorted in order
|
||||
// of their Base.
|
||||
func FileSetFor(files ...*token.File) *token.FileSet {
|
||||
fset := token.NewFileSet()
|
||||
for _, f := range files {
|
||||
f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
|
||||
lines := GetLines(f)
|
||||
f2.SetLines(lines)
|
||||
}
|
||||
return fset
|
||||
}
|
||||
|
||||
// CloneFileSet creates a new FileSet holding all files in fset. It does not
|
||||
// create copies of the token.Files in fset: they are added to the resulting
|
||||
// FileSet unmodified.
|
||||
func CloneFileSet(fset *token.FileSet) *token.FileSet {
|
||||
var files []*token.File
|
||||
fset.Iterate(func(f *token.File) bool {
|
||||
files = append(files, f)
|
||||
return true
|
||||
})
|
||||
newFileSet := token.NewFileSet()
|
||||
AddExistingFiles(newFileSet, files)
|
||||
return newFileSet
|
||||
}
|
||||
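A small, hedged sketch, not part of the vendored file, of how FileSetFor and CloneFileSet above might be exercised; since the tokeninternal import path is internal to x/tools, treat it purely as an illustration of the functions defined above.

func sketchFileSets() {
	// assumes: import "go/token" plus this tokeninternal package
	fset := token.NewFileSet()
	f := fset.AddFile("a.go", -1, 30) // -1 lets the FileSet choose the base
	f.SetLines([]int{0, 10, 20})      // three lines at these byte offsets

	single := tokeninternal.FileSetFor(f)     // shares f's base, size, and lines
	clone := tokeninternal.CloneFileSet(fset) // re-adds every file of fset unmodified
	_, _ = single, clone
}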
204
vendor/golang.org/x/tools/internal/typeparams/common.go
generated
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package typeparams contains common utilities for writing tools that interact
|
||||
// with generic Go code, as introduced with Go 1.18.
|
||||
//
|
||||
// Many of the types and functions in this package are proxies for the new APIs
|
||||
// introduced in the standard library with Go 1.18. For example, the
|
||||
// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
|
||||
// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
|
||||
// versions older than 1.18 these helpers are implemented as stubs, allowing
|
||||
// users of this package to write code that handles generic constructs inline,
|
||||
// even if the Go version being used to compile does not support generics.
|
||||
//
|
||||
// Additionally, this package contains common utilities for working with the
|
||||
// new generic constructs, to supplement the standard library APIs. Notably,
|
||||
// the StructuralTerms API computes a minimal representation of the structural
|
||||
// restrictions on a type parameter.
|
||||
//
|
||||
// An external version of these APIs is available in the
|
||||
// golang.org/x/exp/typeparams module.
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// UnpackIndexExpr extracts data from AST nodes that represent index
|
||||
// expressions.
|
||||
//
|
||||
// For an ast.IndexExpr, the resulting indices slice will contain exactly one
|
||||
// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
|
||||
// number of index expressions.
|
||||
//
|
||||
// For nodes that don't represent index expressions, the first return value of
|
||||
// UnpackIndexExpr will be nil.
|
||||
func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
|
||||
switch e := n.(type) {
|
||||
case *ast.IndexExpr:
|
||||
return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
|
||||
case *IndexListExpr:
|
||||
return e.X, e.Lbrack, e.Indices, e.Rbrack
|
||||
}
|
||||
return nil, token.NoPos, nil, token.NoPos
|
||||
}
|
||||
|
||||
// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
|
||||
// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
|
||||
// will panic.
|
||||
func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
|
||||
switch len(indices) {
|
||||
case 0:
|
||||
panic("empty indices")
|
||||
case 1:
|
||||
return &ast.IndexExpr{
|
||||
X: x,
|
||||
Lbrack: lbrack,
|
||||
Index: indices[0],
|
||||
Rbrack: rbrack,
|
||||
}
|
||||
default:
|
||||
return &IndexListExpr{
|
||||
X: x,
|
||||
Lbrack: lbrack,
|
||||
Indices: indices,
|
||||
Rbrack: rbrack,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IsTypeParam reports whether t is a type parameter.
|
||||
func IsTypeParam(t types.Type) bool {
|
||||
_, ok := t.(*TypeParam)
|
||||
return ok
|
||||
}
|
||||
|
||||
// OriginMethod returns the origin method associated with the method fn.
|
||||
// For methods on a non-generic receiver base type, this is just
|
||||
// fn. However, for methods with a generic receiver, OriginMethod returns the
|
||||
// corresponding method in the method set of the origin type.
|
||||
//
|
||||
// As a special case, if fn is not a method (has no receiver), OriginMethod
|
||||
// returns fn.
|
||||
func OriginMethod(fn *types.Func) *types.Func {
|
||||
recv := fn.Type().(*types.Signature).Recv()
|
||||
if recv == nil {
|
||||
return fn
|
||||
}
|
||||
base := recv.Type()
|
||||
p, isPtr := base.(*types.Pointer)
|
||||
if isPtr {
|
||||
base = p.Elem()
|
||||
}
|
||||
named, isNamed := base.(*types.Named)
|
||||
if !isNamed {
|
||||
// Receiver is a *types.Interface.
|
||||
return fn
|
||||
}
|
||||
if ForNamed(named).Len() == 0 {
|
||||
// Receiver base has no type parameters, so we can avoid the lookup below.
|
||||
return fn
|
||||
}
|
||||
orig := NamedTypeOrigin(named)
|
||||
gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
|
||||
|
||||
// This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In:
|
||||
// package p
|
||||
// type T *int
|
||||
// func (*T) f() {}
|
||||
// LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}.
|
||||
// Here we make them consistent by force.
|
||||
// (The go/types bug is general, but this workaround is reached only
|
||||
// for generic T thanks to the early return above.)
|
||||
if gfn == nil {
|
||||
mset := types.NewMethodSet(types.NewPointer(orig))
|
||||
for i := 0; i < mset.Len(); i++ {
|
||||
m := mset.At(i)
|
||||
if m.Obj().Id() == fn.Id() {
|
||||
gfn = m.Obj()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// In golang/go#61196, we observe another crash, this time inexplicable.
|
||||
if gfn == nil {
|
||||
panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods()))
|
||||
}
|
||||
|
||||
return gfn.(*types.Func)
|
||||
}
|
||||
|
||||
// GenericAssignableTo is a generalization of types.AssignableTo that
|
||||
// implements the following rule for uninstantiated generic types:
|
||||
//
|
||||
// If V and T are generic named types, then V is considered assignable to T if,
|
||||
// for every possible instantiation of V[A_1, ..., A_N], the instantiation
|
||||
// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
|
||||
//
|
||||
// If T has structural constraints, they must be satisfied by V.
|
||||
//
|
||||
// For example, consider the following type declarations:
|
||||
//
|
||||
// type Interface[T any] interface {
|
||||
// Accept(T)
|
||||
// }
|
||||
//
|
||||
// type Container[T any] struct {
|
||||
// Element T
|
||||
// }
|
||||
//
|
||||
// func (c Container[T]) Accept(t T) { c.Element = t }
|
||||
//
|
||||
// In this case, GenericAssignableTo reports that instantiations of Container
|
||||
// are assignable to the corresponding instantiation of Interface.
|
||||
func GenericAssignableTo(ctxt *Context, V, T types.Type) bool {
|
||||
// If V and T are not both named, or do not have matching non-empty type
|
||||
// parameter lists, fall back on types.AssignableTo.
|
||||
|
||||
VN, Vnamed := V.(*types.Named)
|
||||
TN, Tnamed := T.(*types.Named)
|
||||
if !Vnamed || !Tnamed {
|
||||
return types.AssignableTo(V, T)
|
||||
}
|
||||
|
||||
vtparams := ForNamed(VN)
|
||||
ttparams := ForNamed(TN)
|
||||
if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 {
|
||||
return types.AssignableTo(V, T)
|
||||
}
|
||||
|
||||
// V and T have the same (non-zero) number of type params. Instantiate both
|
||||
// with the type parameters of V. This must always succeed for V, and will
|
||||
// succeed for T if and only if the type set of each type parameter of V is a
|
||||
// subset of the type set of the corresponding type parameter of T, meaning
|
||||
// that every instantiation of V corresponds to a valid instantiation of T.
|
||||
|
||||
// Minor optimization: ensure we share a context across the two
|
||||
// instantiations below.
|
||||
if ctxt == nil {
|
||||
ctxt = NewContext()
|
||||
}
|
||||
|
||||
var targs []types.Type
|
||||
for i := 0; i < vtparams.Len(); i++ {
|
||||
targs = append(targs, vtparams.At(i))
|
||||
}
|
||||
|
||||
vinst, err := Instantiate(ctxt, V, targs, true)
|
||||
if err != nil {
|
||||
panic("type parameters should satisfy their own constraints")
|
||||
}
|
||||
|
||||
tinst, err := Instantiate(ctxt, T, targs, true)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return types.AssignableTo(vinst, tinst)
|
||||
}
|
||||
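A sketch, not part of the vendored file, that checks the Container/Interface example from the doc comment above. It assumes a Go 1.18+ toolchain (where the non-stub implementation of this package applies) and, since the import path is internal to x/tools, is meant only as an illustration.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

const src = `package p

type Interface[T any] interface{ Accept(T) }

type Container[T any] struct{ Element T }

func (c Container[T]) Accept(t T) { c.Element = t }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}
	V := pkg.Scope().Lookup("Container").Type()
	T := pkg.Scope().Lookup("Interface").Type()
	fmt.Println(typeparams.GenericAssignableTo(nil, V, T)) // expected: true
}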
122
vendor/golang.org/x/tools/internal/typeparams/coretype.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// CoreType returns the core type of T or nil if T does not have a core type.
|
||||
//
|
||||
// See https://go.dev/ref/spec#Core_types for the definition of a core type.
|
||||
func CoreType(T types.Type) types.Type {
|
||||
U := T.Underlying()
|
||||
if _, ok := U.(*types.Interface); !ok {
|
||||
return U // for non-interface types,
|
||||
}
|
||||
|
||||
terms, err := _NormalTerms(U)
|
||||
if len(terms) == 0 || err != nil {
|
||||
// len(terms) -> empty type set of interface.
|
||||
// err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
|
||||
return nil // no core type.
|
||||
}
|
||||
|
||||
U = terms[0].Type().Underlying()
|
||||
var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
|
||||
for identical = 1; identical < len(terms); identical++ {
|
||||
if !types.Identical(U, terms[identical].Type().Underlying()) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if identical == len(terms) {
|
||||
// https://go.dev/ref/spec#Core_types
|
||||
// "There is a single type U which is the underlying type of all types in the type set of T"
|
||||
return U
|
||||
}
|
||||
ch, ok := U.(*types.Chan)
|
||||
if !ok {
|
||||
return nil // no core type as identical < len(terms) and U is not a channel.
|
||||
}
|
||||
// https://go.dev/ref/spec#Core_types
|
||||
// "the type chan E if T contains only bidirectional channels, or the type chan<- E or
|
||||
// <-chan E depending on the direction of the directional channels present."
|
||||
for chans := identical; chans < len(terms); chans++ {
|
||||
curr, ok := terms[chans].Type().Underlying().(*types.Chan)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if !types.Identical(ch.Elem(), curr.Elem()) {
|
||||
return nil // channel elements are not identical.
|
||||
}
|
||||
if ch.Dir() == types.SendRecv {
|
||||
// ch is bidirectional. We can safely always use curr's direction.
|
||||
ch = curr
|
||||
} else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
|
||||
// ch and curr are not bidirectional and not the same direction.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// _NormalTerms returns a slice of terms representing the normalized structural
|
||||
// type restrictions of a type, if any.
|
||||
//
|
||||
// For all types other than *types.TypeParam, *types.Interface, and
|
||||
// *types.Union, this is just a single term with Tilde() == false and
|
||||
// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
|
||||
// below.
|
||||
//
|
||||
// Structural type restrictions of a type parameter are created via
|
||||
// non-interface types embedded in its constraint interface (directly, or via a
|
||||
// chain of interface embeddings). For example, in the declaration type
|
||||
// T[P interface{~int; m()}] int the structural restriction of the type
|
||||
// parameter P is ~int.
|
||||
//
|
||||
// With interface embedding and unions, the specification of structural type
|
||||
// restrictions may be arbitrarily complex. For example, consider the
|
||||
// following:
|
||||
//
|
||||
// type A interface{ ~string|~[]byte }
|
||||
//
|
||||
// type B interface{ int|string }
|
||||
//
|
||||
// type C interface { ~string|~int }
|
||||
//
|
||||
// type T[P interface{ A|B; C }] int
|
||||
//
|
||||
// In this example, the structural type restriction of P is ~string|int: A|B
|
||||
// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
|
||||
// which when intersected with C (~string|~int) yields ~string|int.
|
||||
//
|
||||
// _NormalTerms computes these expansions and reductions, producing a
|
||||
// "normalized" form of the embeddings. A structural restriction is normalized
|
||||
// if it is a single union containing no interface terms, and is minimal in the
|
||||
// sense that removing any term changes the set of types satisfying the
|
||||
// constraint. It is left as a proof for the reader that, modulo sorting, there
|
||||
// is exactly one such normalized form.
|
||||
//
|
||||
// Because the minimal representation always takes this form, _NormalTerms
|
||||
// returns a slice of tilde terms corresponding to the terms of the union in
|
||||
// the normalized structural restriction. An error is returned if the type is
|
||||
// invalid, exceeds complexity bounds, or has an empty type set. In the latter
|
||||
// case, _NormalTerms returns ErrEmptyTypeSet.
|
||||
//
|
||||
// _NormalTerms makes no guarantees about the order of terms, except that it
|
||||
// is deterministic.
|
||||
func _NormalTerms(typ types.Type) ([]*Term, error) {
|
||||
switch typ := typ.(type) {
|
||||
case *TypeParam:
|
||||
return StructuralTerms(typ)
|
||||
case *Union:
|
||||
return UnionTermSet(typ)
|
||||
case *types.Interface:
|
||||
return InterfaceTermSet(typ)
|
||||
default:
|
||||
return []*Term{NewTerm(false, typ)}, nil
|
||||
}
|
||||
}
|
||||
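// Illustrative note, not part of the vendored file: for a non-interface type
// such as "type MyInt int", CoreType simply returns the underlying type int.
// For constraint interfaces it depends on the normalized terms:
//	interface{ ~int }                  -> int
//	interface{ ~int | int32 }          -> nil (underlying types differ)
//	interface{ chan int | <-chan int } -> <-chan int (channel special case)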
12
vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.18
// +build !go1.18

package typeparams

// Enabled reports whether type parameters are enabled in the current build
// environment.
const Enabled = false
15
vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.18
// +build go1.18

package typeparams

// Note: this constant is in a separate file as this is the only acceptable
// diff between the <1.18 API of this package and the 1.18 API.

// Enabled reports whether type parameters are enabled in the current build
// environment.
const Enabled = true
218
vendor/golang.org/x/tools/internal/typeparams/normalize.go
generated
vendored
Normal file
@@ -0,0 +1,218 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//go:generate go run copytermlist.go
|
||||
|
||||
const debug = false
|
||||
|
||||
var ErrEmptyTypeSet = errors.New("empty type set")
|
||||
|
||||
// StructuralTerms returns a slice of terms representing the normalized
|
||||
// structural type restrictions of a type parameter, if any.
|
||||
//
|
||||
// Structural type restrictions of a type parameter are created via
|
||||
// non-interface types embedded in its constraint interface (directly, or via a
|
||||
// chain of interface embeddings). For example, in the declaration
|
||||
//
|
||||
// type T[P interface{~int; m()}] int
|
||||
//
|
||||
// the structural restriction of the type parameter P is ~int.
|
||||
//
|
||||
// With interface embedding and unions, the specification of structural type
|
||||
// restrictions may be arbitrarily complex. For example, consider the
|
||||
// following:
|
||||
//
|
||||
// type A interface{ ~string|~[]byte }
|
||||
//
|
||||
// type B interface{ int|string }
|
||||
//
|
||||
// type C interface { ~string|~int }
|
||||
//
|
||||
// type T[P interface{ A|B; C }] int
|
||||
//
|
||||
// In this example, the structural type restriction of P is ~string|int: A|B
|
||||
// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
|
||||
// which when intersected with C (~string|~int) yields ~string|int.
|
||||
//
|
||||
// StructuralTerms computes these expansions and reductions, producing a
|
||||
// "normalized" form of the embeddings. A structural restriction is normalized
|
||||
// if it is a single union containing no interface terms, and is minimal in the
|
||||
// sense that removing any term changes the set of types satisfying the
|
||||
// constraint. It is left as a proof for the reader that, modulo sorting, there
|
||||
// is exactly one such normalized form.
|
||||
//
|
||||
// Because the minimal representation always takes this form, StructuralTerms
|
||||
// returns a slice of tilde terms corresponding to the terms of the union in
|
||||
// the normalized structural restriction. An error is returned if the
|
||||
// constraint interface is invalid, exceeds complexity bounds, or has an empty
|
||||
// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
|
||||
//
|
||||
// StructuralTerms makes no guarantees about the order of terms, except that it
|
||||
// is deterministic.
|
||||
func StructuralTerms(tparam *TypeParam) ([]*Term, error) {
|
||||
constraint := tparam.Constraint()
|
||||
if constraint == nil {
|
||||
return nil, fmt.Errorf("%s has nil constraint", tparam)
|
||||
}
|
||||
iface, _ := constraint.Underlying().(*types.Interface)
|
||||
if iface == nil {
|
||||
return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
|
||||
}
|
||||
return InterfaceTermSet(iface)
|
||||
}
|
||||
|
||||
// InterfaceTermSet computes the normalized terms for a constraint interface,
|
||||
// returning an error if the term set cannot be computed or is empty. In the
|
||||
// latter case, the error will be ErrEmptyTypeSet.
|
||||
//
|
||||
// See the documentation of StructuralTerms for more information on
|
||||
// normalization.
|
||||
func InterfaceTermSet(iface *types.Interface) ([]*Term, error) {
|
||||
return computeTermSet(iface)
|
||||
}
|
||||
|
||||
// UnionTermSet computes the normalized terms for a union, returning an error
|
||||
// if the term set cannot be computed or is empty. In the latter case, the
|
||||
// error will be ErrEmptyTypeSet.
|
||||
//
|
||||
// See the documentation of StructuralTerms for more information on
|
||||
// normalization.
|
||||
func UnionTermSet(union *Union) ([]*Term, error) {
|
||||
return computeTermSet(union)
|
||||
}
|
||||
|
||||
func computeTermSet(typ types.Type) ([]*Term, error) {
|
||||
tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tset.terms.isEmpty() {
|
||||
return nil, ErrEmptyTypeSet
|
||||
}
|
||||
if tset.terms.isAll() {
|
||||
return nil, nil
|
||||
}
|
||||
var terms []*Term
|
||||
for _, term := range tset.terms {
|
||||
terms = append(terms, NewTerm(term.tilde, term.typ))
|
||||
}
|
||||
return terms, nil
|
||||
}
|
||||
|
||||
// A termSet holds the normalized set of terms for a given type.
|
||||
//
|
||||
// The name termSet is intentionally distinct from 'type set': a type set is
|
||||
// all types that implement a type (and includes method restrictions), whereas
|
||||
// a term set just represents the structural restrictions on a type.
|
||||
type termSet struct {
|
||||
complete bool
|
||||
terms termlist
|
||||
}
|
||||
|
||||
func indentf(depth int, format string, args ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
|
||||
}
|
||||
|
||||
func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
|
||||
if t == nil {
|
||||
panic("nil type")
|
||||
}
|
||||
|
||||
if debug {
|
||||
indentf(depth, "%s", t.String())
|
||||
defer func() {
|
||||
if err != nil {
|
||||
indentf(depth, "=> %s", err)
|
||||
} else {
|
||||
indentf(depth, "=> %s", res.terms.String())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
const maxTermCount = 100
|
||||
if tset, ok := seen[t]; ok {
|
||||
if !tset.complete {
|
||||
return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
|
||||
}
|
||||
return tset, nil
|
||||
}
|
||||
|
||||
// Mark the current type as seen to avoid infinite recursion.
|
||||
tset := new(termSet)
|
||||
defer func() {
|
||||
tset.complete = true
|
||||
}()
|
||||
seen[t] = tset
|
||||
|
||||
switch u := t.Underlying().(type) {
|
||||
case *types.Interface:
|
||||
// The term set of an interface is the intersection of the term sets of its
|
||||
// embedded types.
|
||||
tset.terms = allTermlist
|
||||
for i := 0; i < u.NumEmbeddeds(); i++ {
|
||||
embedded := u.EmbeddedType(i)
|
||||
if _, ok := embedded.Underlying().(*TypeParam); ok {
|
||||
return nil, fmt.Errorf("invalid embedded type %T", embedded)
|
||||
}
|
||||
tset2, err := computeTermSetInternal(embedded, seen, depth+1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tset.terms = tset.terms.intersect(tset2.terms)
|
||||
}
|
||||
case *Union:
|
||||
// The term set of a union is the union of term sets of its terms.
|
||||
tset.terms = nil
|
||||
for i := 0; i < u.Len(); i++ {
|
||||
t := u.Term(i)
|
||||
var terms termlist
|
||||
switch t.Type().Underlying().(type) {
|
||||
case *types.Interface:
|
||||
tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
terms = tset2.terms
|
||||
case *TypeParam, *Union:
|
||||
// A stand-alone type parameter or union is not permitted as union
|
||||
// term.
|
||||
return nil, fmt.Errorf("invalid union term %T", t)
|
||||
default:
|
||||
if t.Type() == types.Typ[types.Invalid] {
|
||||
continue
|
||||
}
|
||||
terms = termlist{{t.Tilde(), t.Type()}}
|
||||
}
|
||||
tset.terms = tset.terms.union(terms)
|
||||
if len(tset.terms) > maxTermCount {
|
||||
return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
|
||||
}
|
||||
}
|
||||
case *TypeParam:
|
||||
panic("unreachable")
|
||||
default:
|
||||
// For all other types, the term set is just a single non-tilde term
|
||||
// holding the type itself.
|
||||
if u != types.Typ[types.Invalid] {
|
||||
tset.terms = termlist{{false, t}}
|
||||
}
|
||||
}
|
||||
return tset, nil
|
||||
}
|
||||
|
||||
// under is a facade for the go/types internal function of the same name. It is
|
||||
// used by typeterm.go.
|
||||
func under(t types.Type) types.Type {
|
||||
return t.Underlying()
|
||||
}
|
||||
163
vendor/golang.org/x/tools/internal/typeparams/termlist.go
generated
vendored
Normal file
@@ -0,0 +1,163 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Code generated by copytermlist.go DO NOT EDIT.
|
||||
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// A termlist represents the type set represented by the union
|
||||
// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
|
||||
// A termlist is in normal form if all terms are disjoint.
|
||||
// termlist operations don't require the operands to be in
|
||||
// normal form.
|
||||
type termlist []*term
|
||||
|
||||
// allTermlist represents the set of all types.
|
||||
// It is in normal form.
|
||||
var allTermlist = termlist{new(term)}
|
||||
|
||||
// String prints the termlist exactly (without normalization).
|
||||
func (xl termlist) String() string {
|
||||
if len(xl) == 0 {
|
||||
return "∅"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for i, x := range xl {
|
||||
if i > 0 {
|
||||
buf.WriteString(" | ")
|
||||
}
|
||||
buf.WriteString(x.String())
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// isEmpty reports whether the termlist xl represents the empty set of types.
|
||||
func (xl termlist) isEmpty() bool {
|
||||
// If there's a non-nil term, the entire list is not empty.
|
||||
// If the termlist is in normal form, this requires at most
|
||||
// one iteration.
|
||||
for _, x := range xl {
|
||||
if x != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isAll reports whether the termlist xl represents the set of all types.
|
||||
func (xl termlist) isAll() bool {
|
||||
// If there's a 𝓤 term, the entire list is 𝓤.
|
||||
// If the termlist is in normal form, this requires at most
|
||||
// one iteration.
|
||||
for _, x := range xl {
|
||||
if x != nil && x.typ == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// norm returns the normal form of xl.
|
||||
func (xl termlist) norm() termlist {
|
||||
// Quadratic algorithm, but good enough for now.
|
||||
// TODO(gri) fix asymptotic performance
|
||||
used := make([]bool, len(xl))
|
||||
var rl termlist
|
||||
for i, xi := range xl {
|
||||
if xi == nil || used[i] {
|
||||
continue
|
||||
}
|
||||
for j := i + 1; j < len(xl); j++ {
|
||||
xj := xl[j]
|
||||
if xj == nil || used[j] {
|
||||
continue
|
||||
}
|
||||
if u1, u2 := xi.union(xj); u2 == nil {
|
||||
// If we encounter a 𝓤 term, the entire list is 𝓤.
|
||||
// Exit early.
|
||||
// (Note that this is not just an optimization;
|
||||
// if we continue, we may end up with a 𝓤 term
|
||||
// and other terms and the result would not be
|
||||
// in normal form.)
|
||||
if u1.typ == nil {
|
||||
return allTermlist
|
||||
}
|
||||
xi = u1
|
||||
used[j] = true // xj is now unioned into xi - ignore it in future iterations
|
||||
}
|
||||
}
|
||||
rl = append(rl, xi)
|
||||
}
|
||||
return rl
|
||||
}
|
||||
|
||||
// union returns the union xl ∪ yl.
|
||||
func (xl termlist) union(yl termlist) termlist {
|
||||
return append(xl, yl...).norm()
|
||||
}
|
||||
|
||||
// intersect returns the intersection xl ∩ yl.
|
||||
func (xl termlist) intersect(yl termlist) termlist {
|
||||
if xl.isEmpty() || yl.isEmpty() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Quadratic algorithm, but good enough for now.
|
||||
// TODO(gri) fix asymptotic performance
|
||||
var rl termlist
|
||||
for _, x := range xl {
|
||||
for _, y := range yl {
|
||||
if r := x.intersect(y); r != nil {
|
||||
rl = append(rl, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
return rl.norm()
|
||||
}
|
||||
|
||||
// equal reports whether xl and yl represent the same type set.
|
||||
func (xl termlist) equal(yl termlist) bool {
|
||||
// TODO(gri) this should be more efficient
|
||||
return xl.subsetOf(yl) && yl.subsetOf(xl)
|
||||
}
|
||||
|
||||
// includes reports whether t ∈ xl.
|
||||
func (xl termlist) includes(t types.Type) bool {
|
||||
for _, x := range xl {
|
||||
if x.includes(t) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// supersetOf reports whether y ⊆ xl.
|
||||
func (xl termlist) supersetOf(y *term) bool {
|
||||
for _, x := range xl {
|
||||
if y.subsetOf(x) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// subsetOf reports whether xl ⊆ yl.
|
||||
func (xl termlist) subsetOf(yl termlist) bool {
|
||||
if yl.isEmpty() {
|
||||
return xl.isEmpty()
|
||||
}
|
||||
|
||||
// each term x of xl must be a subset of yl
|
||||
for _, x := range xl {
|
||||
if !yl.supersetOf(x) {
|
||||
return false // x is not a subset yl
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
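// Illustrative note, not part of the vendored file: norm folds overlapping
// terms, so the list ~string | string normalizes to just ~string (the type
// set of string is contained in that of ~string), and any list containing
// the universe term 𝓤 collapses to allTermlist.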
197
vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
generated
vendored
Normal file
@@ -0,0 +1,197 @@
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.18
|
||||
// +build !go1.18
|
||||
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
func unsupported() {
|
||||
panic("type parameters are unsupported at this go version")
|
||||
}
|
||||
|
||||
// IndexListExpr is a placeholder type, as type parameters are not supported at
|
||||
// this Go version. Its methods panic on use.
|
||||
type IndexListExpr struct {
|
||||
ast.Expr
|
||||
X ast.Expr // expression
|
||||
Lbrack token.Pos // position of "["
|
||||
Indices []ast.Expr // index expressions
|
||||
Rbrack token.Pos // position of "]"
|
||||
}
|
||||
|
||||
// ForTypeSpec returns an empty field list, as type parameters are not supported
|
||||
// at this Go version.
|
||||
func ForTypeSpec(*ast.TypeSpec) *ast.FieldList {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForFuncType returns an empty field list, as type parameters are not
|
||||
// supported at this Go version.
|
||||
func ForFuncType(*ast.FuncType) *ast.FieldList {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TypeParam is a placeholder type, as type parameters are not supported at
|
||||
// this Go version. Its methods panic on use.
|
||||
type TypeParam struct{ types.Type }
|
||||
|
||||
func (*TypeParam) Index() int { unsupported(); return 0 }
|
||||
func (*TypeParam) Constraint() types.Type { unsupported(); return nil }
|
||||
func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil }
|
||||
|
||||
// TypeParamList is a placeholder for an empty type parameter list.
|
||||
type TypeParamList struct{}
|
||||
|
||||
func (*TypeParamList) Len() int { return 0 }
|
||||
func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil }
|
||||
|
||||
// TypeList is a placeholder for an empty type list.
|
||||
type TypeList struct{}
|
||||
|
||||
func (*TypeList) Len() int { return 0 }
|
||||
func (*TypeList) At(int) types.Type { unsupported(); return nil }
|
||||
|
||||
// NewTypeParam is unsupported at this Go version, and panics.
|
||||
func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
|
||||
unsupported()
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetTypeParamConstraint is unsupported at this Go version, and panics.
|
||||
func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
|
||||
unsupported()
|
||||
}
|
||||
|
||||
// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or
|
||||
// typeParams is non-empty.
|
||||
func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
|
||||
if len(recvTypeParams) != 0 || len(typeParams) != 0 {
|
||||
panic("signatures cannot have type parameters at this Go version")
|
||||
}
|
||||
return types.NewSignature(recv, params, results, variadic)
|
||||
}
|
||||
|
||||
// ForSignature returns an empty slice.
|
||||
func ForSignature(*types.Signature) *TypeParamList {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RecvTypeParams returns a nil slice.
|
||||
func RecvTypeParams(sig *types.Signature) *TypeParamList {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsComparable returns false, as no interfaces are type-restricted at this Go
|
||||
// version.
|
||||
func IsComparable(*types.Interface) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsMethodSet returns true, as no interfaces are type-restricted at this Go
|
||||
// version.
|
||||
func IsMethodSet(*types.Interface) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IsImplicit returns false, as no interfaces are implicit at this Go version.
|
||||
func IsImplicit(*types.Interface) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// MarkImplicit does nothing, because this Go version does not have implicit
|
||||
// interfaces.
|
||||
func MarkImplicit(*types.Interface) {}
|
||||
|
||||
// ForNamed returns an empty type parameter list, as type parameters are not
|
||||
// supported at this Go version.
|
||||
func ForNamed(*types.Named) *TypeParamList {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetForNamed panics if tparams is non-empty.
|
||||
func SetForNamed(_ *types.Named, tparams []*TypeParam) {
|
||||
if len(tparams) > 0 {
|
||||
unsupported()
|
||||
}
|
||||
}
|
||||
|
||||
// NamedTypeArgs returns nil.
|
||||
func NamedTypeArgs(*types.Named) *TypeList {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NamedTypeOrigin is the identity method at this Go version.
|
||||
func NamedTypeOrigin(named *types.Named) *types.Named {
|
||||
return named
|
||||
}
|
||||
|
||||
// Term holds information about a structural type restriction.
|
||||
type Term struct {
|
||||
tilde bool
|
||||
typ types.Type
|
||||
}
|
||||
|
||||
func (m *Term) Tilde() bool { return m.tilde }
|
||||
func (m *Term) Type() types.Type { return m.typ }
|
||||
func (m *Term) String() string {
|
||||
pre := ""
|
||||
if m.tilde {
|
||||
pre = "~"
|
||||
}
|
||||
return pre + m.typ.String()
|
||||
}
|
||||
|
||||
// NewTerm is unsupported at this Go version, and panics.
|
||||
func NewTerm(tilde bool, typ types.Type) *Term {
|
||||
return &Term{tilde, typ}
|
||||
}
|
||||
|
||||
// Union is a placeholder type, as type parameters are not supported at this Go
|
||||
// version. Its methods panic on use.
|
||||
type Union struct{ types.Type }
|
||||
|
||||
func (*Union) Len() int { return 0 }
|
||||
func (*Union) Term(i int) *Term { unsupported(); return nil }
|
||||
|
||||
// NewUnion is unsupported at this Go version, and panics.
|
||||
func NewUnion(terms []*Term) *Union {
|
||||
unsupported()
|
||||
return nil
|
||||
}
|
||||
|
||||
// InitInstanceInfo is a noop at this Go version.
|
||||
func InitInstanceInfo(*types.Info) {}
|
||||
|
||||
// Instance is a placeholder type, as type parameters are not supported at this
|
||||
// Go version.
|
||||
type Instance struct {
|
||||
TypeArgs *TypeList
|
||||
Type types.Type
|
||||
}
|
||||
|
||||
// GetInstances returns a nil map, as type parameters are not supported at this
|
||||
// Go version.
|
||||
func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil }
|
||||
|
||||
// Context is a placeholder type, as type parameters are not supported at
|
||||
// this Go version.
|
||||
type Context struct{}
|
||||
|
||||
// NewContext returns a placeholder Context instance.
|
||||
func NewContext() *Context {
|
||||
return &Context{}
|
||||
}
|
||||
|
||||
// Instantiate is unsupported on this Go version, and panics.
|
||||
func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
|
||||
unsupported()
|
||||
return nil, nil
|
||||
}
|
||||
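The !go1.18 stubs above exist so that code elsewhere in x/tools can query type parameters through one API and simply see "no type parameters" on older toolchains. A minimal sketch of that pattern, assuming an in-repo caller (the package is internal) and an invented source snippet:

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"

    "golang.org/x/tools/internal/typeparams"
)

const src = `package p

func Map[T, U any](xs []T, f func(T) U) []U { return nil }
`

func main() {
    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        // Pre-1.18 parsers reject generic syntax outright.
        fmt.Println("parse error:", err)
        return
    }
    ast.Inspect(f, func(n ast.Node) bool {
        fd, ok := n.(*ast.FuncDecl)
        if !ok {
            return true
        }
        // With the !go1.18 build of typeparams this is always nil.
        if tps := typeparams.ForFuncType(fd.Type); tps != nil {
            fmt.Printf("%s has %d type parameter(s)\n", fd.Name.Name, tps.NumFields())
        }
        return true
    })
}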
151
vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.18
// +build go1.18

package typeparams

import (
    "go/ast"
    "go/types"
)

// IndexListExpr is an alias for ast.IndexListExpr.
type IndexListExpr = ast.IndexListExpr

// ForTypeSpec returns n.TypeParams.
func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList {
    if n == nil {
        return nil
    }
    return n.TypeParams
}

// ForFuncType returns n.TypeParams.
func ForFuncType(n *ast.FuncType) *ast.FieldList {
    if n == nil {
        return nil
    }
    return n.TypeParams
}

// TypeParam is an alias for types.TypeParam.
type TypeParam = types.TypeParam

// TypeParamList is an alias for types.TypeParamList.
type TypeParamList = types.TypeParamList

// TypeList is an alias for types.TypeList.
type TypeList = types.TypeList

// NewTypeParam calls types.NewTypeParam.
func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam {
    return types.NewTypeParam(name, constraint)
}

// SetTypeParamConstraint calls tparam.SetConstraint(constraint).
func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) {
    tparam.SetConstraint(constraint)
}

// NewSignatureType calls types.NewSignatureType.
func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature {
    return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic)
}

// ForSignature returns sig.TypeParams().
func ForSignature(sig *types.Signature) *TypeParamList {
    return sig.TypeParams()
}

// RecvTypeParams returns sig.RecvTypeParams().
func RecvTypeParams(sig *types.Signature) *TypeParamList {
    return sig.RecvTypeParams()
}

// IsComparable calls iface.IsComparable().
func IsComparable(iface *types.Interface) bool {
    return iface.IsComparable()
}

// IsMethodSet calls iface.IsMethodSet().
func IsMethodSet(iface *types.Interface) bool {
    return iface.IsMethodSet()
}

// IsImplicit calls iface.IsImplicit().
func IsImplicit(iface *types.Interface) bool {
    return iface.IsImplicit()
}

// MarkImplicit calls iface.MarkImplicit().
func MarkImplicit(iface *types.Interface) {
    iface.MarkImplicit()
}

// ForNamed extracts the (possibly empty) type parameter object list from
// named.
func ForNamed(named *types.Named) *TypeParamList {
    return named.TypeParams()
}

// SetForNamed sets the type params tparams on n. Each tparam must be of
// dynamic type *types.TypeParam.
func SetForNamed(n *types.Named, tparams []*TypeParam) {
    n.SetTypeParams(tparams)
}

// NamedTypeArgs returns named.TypeArgs().
func NamedTypeArgs(named *types.Named) *TypeList {
    return named.TypeArgs()
}

// NamedTypeOrigin returns named.Origin().
func NamedTypeOrigin(named *types.Named) *types.Named {
    return named.Origin()
}

// Term is an alias for types.Term.
type Term = types.Term

// NewTerm calls types.NewTerm.
func NewTerm(tilde bool, typ types.Type) *Term {
    return types.NewTerm(tilde, typ)
}

// Union is an alias for types.Union.
type Union = types.Union

// NewUnion calls types.NewUnion.
func NewUnion(terms []*Term) *Union {
    return types.NewUnion(terms)
}

// InitInstanceInfo initializes info to record information about type and
// function instances.
func InitInstanceInfo(info *types.Info) {
    info.Instances = make(map[*ast.Ident]types.Instance)
}

// Instance is an alias for types.Instance.
type Instance = types.Instance

// GetInstances returns info.Instances.
func GetInstances(info *types.Info) map[*ast.Ident]Instance {
    return info.Instances
}

// Context is an alias for types.Context.
type Context = types.Context

// NewContext calls types.NewContext.
func NewContext() *Context {
    return types.NewContext()
}

// Instantiate calls types.Instantiate.
func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
    return types.Instantiate(ctxt, typ, targs, validate)
}
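On go1.18 and later the shims are thin wrappers over go/types, so instance tracking plugs directly into an ordinary type-check. A rough sketch under the same assumptions (internal import path, invented source string):

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
    "go/types"

    "golang.org/x/tools/internal/typeparams"
)

const src = `package p

func id[T any](x T) T { return x }

var _ = id(42)
`

func main() {
    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        panic(err)
    }
    info := &types.Info{}
    typeparams.InitInstanceInfo(info) // allocates info.Instances on go1.18+
    var conf types.Config
    if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
        panic(err)
    }
    for id, inst := range typeparams.GetInstances(info) {
        fmt.Printf("%s instantiated as %v\n", id.Name, inst.Type)
    }
}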
169
vendor/golang.org/x/tools/internal/typeparams/typeterm.go
generated
vendored
Normal file
@@ -0,0 +1,169 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Code generated by copytermlist.go DO NOT EDIT.

package typeparams

import "go/types"

// A term describes elementary type sets:
//
//	 ∅: (*term)(nil)     == ∅                      // set of no types (empty set)
//	 𝓤: &term{}          == 𝓤                      // set of all types (𝓤niverse)
//	 T: &term{false, T}  == {T}                    // set of type T
//	~t: &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
type term struct {
    tilde bool // valid if typ != nil
    typ   types.Type
}

func (x *term) String() string {
    switch {
    case x == nil:
        return "∅"
    case x.typ == nil:
        return "𝓤"
    case x.tilde:
        return "~" + x.typ.String()
    default:
        return x.typ.String()
    }
}

// equal reports whether x and y represent the same type set.
func (x *term) equal(y *term) bool {
    // easy cases
    switch {
    case x == nil || y == nil:
        return x == y
    case x.typ == nil || y.typ == nil:
        return x.typ == y.typ
    }
    // ∅ ⊂ x, y ⊂ 𝓤

    return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
}

// union returns the union x ∪ y: zero, one, or two non-nil terms.
func (x *term) union(y *term) (_, _ *term) {
    // easy cases
    switch {
    case x == nil && y == nil:
        return nil, nil // ∅ ∪ ∅ == ∅
    case x == nil:
        return y, nil // ∅ ∪ y == y
    case y == nil:
        return x, nil // x ∪ ∅ == x
    case x.typ == nil:
        return x, nil // 𝓤 ∪ y == 𝓤
    case y.typ == nil:
        return y, nil // x ∪ 𝓤 == 𝓤
    }
    // ∅ ⊂ x, y ⊂ 𝓤

    if x.disjoint(y) {
        return x, y // x ∪ y == (x, y) if x ∩ y == ∅
    }
    // x.typ == y.typ

    // ~t ∪ ~t == ~t
    // ~t ∪  T == ~t
    //  T ∪ ~t == ~t
    //  T ∪  T ==  T
    if x.tilde || !y.tilde {
        return x, nil
    }
    return y, nil
}

// intersect returns the intersection x ∩ y.
func (x *term) intersect(y *term) *term {
    // easy cases
    switch {
    case x == nil || y == nil:
        return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
    case x.typ == nil:
        return y // 𝓤 ∩ y == y
    case y.typ == nil:
        return x // x ∩ 𝓤 == x
    }
    // ∅ ⊂ x, y ⊂ 𝓤

    if x.disjoint(y) {
        return nil // x ∩ y == ∅ if x ∩ y == ∅
    }
    // x.typ == y.typ

    // ~t ∩ ~t == ~t
    // ~t ∩  T ==  T
    //  T ∩ ~t ==  T
    //  T ∩  T ==  T
    if !x.tilde || y.tilde {
        return x
    }
    return y
}

// includes reports whether t ∈ x.
func (x *term) includes(t types.Type) bool {
    // easy cases
    switch {
    case x == nil:
        return false // t ∈ ∅ == false
    case x.typ == nil:
        return true // t ∈ 𝓤 == true
    }
    // ∅ ⊂ x ⊂ 𝓤

    u := t
    if x.tilde {
        u = under(u)
    }
    return types.Identical(x.typ, u)
}

// subsetOf reports whether x ⊆ y.
func (x *term) subsetOf(y *term) bool {
    // easy cases
    switch {
    case x == nil:
        return true // ∅ ⊆ y == true
    case y == nil:
        return false // x ⊆ ∅ == false since x != ∅
    case y.typ == nil:
        return true // x ⊆ 𝓤 == true
    case x.typ == nil:
        return false // 𝓤 ⊆ y == false since y != 𝓤
    }
    // ∅ ⊂ x, y ⊂ 𝓤

    if x.disjoint(y) {
        return false // x ⊆ y == false if x ∩ y == ∅
    }
    // x.typ == y.typ

    // ~t ⊆ ~t == true
    // ~t ⊆  T == false
    //  T ⊆ ~t == true
    //  T ⊆  T == true
    return !x.tilde || y.tilde
}

// disjoint reports whether x ∩ y == ∅.
// x.typ and y.typ must not be nil.
func (x *term) disjoint(y *term) bool {
    if debug && (x.typ == nil || y.typ == nil) {
        panic("invalid argument(s)")
    }
    ux := x.typ
    if y.tilde {
        ux = under(ux)
    }
    uy := y.typ
    if x.tilde {
        uy = under(uy)
    }
    return !types.Identical(ux, uy)
}
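Because term and its methods are unexported, the set algebra above can only be exercised from inside the package (it also leans on the package's own under and debug helpers). A hypothetical in-package test sketch, with the test name invented:

package typeparams

import (
    "go/types"
    "testing"
)

func TestTermSetOps(t *testing.T) {
    intT := &term{false, types.Typ[types.Int]}    // the set {int}
    tildeInt := &term{true, types.Typ[types.Int]} // ~int: all types whose underlying type is int

    // T ∪ ~t collapses to ~t when the underlying types match.
    if a, b := intT.union(tildeInt); b != nil || !a.equal(tildeInt) {
        t.Errorf("union = (%v, %v), want (~int, <nil>)", a, b)
    }
    // ~t ∩ T collapses to T.
    if got := tildeInt.intersect(intT); !got.equal(intT) {
        t.Errorf("intersect = %v, want int", got)
    }
    // string's underlying type is string, so it is not a member of ~int.
    if tildeInt.includes(types.Typ[types.String]) {
        t.Error("includes(string) = true, want false")
    }
}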
1560
vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
generated
vendored
Normal file
File diff suppressed because it is too large
179
vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
generated
vendored
Normal file
@@ -0,0 +1,179 @@
// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT.

package typesinternal

import "strconv"

func _() {
    // An "invalid array index" compiler error signifies that the constant values have changed.
    // Re-run the stringer command to generate them again.
    var x [1]struct{}
    _ = x[InvalidSyntaxTree - -1]
    _ = x[Test-1]
    _ = x[BlankPkgName-2]
    _ = x[MismatchedPkgName-3]
    _ = x[InvalidPkgUse-4]
    _ = x[BadImportPath-5]
    _ = x[BrokenImport-6]
    _ = x[ImportCRenamed-7]
    _ = x[UnusedImport-8]
    _ = x[InvalidInitCycle-9]
    _ = x[DuplicateDecl-10]
    _ = x[InvalidDeclCycle-11]
    _ = x[InvalidTypeCycle-12]
    _ = x[InvalidConstInit-13]
    _ = x[InvalidConstVal-14]
    _ = x[InvalidConstType-15]
    _ = x[UntypedNilUse-16]
    _ = x[WrongAssignCount-17]
    _ = x[UnassignableOperand-18]
    _ = x[NoNewVar-19]
    _ = x[MultiValAssignOp-20]
    _ = x[InvalidIfaceAssign-21]
    _ = x[InvalidChanAssign-22]
    _ = x[IncompatibleAssign-23]
    _ = x[UnaddressableFieldAssign-24]
    _ = x[NotAType-25]
    _ = x[InvalidArrayLen-26]
    _ = x[BlankIfaceMethod-27]
    _ = x[IncomparableMapKey-28]
    _ = x[InvalidIfaceEmbed-29]
    _ = x[InvalidPtrEmbed-30]
    _ = x[BadRecv-31]
    _ = x[InvalidRecv-32]
    _ = x[DuplicateFieldAndMethod-33]
    _ = x[DuplicateMethod-34]
    _ = x[InvalidBlank-35]
    _ = x[InvalidIota-36]
    _ = x[MissingInitBody-37]
    _ = x[InvalidInitSig-38]
    _ = x[InvalidInitDecl-39]
    _ = x[InvalidMainDecl-40]
    _ = x[TooManyValues-41]
    _ = x[NotAnExpr-42]
    _ = x[TruncatedFloat-43]
    _ = x[NumericOverflow-44]
    _ = x[UndefinedOp-45]
    _ = x[MismatchedTypes-46]
    _ = x[DivByZero-47]
    _ = x[NonNumericIncDec-48]
    _ = x[UnaddressableOperand-49]
    _ = x[InvalidIndirection-50]
    _ = x[NonIndexableOperand-51]
    _ = x[InvalidIndex-52]
    _ = x[SwappedSliceIndices-53]
    _ = x[NonSliceableOperand-54]
    _ = x[InvalidSliceExpr-55]
    _ = x[InvalidShiftCount-56]
    _ = x[InvalidShiftOperand-57]
    _ = x[InvalidReceive-58]
    _ = x[InvalidSend-59]
    _ = x[DuplicateLitKey-60]
    _ = x[MissingLitKey-61]
    _ = x[InvalidLitIndex-62]
    _ = x[OversizeArrayLit-63]
    _ = x[MixedStructLit-64]
    _ = x[InvalidStructLit-65]
    _ = x[MissingLitField-66]
    _ = x[DuplicateLitField-67]
    _ = x[UnexportedLitField-68]
    _ = x[InvalidLitField-69]
    _ = x[UntypedLit-70]
    _ = x[InvalidLit-71]
    _ = x[AmbiguousSelector-72]
    _ = x[UndeclaredImportedName-73]
    _ = x[UnexportedName-74]
    _ = x[UndeclaredName-75]
    _ = x[MissingFieldOrMethod-76]
    _ = x[BadDotDotDotSyntax-77]
    _ = x[NonVariadicDotDotDot-78]
    _ = x[MisplacedDotDotDot-79]
    _ = x[InvalidDotDotDotOperand-80]
    _ = x[InvalidDotDotDot-81]
    _ = x[UncalledBuiltin-82]
    _ = x[InvalidAppend-83]
    _ = x[InvalidCap-84]
    _ = x[InvalidClose-85]
    _ = x[InvalidCopy-86]
    _ = x[InvalidComplex-87]
    _ = x[InvalidDelete-88]
    _ = x[InvalidImag-89]
    _ = x[InvalidLen-90]
    _ = x[SwappedMakeArgs-91]
    _ = x[InvalidMake-92]
    _ = x[InvalidReal-93]
    _ = x[InvalidAssert-94]
    _ = x[ImpossibleAssert-95]
    _ = x[InvalidConversion-96]
    _ = x[InvalidUntypedConversion-97]
    _ = x[BadOffsetofSyntax-98]
    _ = x[InvalidOffsetof-99]
    _ = x[UnusedExpr-100]
    _ = x[UnusedVar-101]
    _ = x[MissingReturn-102]
    _ = x[WrongResultCount-103]
    _ = x[OutOfScopeResult-104]
    _ = x[InvalidCond-105]
    _ = x[InvalidPostDecl-106]
    _ = x[InvalidChanRange-107]
    _ = x[InvalidIterVar-108]
    _ = x[InvalidRangeExpr-109]
    _ = x[MisplacedBreak-110]
    _ = x[MisplacedContinue-111]
    _ = x[MisplacedFallthrough-112]
    _ = x[DuplicateCase-113]
    _ = x[DuplicateDefault-114]
    _ = x[BadTypeKeyword-115]
    _ = x[InvalidTypeSwitch-116]
    _ = x[InvalidExprSwitch-117]
    _ = x[InvalidSelectCase-118]
    _ = x[UndeclaredLabel-119]
    _ = x[DuplicateLabel-120]
    _ = x[MisplacedLabel-121]
    _ = x[UnusedLabel-122]
    _ = x[JumpOverDecl-123]
    _ = x[JumpIntoBlock-124]
    _ = x[InvalidMethodExpr-125]
    _ = x[WrongArgCount-126]
    _ = x[InvalidCall-127]
    _ = x[UnusedResults-128]
    _ = x[InvalidDefer-129]
    _ = x[InvalidGo-130]
    _ = x[BadDecl-131]
    _ = x[RepeatedDecl-132]
    _ = x[InvalidUnsafeAdd-133]
    _ = x[InvalidUnsafeSlice-134]
    _ = x[UnsupportedFeature-135]
    _ = x[NotAGenericType-136]
    _ = x[WrongTypeArgCount-137]
    _ = x[CannotInferTypeArgs-138]
    _ = x[InvalidTypeArg-139]
    _ = x[InvalidInstanceCycle-140]
    _ = x[InvalidUnion-141]
    _ = x[MisplacedConstraintIface-142]
    _ = x[InvalidMethodTypeParams-143]
    _ = x[MisplacedTypeParam-144]
    _ = x[InvalidUnsafeSliceData-145]
    _ = x[InvalidUnsafeString-146]
}

const (
    _ErrorCode_name_0 = "InvalidSyntaxTree"
_ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString"
)

var (
_ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180}
)

func (i ErrorCode) String() string {
    switch {
    case i == -1:
        return _ErrorCode_name_0
    case 1 <= i && i <= 146:
        i -= 1
        return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
    default:
        return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")"
    }
}
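The generated String method simply maps the ErrorCode constants declared in errorcode.go to their names, falling back to a numeric form for unknown values. A small sketch of what a caller sees (again assuming an in-repo importer):

package main

import (
    "fmt"

    "golang.org/x/tools/internal/typesinternal"
)

func main() {
    fmt.Println(typesinternal.InvalidSyntaxTree) // InvalidSyntaxTree (code -1)
    fmt.Println(typesinternal.WrongArgCount)     // WrongArgCount
    fmt.Println(typesinternal.ErrorCode(9999))   // ErrorCode(9999)
}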
24
vendor/golang.org/x/tools/internal/typesinternal/objectpath.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package typesinternal

import "go/types"

// This file contains back doors that allow gopls to avoid method sorting when
// using the objectpath package.
//
// This is performance-critical in certain repositories, but changing the
// behavior of the objectpath package is still being discussed in
// golang/go#61443. If we decide to remove the sorting in objectpath we can
// simply delete these back doors. Otherwise, we should add a new API to
// objectpath that allows controlling the sorting.

// SkipEncoderMethodSorting marks enc (which must be an *objectpath.Encoder) as
// not requiring sorted methods.
var SkipEncoderMethodSorting func(enc interface{})

// ObjectpathObject is like objectpath.Object, but allows suppressing method
// sorting.
var ObjectpathObject func(pkg *types.Package, p string, skipMethodSorting bool) (types.Object, error)
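These hooks stay nil unless gopls (or another in-repo consumer) installs them, so callers need a fallback to the public objectpath entry point. A hedged sketch of that pattern; lookupObject is an invented helper name:

package example

import (
    "go/types"

    "golang.org/x/tools/go/types/objectpath"
    "golang.org/x/tools/internal/typesinternal"
)

// lookupObject resolves an object path, skipping method sorting when the back
// door has been installed and using the regular objectpath API otherwise.
func lookupObject(pkg *types.Package, path string) (types.Object, error) {
    if typesinternal.ObjectpathObject != nil {
        return typesinternal.ObjectpathObject(pkg, path, true /* skipMethodSorting */)
    }
    return objectpath.Object(pkg, objectpath.Path(path))
}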
52
vendor/golang.org/x/tools/internal/typesinternal/types.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package typesinternal provides access to internal go/types APIs that are not
// yet exported.
package typesinternal

import (
    "go/token"
    "go/types"
    "reflect"
    "unsafe"
)

func SetUsesCgo(conf *types.Config) bool {
    v := reflect.ValueOf(conf).Elem()

    f := v.FieldByName("go115UsesCgo")
    if !f.IsValid() {
        f = v.FieldByName("UsesCgo")
        if !f.IsValid() {
            return false
        }
    }

    addr := unsafe.Pointer(f.UnsafeAddr())
    *(*bool)(addr) = true

    return true
}

// ReadGo116ErrorData extracts additional information from types.Error values
// generated by Go version 1.16 and later: the error code, start position, and
// end position. If all positions are valid, start <= err.Pos <= end.
//
// If the data could not be read, the final result parameter will be false.
func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
    var data [3]int
    // By coincidence all of these fields are ints, which simplifies things.
    v := reflect.ValueOf(err)
    for i, name := range []string{"go116code", "go116start", "go116end"} {
        f := v.FieldByName(name)
        if !f.IsValid() {
            return 0, 0, 0, false
        }
        data[i] = int(f.Int())
    }
    return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
}

var SetGoVersion = func(conf *types.Config, version string) bool { return false }
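ReadGo116ErrorData is typically called from a types.Config error handler, where the structured code and span are not otherwise exposed. A minimal sketch, assuming an in-repo caller and an invented source string that fails to type-check:

package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
    "go/types"

    "golang.org/x/tools/internal/typesinternal"
)

const src = `package p

var x int = "not an int"
`

func main() {
    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        panic(err)
    }
    conf := types.Config{
        Error: func(err error) {
            terr, ok := err.(types.Error)
            if !ok {
                return
            }
            if code, start, end, ok := typesinternal.ReadGo116ErrorData(terr); ok {
                fmt.Printf("code=%v span=%v-%v: %s\n", code, start, end, terr.Msg)
            }
        },
    }
    conf.Check("p", fset, []*ast.File{f}, nil) // errors are reported via conf.Error
}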
19
vendor/golang.org/x/tools/internal/typesinternal/types_118.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.18
// +build go1.18

package typesinternal

import (
    "go/types"
)

func init() {
    SetGoVersion = func(conf *types.Config, version string) bool {
        conf.GoVersion = version
        return true
    }
}
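With this init hook in place, SetGoVersion reports whether the running toolchain can honor a language-version pin; on pre-1.18 toolchains the fallback defined in types.go simply returns false. A brief sketch of the call site (names invented):

package main

import (
    "fmt"
    "go/types"

    "golang.org/x/tools/internal/typesinternal"
)

func main() {
    conf := &types.Config{}
    if typesinternal.SetGoVersion(conf, "go1.18") {
        fmt.Println("type checker pinned to language version go1.18")
    } else {
        fmt.Println("GoVersion is not supported by this toolchain; the default applies")
    }
}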