Diffstat (limited to 'vendor/golang.org/x/tools/internal/gcimporter')
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/bimport.go        |  150
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/exportdata.go     |   99
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go     |  266
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iexport.go        | 1332
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iimport.go        | 1100
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go |   22
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go |   14
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/support_go118.go  |   34
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/unified_no.go     |   10
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go    |   10
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go    |  728
11 files changed, 3765 insertions, 0 deletions
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
new file mode 100644
index 0000000..d98b0db
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -0,0 +1,150 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the remaining vestiges of
+// $GOROOT/src/go/internal/gcimporter/bimport.go.
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "sync"
+)
+
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Sprintf(format, args...))
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*fileInfo
+}
+
+type fileInfo struct {
+ file *token.File
+ lastline int
+}
+
+const maxlines = 64 * 1024
+
+func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
+ // TODO(mdempsky): Make use of column.
+
+ // Since we don't know the set of needed file positions, we reserve maxlines
+ // positions per file. We delay calling token.File.SetLines until all
+ // positions have been calculated (by way of fakeFileSet.setLines), so that
+ // we can avoid setting unnecessary lines. See also golang/go#46586.
+ f := s.files[file]
+ if f == nil {
+ f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
+ s.files[file] = f
+ }
+ if line > maxlines {
+ line = 1
+ }
+ if line > f.lastline {
+ f.lastline = line
+ }
+
+ // Return a fake position assuming that f.file consists only of newlines.
+ return token.Pos(f.file.Base() + line - 1)
+}
+
+func (s *fakeFileSet) setLines() {
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ for _, f := range s.files {
+ f.file.SetLines(fakeLines[:f.lastline])
+ }
+}
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func chanDir(d int) types.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types.RecvOnly
+ case 2 /* Csend */ :
+ return types.SendOnly
+ case 3 /* Cboth */ :
+ return types.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
+
+var predeclOnce sync.Once
+var predecl []types.Type // initialized lazily
+
+func predeclared() []types.Type {
+ predeclOnce.Do(func() {
+ // initialize lazily to be sure that all
+ // elements have been initialized before use
+ predecl = []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+ }
+ predecl = append(predecl, additionalPredeclared()...)
+ })
+ return predecl
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
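
A minimal sketch (not part of the vendored file) of the fake-position scheme
above, using only the public go/token API: the reserved file is treated as if
it consisted only of newlines, so line N maps to position Base()+N-1.

    package main

    import (
        "fmt"
        "go/token"
    )

    func main() {
        fset := token.NewFileSet()
        const maxlines = 64 * 1024
        f := fset.AddFile("p.go", -1, maxlines) // reserve maxlines positions

        // Pretend every byte is a newline: line i+1 starts at offset i.
        lines := make([]int, maxlines)
        for i := range lines {
            lines[i] = i
        }
        f.SetLines(lines)

        pos := token.Pos(f.Base() + 42 - 1) // synthetic position for line 42
        fmt.Println(fset.Position(pos))     // prints "p.go:42:1"
    }
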
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
new file mode 100644
index 0000000..f6437fe
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -0,0 +1,99 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
+
+// This file implements FindExportData.
+
+package gcimporter
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
+ // See $GOROOT/include/ar.h.
+ hdr := make([]byte, 16+12+6+6+8+10+2)
+ _, err = io.ReadFull(r, hdr)
+ if err != nil {
+ return
+ }
+ // leave for debugging
+ if false {
+ fmt.Printf("header: %s", hdr)
+ }
+ s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+ length, err := strconv.Atoi(s)
+ size = int64(length)
+ if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+ err = fmt.Errorf("invalid archive header")
+ return
+ }
+ name = strings.TrimSpace(string(hdr[:16]))
+ return
+}
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function. The hdr result
+// is the string before the export data, either "$$" or "$$B".
+// The size result is the length of the export data in bytes, or -1 if not known.
+func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+
+ if string(line) == "!<arch>\n" {
+ // Archive file. Scan to __.PKGDEF.
+ var name string
+ if name, size, err = readGopackHeader(r); err != nil {
+ return
+ }
+
+ // First entry should be __.PKGDEF.
+ if name != "__.PKGDEF" {
+ err = fmt.Errorf("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ size -= int64(len(line))
+ }
+
+ // Now at __.PKGDEF in archive or still at beginning of file.
+ // Either way, line should begin with "go object ".
+ if !strings.HasPrefix(string(line), "go object ") {
+ err = fmt.Errorf("not a Go object file")
+ return
+ }
+
+ // Skip over object header to export data.
+ // Begins after first line starting with $$.
+ for line[0] != '$' {
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ size -= int64(len(line))
+ }
+ hdr = string(line)
+ if size < 0 {
+ size = -1
+ }
+
+ return
+}
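
A hedged usage sketch for FindExportData (the archive path is hypothetical,
and the internal import path is only reachable from within this vendor tree):

    package main

    import (
        "bufio"
        "fmt"
        "log"
        "os"

        "golang.org/x/tools/internal/gcimporter"
    )

    func main() {
        f, err := os.Open("fmt.a") // any gc-created archive or object file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        buf := bufio.NewReader(f)
        hdr, size, err := gcimporter.FindExportData(buf)
        if err != nil {
            log.Fatal(err)
        }
        // buf is now positioned at the start of the export data.
        fmt.Printf("header %q, export data size %d\n", hdr, size)
    }
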
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
new file mode 100644
index 0000000..39df911
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -0,0 +1,266 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
+
+// Package gcimporter provides various functions for reading
+// gc-generated object files that can be used to implement the
+// Importer interface defined by the Go 1.5 standard library package.
+//
+// The encoding is deterministic: if the encoder is applied twice to
+// the same types.Package data structure, both encodings are equal.
+// This property may be important to avoid spurious changes in
+// applications such as build systems.
+//
+// However, the encoder is not necessarily idempotent. Importing an
+// exported package may yield a types.Package that, while it
+// represents the same set of Go types as the original, may differ in
+// the details of its internal representation. Because of these
+// differences, re-encoding the imported package may yield a
+// different, but equally valid, encoding of the package.
+package gcimporter // import "golang.org/x/tools/internal/gcimporter"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/build"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+const (
+ // Enable debug during development: it adds some additional checks, and
+ // prevents errors from being recovered.
+ debug = false
+
+ // If trace is set, debugging output is printed to standard output.
+ trace = false
+)
+
+var exportMap sync.Map // package dir → func() (string, bool)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+func lookupGorootExport(pkgDir string) (string, bool) {
+ f, ok := exportMap.Load(pkgDir)
+ if !ok {
+ var (
+ listOnce sync.Once
+ exportPath string
+ )
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
+ listOnce.Do(func() {
+ cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
+ cmd.Dir = build.Default.GOROOT
+ var output []byte
+ output, err := cmd.Output()
+ if err != nil {
+ return
+ }
+
+ exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+ if len(exports) != 1 {
+ return
+ }
+
+ exportPath = exports[0]
+ })
+
+ return exportPath, exportPath != ""
+ })
+ }
+
+ return f.(func() (string, bool))()
+}
+
+var pkgExts = [...]string{".a", ".o"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+// If no file was found, an empty filename is returned.
+func FindPkg(path, srcDir string) (filename, id string) {
+ if path == "" {
+ return
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ var ok bool
+ if bp.Goroot && bp.Dir != "" {
+ filename, ok = lookupGorootExport(bp.Dir)
+ }
+ if !ok {
+ id = path // make sure we have an id to print in error message
+ return
+ }
+ } else {
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ id = bp.ImportPath
+ }
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ if filename != "" {
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+ var rc io.ReadCloser
+ var filename, id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ filename, id = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %q", id)
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ var hdr string
+ var size int64
+ buf := bufio.NewReader(rc)
+ if hdr, size, err = FindExportData(buf); err != nil {
+ return
+ }
+
+ switch hdr {
+ case "$$B\n":
+ var data []byte
+ data, err = io.ReadAll(buf)
+ if err != nil {
+ break
+ }
+
+ // TODO(gri): allow clients of go/importer to provide a FileSet.
+ // Or, define a new standard go/types/gcexportdata package.
+ fset := token.NewFileSet()
+
+ // Select appropriate importer.
+ if len(data) > 0 {
+ switch data[0] {
+ case 'v', 'c', 'd': // binary, till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+ case 'i': // indexed, till go1.19
+ _, pkg, err := IImportData(fset, packages, data[1:], id)
+ return pkg, err
+
+ case 'u': // unified, from go1.20
+ _, pkg, err := UImportData(fset, packages, data[1:size], id)
+ return pkg, err
+
+ default:
+ l := len(data)
+ if l > 10 {
+ l = 10
+ }
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
+ }
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ }
+
+ return
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
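
To tie the pieces together, a sketch of calling Import directly (again, the
internal import path is only usable from inside this vendor tree, and "fmt"
must have compiled export data available):

    package main

    import (
        "fmt"
        "go/types"
        "log"

        "golang.org/x/tools/internal/gcimporter"
    )

    func main() {
        // The map must contain all packages imported so far; Import adds to it.
        packages := make(map[string]*types.Package)

        // A nil lookup func means FindPkg and the file system are used.
        pkg, err := gcimporter.Import(packages, "fmt", ".", nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(pkg.Path(), "complete:", pkg.Complete())
    }
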
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
new file mode 100644
index 0000000..deeb67f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -0,0 +1,1332 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/tokeninternal"
+)
+
+// IExportShallow encodes "shallow" export data for the specified package.
+//
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IImportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during export.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
+ // In principle this operation can only fail if out.Write fails,
+ // but that's impossible for bytes.Buffer---and as a matter of
+ // fact iexportCommon doesn't even check for I/O errors.
+ // TODO(adonovan): handle I/O errors properly.
+ // TODO(adonovan): use byte slices throughout, avoiding copying.
+ const bundle, shallow = false, true
+ var out bytes.Buffer
+ err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+ return out.Bytes(), err
+}
+
+// IImportShallow decodes "shallow" types.Package data encoded by
+// IExportShallow in the same executable. This function cannot import data from
+// cmd/compile or gcexportdata.Write.
+//
+// The importer calls getPackages to obtain package symbols for all
+// packages mentioned in the export data, including the one being
+// decoded.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during import.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) {
+ const bundle = false
+ const shallow = true
+ pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf)
+ if err != nil {
+ return nil, err
+ }
+ return pkgs[0], nil
+}
+
+// ReportFunc is the type of a function used to report formatted bugs.
+type ReportFunc = func(string, ...interface{})
+
+// Current bundled export format version. Increase with each format change.
+// 0: initial implementation
+const bundleVersion = 0
+
+// IExportData writes indexed export data for pkg to out.
+//
+// If no file set is provided, position info will be missing.
+// The package path of the top-level package will not be recorded,
+// so that calls to IImportData can override with a provided package path.
+func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ const bundle, shallow = false, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+}
+
+// IExportBundle writes an indexed export bundle for pkgs to out.
+func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
+ const bundle, shallow = true, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs)
+}
+
+func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) {
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+ }
+
+ p := iexporter{
+ fset: fset,
+ version: version,
+ shallow: shallow,
+ allPkgs: map[*types.Package]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[types.Object]uint64{},
+ tparamNames: map[types.Object]string{},
+ typIndex: map[types.Type]uint64{},
+ }
+ if !bundle {
+ p.localpkg = pkgs[0]
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+ }
+
+ // Initialize work queue with exported declarations.
+ for _, pkg := range pkgs {
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if token.IsExported(name) {
+ p.pushDecl(scope.Lookup(name))
+ }
+ }
+
+ if bundle {
+ // Ensure pkg and its imports are included in the index.
+ p.allPkgs[pkg] = true
+ for _, imp := range pkg.Imports() {
+ p.allPkgs[imp] = true
+ }
+ }
+ }
+
+ // Loop until no more work.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popHead())
+ }
+
+ // Produce index of offset of each file record in files.
+ var files intWriter
+ var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
+ if p.shallow {
+ fileOffset = make([]uint64, len(p.fileInfos))
+ for i, info := range p.fileInfos {
+ fileOffset[i] = uint64(files.Len())
+ p.encodeFile(&files, info.file, info.needed)
+ }
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex)
+
+ if bundle {
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.pkg(pkg)
+ imps := pkg.Imports()
+ w.uint64(uint64(len(imps)))
+ for _, imp := range imps {
+ w.pkg(imp)
+ }
+ }
+ }
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ if bundle {
+ hdr.uint64(bundleVersion)
+ }
+ hdr.uint64(uint64(p.version))
+ hdr.uint64(uint64(p.strings.Len()))
+ if p.shallow {
+ hdr.uint64(uint64(files.Len()))
+ hdr.uint64(uint64(len(fileOffset)))
+ for _, offset := range fileOffset {
+ hdr.uint64(offset)
+ }
+ }
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ io.Copy(out, &hdr)
+ io.Copy(out, &p.strings)
+ if p.shallow {
+ io.Copy(out, &files)
+ }
+ io.Copy(out, &p.data0)
+
+ return nil
+}
+
+// encodeFile writes to w a representation of the file sufficient to
+// faithfully restore position information about all needed offsets.
+// Mutates the needed array.
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
+ _ = needed[0] // precondition: needed is non-empty
+
+ w.uint64(p.stringOff(file.Name()))
+
+ size := uint64(file.Size())
+ w.uint64(size)
+
+ // Sort the set of needed offsets. Duplicates are harmless.
+ sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
+
+ lines := tokeninternal.GetLines(file) // byte offset of each line start
+ w.uint64(uint64(len(lines)))
+
+ // Rather than record the entire array of line start offsets,
+ // we save only a sparse list of (index, offset) pairs for
+ // the start of each line that contains a needed position.
+ var sparse [][2]int // (index, offset) pairs
+outer:
+ for i, lineStart := range lines {
+ lineEnd := size
+ if i < len(lines)-1 {
+ lineEnd = uint64(lines[i+1])
+ }
+ // Does this line contain a needed offset?
+ if needed[0] < lineEnd {
+ sparse = append(sparse, [2]int{i, lineStart})
+ for needed[0] < lineEnd {
+ needed = needed[1:]
+ if len(needed) == 0 {
+ break outer
+ }
+ }
+ }
+ }
+
+ // Delta-encode the (index, offset) pairs.
+ w.uint64(uint64(len(sparse)))
+ var prev [2]int
+ for _, pair := range sparse {
+ w.uint64(uint64(pair[0] - prev[0]))
+ w.uint64(uint64(pair[1] - prev[1]))
+ prev = pair
+ }
+}
+
+// writeIndex writes out the main object index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+ type pkgObj struct {
+ obj types.Object
+ name string // qualified name; differs from obj.Name for type params
+ }
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Package][]pkgObj{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ if w.p.localpkg != nil {
+ pkgObjs[w.p.localpkg] = nil
+ }
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+
+ for obj := range index {
+ name := w.p.exportName(obj)
+ pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
+ }
+
+ var pkgs []*types.Package
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].name < objs[j].name
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(w.exportPath(pkg))
+ w.string(pkg.Name())
+ w.uint64(uint64(0)) // package height is not needed for go/types
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, obj := range objs {
+ w.string(obj.name)
+ w.uint64(index[obj.obj])
+ }
+ }
+}
+
+// exportName returns the 'exported' name of an object. It differs from
+// obj.Name() only for type parameters (see tparamExportName for details).
+func (p *iexporter) exportName(obj types.Object) (res string) {
+ if name := p.tparamNames[obj]; name != "" {
+ return name
+ }
+ return obj.Name()
+}
+
+type iexporter struct {
+ fset *token.FileSet
+ out *bytes.Buffer
+ version int
+
+ shallow bool // don't put types from other packages in the index
+ objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated
+ localpkg *types.Package // (nil in bundle mode)
+
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can ensure to include them in the
+ // main index.
+ allPkgs map[*types.Package]bool
+
+ declTodo objQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ // In shallow mode, object positions are encoded as (file, offset).
+ // Each file is recorded as a line-number table.
+ // Only the lines of needed positions are saved faithfully.
+ fileInfo map[*token.File]uint64 // value is index in fileInfos
+ fileInfos []*filePositions
+
+ data0 intWriter
+ declIndex map[types.Object]uint64
+ tparamNames map[types.Object]string // typeparam->exported name
+ typIndex map[types.Type]uint64
+
+ indent int // for tracing support
+}
+
+type filePositions struct {
+ file *token.File
+ needed []uint64 // unordered list of needed file offsets
+}
+
+func (p *iexporter) trace(format string, args ...interface{}) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+// objectpathEncoder returns the lazily allocated objectpath.Encoder to use
+// when encoding objects in other packages during shallow export.
+//
+// Using a shared Encoder amortizes some of the cost of objectpath search.
+func (p *iexporter) objectpathEncoder() *objectpath.Encoder {
+ if p.objEncoder == nil {
+ p.objEncoder = new(objectpath.Encoder)
+ }
+ return p.objEncoder
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
+
+// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
+ index, ok := p.fileInfo[file]
+ if !ok {
+ index = uint64(len(p.fileInfo))
+ p.fileInfos = append(p.fileInfos, &filePositions{file: file})
+ if p.fileInfo == nil {
+ p.fileInfo = make(map[*token.File]uint64)
+ }
+ p.fileInfo[file] = index
+ }
+ // Record each needed offset.
+ info := p.fileInfos[index]
+ offset := uint64(file.Offset(pos))
+ info.needed = append(info.needed, offset)
+
+ return index, offset
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+ // Package unsafe is known to the compiler and predeclared.
+ // Callers should not ask us to export it.
+ if obj.Pkg() == types.Unsafe {
+ panic("cannot export package unsafe")
+ }
+
+ // Shallow export data: don't index decls from other packages.
+ if p.shallow && obj.Pkg() != p.localpkg {
+ return
+ }
+
+ if _, ok := p.declIndex[obj]; ok {
+ return
+ }
+
+ p.declIndex[obj] = ^uint64(0) // mark obj present in work queue
+ p.declTodo.pushTail(obj)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+func (w *exportWriter) exportPath(pkg *types.Package) string {
+ if pkg == w.p.localpkg {
+ return ""
+ }
+ return pkg.Path()
+}
+
+func (p *iexporter) doDecl(obj types.Object) {
+ if trace {
+ p.trace("exporting decl %v (%T)", obj, obj)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", obj)
+ }()
+ }
+ w := p.newWriter()
+
+ switch obj := obj.(type) {
+ case *types.Var:
+ w.tag(varTag)
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+
+ case *types.Func:
+ sig, _ := obj.Type().(*types.Signature)
+ if sig.Recv() != nil {
+ // We shouldn't see methods in the package scope,
+ // but the type checker may repair "func () F() {}"
+ // to "func (Invalid) F()" and then treat it like "func F()",
+ // so allow that. See golang/go#57729.
+ if sig.Recv().Type() != types.Typ[types.Invalid] {
+ panic(internalErrorf("unexpected method: %v", sig))
+ }
+ }
+
+ // Function.
+ if sig.TypeParams().Len() == 0 {
+ w.tag(funcTag)
+ } else {
+ w.tag(genericFuncTag)
+ }
+ w.pos(obj.Pos())
+ // The tparam list of the function type is the declaration of the type
+ // params. So, write out the type params right now. Then those type params
+ // will be referenced via their type offset (via typOff) in all other
+ // places in the signature and function where they are used.
+ //
+ // While importing the type parameters, tparamList computes and records
+ // their export name, so that it can be later used when writing the index.
+ if tparams := sig.TypeParams(); tparams.Len() > 0 {
+ w.tparamList(obj.Name(), tparams, obj.Pkg())
+ }
+ w.signature(sig)
+
+ case *types.Const:
+ w.tag(constTag)
+ w.pos(obj.Pos())
+ w.value(obj.Type(), obj.Val())
+
+ case *types.TypeName:
+ t := obj.Type()
+
+ if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok {
+ w.tag(typeParamTag)
+ w.pos(obj.Pos())
+ constraint := tparam.Constraint()
+ if p.version >= iexportVersionGo1_18 {
+ implicit := false
+ if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil {
+ implicit = iface.IsImplicit()
+ }
+ w.bool(implicit)
+ }
+ w.typ(constraint, obj.Pkg())
+ break
+ }
+
+ if obj.IsAlias() {
+ w.tag(aliasTag)
+ w.pos(obj.Pos())
+ if alias, ok := t.(*aliases.Alias); ok {
+ // Preserve materialized aliases,
+ // even of non-exported types.
+ t = aliases.Rhs(alias)
+ }
+ w.typ(t, obj.Pkg())
+ break
+ }
+
+ // Defined type.
+ named, ok := t.(*types.Named)
+ if !ok {
+ panic(internalErrorf("%s is not a defined type", t))
+ }
+
+ if named.TypeParams().Len() == 0 {
+ w.tag(typeTag)
+ } else {
+ w.tag(genericTypeTag)
+ }
+ w.pos(obj.Pos())
+
+ if named.TypeParams().Len() > 0 {
+ // While importing the type parameters, tparamList computes and records
+ // their export name, so that it can be later used when writing the index.
+ w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg())
+ }
+
+ underlying := named.Underlying()
+ w.typ(underlying, obj.Pkg())
+
+ if types.IsInterface(t) {
+ break
+ }
+
+ n := named.NumMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := named.Method(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+
+ // Receiver type parameters are type arguments of the receiver type, so
+ // their name must be qualified before exporting recv.
+ if rparams := sig.RecvTypeParams(); rparams.Len() > 0 {
+ prefix := obj.Name() + "." + m.Name()
+ for i := 0; i < rparams.Len(); i++ {
+ rparam := rparams.At(i)
+ name := tparamExportName(prefix, rparam)
+ w.p.tparamNames[rparam.Obj()] = name
+ }
+ }
+ w.param(sig.Recv())
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected object: %v", obj))
+ }
+
+ p.declIndex[obj] = w.flush()
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
+func (w *exportWriter) pos(pos token.Pos) {
+ if w.p.shallow {
+ w.posV2(pos)
+ } else if w.p.version >= iexportVersionPosCol {
+ w.posV1(pos)
+ } else {
+ w.posV0(pos)
+ }
+}
+
+// posV2 encoding (used only in shallow mode) records positions as
+// (file, offset), where file is the index in the token.File table
+// (which records the file name and newline offsets) and offset is a
+// byte offset. It effectively ignores //line directives.
+func (w *exportWriter) posV2(pos token.Pos) {
+ if pos == token.NoPos {
+ w.uint64(0)
+ return
+ }
+ file := w.p.fset.File(pos) // fset must be non-nil
+ index, offset := w.p.fileIndexAndOffset(file, pos)
+ w.uint64(1 + index)
+ w.uint64(offset)
+}
+
+func (w *exportWriter) posV1(pos token.Pos) {
+ if w.p.fset == nil {
+ w.int64(0)
+ return
+ }
+
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+ column := int64(p.Column)
+
+ deltaColumn := (column - w.prevColumn) << 1
+ deltaLine := (line - w.prevLine) << 1
+
+ if file != w.prevFile {
+ deltaLine |= 1
+ }
+ if deltaLine != 0 {
+ deltaColumn |= 1
+ }
+
+ w.int64(deltaColumn)
+ if deltaColumn&1 != 0 {
+ w.int64(deltaLine)
+ if deltaLine&1 != 0 {
+ w.string(file)
+ }
+ }
+
+ w.prevFile = file
+ w.prevLine = line
+ w.prevColumn = column
+}
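+
+// For illustration: with prevFile=="a.go", prevLine==10, prevColumn==4, a
+// position at a.go:10:8 encodes as the single varint deltaColumn==(8-4)<<1==8
+// (low bit clear: no line or file change follows). A position at a.go:12:8
+// instead writes deltaColumn==9 (low bit set because deltaLine!=0), then
+// deltaLine==(12-10)<<1==4 (low bit clear: same file, so no file string).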
+
+func (w *exportWriter) posV0(pos token.Pos) {
+ if w.p.fset == nil {
+ w.int64(0)
+ return
+ }
+
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+
+ // When file is the same as the last position (common case),
+ // we can save a few bytes by delta encoding just the line
+ // number.
+ //
+ // Note: Because data objects may be read out of order (or not
+ // at all), we can only apply delta encoding within a single
+ // object. This is handled implicitly by tracking prevFile and
+ // prevLine as fields of exportWriter.
+
+ if file == w.prevFile {
+ delta := line - w.prevLine
+ w.int64(delta)
+ if delta == deltaNewFile {
+ w.int64(-1)
+ }
+ } else {
+ w.int64(deltaNewFile)
+ w.int64(line) // line >= 0
+ w.string(file)
+ w.prevFile = file
+ }
+ w.prevLine = line
+}
+
+func (w *exportWriter) pkg(pkg *types.Package) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(w.exportPath(pkg))
+}
+
+func (w *exportWriter) qualifiedType(obj *types.TypeName) {
+ name := w.p.exportName(obj)
+
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(obj)
+ w.string(name)
+ w.pkg(obj.Pkg())
+}
+
+// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass
+// it in explicitly into signatures and structs that may use it for
+// constructing fields.
+func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
+ w.data.uint64(w.p.typOff(t, pkg))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
+func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t, pkg)
+ off = predeclReserved + w.flush()
+ p.typIndex[t] = off
+ }
+ return off
+}
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
+ if trace {
+ w.p.trace("exporting type %s (%T)", t, t)
+ w.p.indent++
+ defer func() {
+ w.p.indent--
+ w.p.trace("=> %s", t)
+ }()
+ }
+ switch t := t.(type) {
+ case *aliases.Alias:
+ // TODO(adonovan): support parameterized aliases, following *types.Named.
+ w.startType(aliasType)
+ w.qualifiedType(t.Obj())
+
+ case *types.Named:
+ if targs := t.TypeArgs(); targs.Len() > 0 {
+ w.startType(instanceType)
+ // TODO(rfindley): investigate if this position is correct, and if it
+ // matters.
+ w.pos(t.Obj().Pos())
+ w.typeList(targs, pkg)
+ w.typ(t.Origin(), pkg)
+ return
+ }
+ w.startType(definedType)
+ w.qualifiedType(t.Obj())
+
+ case *types.TypeParam:
+ w.startType(typeParamType)
+ w.qualifiedType(t.Obj())
+
+ case *types.Pointer:
+ w.startType(pointerType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Slice:
+ w.startType(sliceType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Array:
+ w.startType(arrayType)
+ w.uint64(uint64(t.Len()))
+ w.typ(t.Elem(), pkg)
+
+ case *types.Chan:
+ w.startType(chanType)
+ // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+ var dir uint64
+ switch t.Dir() {
+ case types.RecvOnly:
+ dir = 1
+ case types.SendOnly:
+ dir = 2
+ case types.SendRecv:
+ dir = 3
+ }
+ w.uint64(dir)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Map:
+ w.startType(mapType)
+ w.typ(t.Key(), pkg)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Signature:
+ w.startType(signatureType)
+ w.pkg(pkg)
+ w.signature(t)
+
+ case *types.Struct:
+ w.startType(structType)
+ n := t.NumFields()
+ // Even for struct{} we must emit some qualifying package, because that's
+ // what the compiler does, and thus that's what the importer expects.
+ fieldPkg := pkg
+ if n > 0 {
+ fieldPkg = t.Field(0).Pkg()
+ }
+ if fieldPkg == nil {
+ // TODO(rfindley): improve this very hacky logic.
+ //
+ // The importer expects a package to be set for all struct types, even
+ // those with no fields. A better encoding might be to set NumFields
+ // before pkg. setPkg panics with a nil package, which may be possible
+ // to reach with invalid packages (and perhaps valid packages, too?), so
+ // (arbitrarily) set the localpkg if available.
+ //
+ // Alternatively, we may be able to simply guarantee that pkg != nil, by
+ // reconsidering the encoding of constant values.
+ if w.p.shallow {
+ fieldPkg = w.p.localpkg
+ } else {
+ panic(internalErrorf("no package to set for empty struct"))
+ }
+ }
+ w.pkg(fieldPkg)
+ w.uint64(uint64(n))
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if w.p.shallow {
+ w.objectPath(f)
+ }
+ w.pos(f.Pos())
+ w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg
+ w.typ(f.Type(), fieldPkg)
+ w.bool(f.Anonymous())
+ w.string(t.Tag(i)) // note (or tag)
+ }
+
+ case *types.Interface:
+ w.startType(interfaceType)
+ w.pkg(pkg)
+
+ n := t.NumEmbeddeds()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ ft := t.EmbeddedType(i)
+ tPkg := pkg
+ if named, _ := aliases.Unalias(ft).(*types.Named); named != nil {
+ w.pos(named.Obj().Pos())
+ } else {
+ w.pos(token.NoPos)
+ }
+ w.typ(ft, tPkg)
+ }
+
+ // See comment for struct fields. In shallow mode we change the encoding
+ // for interface methods that are promoted from other packages.
+
+ n = t.NumExplicitMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := t.ExplicitMethod(i)
+ if w.p.shallow {
+ w.objectPath(m)
+ }
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.signature(sig)
+ }
+
+ case *types.Union:
+ w.startType(unionType)
+ nt := t.Len()
+ w.uint64(uint64(nt))
+ for i := 0; i < nt; i++ {
+ term := t.Term(i)
+ w.bool(term.Tilde())
+ w.typ(term.Type(), pkg)
+ }
+
+ default:
+ panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
+ }
+}
+
+// objectPath writes the package and objectPath to use to look up obj in a
+// different package, when encoding in "shallow" mode.
+//
+// When doing a shallow import, the importer creates only the local package,
+// and requests package symbols for dependencies from the client.
+// However, certain types defined in the local package may hold objects defined
+// (perhaps deeply) within another package.
+//
+// For example, consider the following:
+//
+// package a
+// func F() chan * map[string] struct { X int }
+//
+// package b
+// import "a"
+// var B = a.F()
+//
+// In this example, the type of b.B holds fields defined in package a.
+// In order to have the correct canonical objects for the field defined in the
+// type of B, they are encoded as objectPaths and later looked up in the
+// importer. The same problem applies to interface methods.
+func (w *exportWriter) objectPath(obj types.Object) {
+ if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg {
+ // obj.Pkg() may be nil for the builtin error.Error.
+ // In this case, or if obj is declared in the local package, no need to
+ // encode.
+ w.string("")
+ return
+ }
+ objectPath, err := w.p.objectpathEncoder().For(obj)
+ if err != nil {
+ // Fall back to the empty string, which will cause the importer to create a
+ // new object, which matches earlier behavior. Creating a new object is
+ // sufficient for many purposes (such as type checking), but causes certain
+ // references algorithms to fail (golang/go#60819). However, we didn't
+ // notice this problem during months of gopls@v0.12.0 testing.
+ //
+ // TODO(golang/go#61674): this workaround is insufficient, as in the case
+ // where the field is forwarded from an instantiated type and may not
+ // appear in the export data of the original package:
+ //
+ // // package a
+ // type A[P any] struct{ F P }
+ //
+ // // package b
+ // type B a.A[int]
+ //
+ // We need to update references algorithms not to depend on this
+ // de-duplication, at which point we may want to simply remove the
+ // workaround here.
+ w.string("")
+ return
+ }
+ w.string(string(objectPath))
+ w.pkg(obj.Pkg())
+}
+
+func (w *exportWriter) signature(sig *types.Signature) {
+ w.paramList(sig.Params())
+ w.paramList(sig.Results())
+ if sig.Params().Len() > 0 {
+ w.bool(sig.Variadic())
+ }
+}
+
+func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) {
+ w.uint64(uint64(ts.Len()))
+ for i := 0; i < ts.Len(); i++ {
+ w.typ(ts.At(i), pkg)
+ }
+}
+
+func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) {
+ ll := uint64(list.Len())
+ w.uint64(ll)
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ // Set the type parameter exportName before exporting its type.
+ exportName := tparamExportName(prefix, tparam)
+ w.p.tparamNames[tparam.Obj()] = exportName
+ w.typ(list.At(i), pkg)
+ }
+}
+
+const blankMarker = "$"
+
+// tparamExportName returns the 'exported' name of a type parameter, which
+// differs from its actual object name: it is prefixed with a qualifier, and
+// blank type parameter names are disambiguated by their index in the type
+// parameter list.
+func tparamExportName(prefix string, tparam *types.TypeParam) string {
+ assert(prefix != "")
+ name := tparam.Obj().Name()
+ if name == "_" {
+ name = blankMarker + strconv.Itoa(tparam.Index())
+ }
+ return prefix + "." + name
+}
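+
+// For example, for "func F[T any, _ any]()" the exported names are "F.T" and
+// "F.$1": the blank parameter at index 1 becomes blankMarker + "1".
+// tparamName reverses this mapping: "F.T" -> "T" and "F.$1" -> "_".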
+
+// tparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See tparamExportName
+// for details.
+func tparamName(exportName string) string {
+ // Remove the "path" from the type param name that makes it unique.
+ ix := strings.LastIndex(exportName, ".")
+ if ix < 0 {
+ errorf("malformed type parameter export name %s: missing prefix", exportName)
+ }
+ name := exportName[ix+1:]
+ if strings.HasPrefix(name, blankMarker) {
+ return "_"
+ }
+ return name
+}
+
+func (w *exportWriter) paramList(tup *types.Tuple) {
+ n := tup.Len()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ w.param(tup.At(i))
+ }
+}
+
+func (w *exportWriter) param(obj types.Object) {
+ w.pos(obj.Pos())
+ w.localIdent(obj)
+ w.typ(obj.Type(), obj.Pkg())
+}
+
+func (w *exportWriter) value(typ types.Type, v constant.Value) {
+ w.typ(typ, nil)
+ if w.p.version >= iexportVersionGo1_18 {
+ w.int64(int64(v.Kind()))
+ }
+
+ if v.Kind() == constant.Unknown {
+ // golang/go#60605: treat unknown constant values as if they have invalid type
+ //
+ // This loses some fidelity over the package type-checked from source, but that
+ // is acceptable.
+ //
+ // TODO(rfindley): we should switch on the recorded constant kind rather
+ // than the constant type
+ return
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ w.bool(constant.BoolVal(v))
+ case types.IsInteger:
+ var i big.Int
+ if i64, exact := constant.Int64Val(v); exact {
+ i.SetInt64(i64)
+ } else if ui64, exact := constant.Uint64Val(v); exact {
+ i.SetUint64(ui64)
+ } else {
+ i.SetString(v.ExactString(), 10)
+ }
+ w.mpint(&i, typ)
+ case types.IsFloat:
+ f := constantToFloat(v)
+ w.mpfloat(f, typ)
+ case types.IsComplex:
+ w.mpfloat(constantToFloat(constant.Real(v)), typ)
+ w.mpfloat(constantToFloat(constant.Imag(v)), typ)
+ case types.IsString:
+ w.string(constant.StringVal(v))
+ default:
+ if b.Kind() == types.Invalid {
+ // package contains type errors
+ break
+ }
+ panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
+ }
+}
+
+// constantToFloat converts a constant.Value with kind constant.Float to a
+// big.Float.
+func constantToFloat(x constant.Value) *big.Float {
+ x = constant.ToFloat(x)
+ // Use the same floating-point precision (512) as cmd/compile
+ // (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+ const mpprec = 512
+ var f big.Float
+ f.SetPrec(mpprec)
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ n := valueToRat(num)
+ d := valueToRat(denom)
+ f.SetRat(n.Quo(n, d))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ _, ok := f.SetString(x.ExactString())
+ assert(ok)
+ }
+ return &f
+}
+
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2; while bytes 255, 254, and 253 indicate that a 1-,
+// 2-, or 3-byte big-endian string follows.
+//
+// The encoding for signed types uses the same general approach as for
+// unsigned types, except small values use zig-zag encoding and the
+// bottom bit of the length prefix byte for large values is reserved as
+// a sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
+ basic, ok := typ.Underlying().(*types.Basic)
+ if !ok {
+ panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
+ }
+
+ signed, maxBytes := intSize(basic)
+
+ negative := x.Sign() < 0
+ if !signed && negative {
+ panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
+ }
+
+ b := x.Bytes()
+ if len(b) > 0 && b[0] == 0 {
+ panic(internalErrorf("leading zeros"))
+ }
+ if uint(len(b)) > maxBytes {
+ panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
+ }
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ // Check if x can use small value encoding.
+ if len(b) <= 1 {
+ var ux uint
+ if len(b) == 1 {
+ ux = uint(b[0])
+ }
+ if signed {
+ ux <<= 1
+ if negative {
+ ux--
+ }
+ }
+ if ux < maxSmall {
+ w.data.WriteByte(byte(ux))
+ return
+ }
+ }
+
+ n := 256 - uint(len(b))
+ if signed {
+ n = 256 - 2*uint(len(b))
+ if negative {
+ n |= 1
+ }
+ }
+ if n < maxSmall || n >= 256 {
+ panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
+ }
+
+ w.data.WriteByte(byte(n))
+ w.data.Write(b)
+}
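+
+// A worked example of mpint for int64 (signed, maxBytes==8, so
+// maxSmall==256-2*8==240): the value 3 zig-zags to ux==6 and is written as
+// the single byte 6; -1 zig-zags to ux==1 and is written as the byte 1.
+// The value 300 (big-endian bytes 0x01 0x2C) is too wide for the small form,
+// so it is written as the prefix byte 256-2*2==252 followed by those bytes.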
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
+func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
+ if f.IsInf() {
+ panic("infinite constant")
+ }
+
+ // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+ var mant big.Float
+ exp := int64(f.MantExp(&mant))
+
+ // Scale so that mant is an integer.
+ prec := mant.MinPrec()
+ mant.SetMantExp(&mant, int(prec))
+ exp -= int64(prec)
+
+ manti, acc := mant.Int(nil)
+ if acc != big.Exact {
+ panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
+ }
+ w.mpint(manti, typ)
+ if manti.Sign() != 0 {
+ w.int64(exp)
+ }
+}
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
+func (w *exportWriter) int64(x int64) { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+ // Anonymous parameters.
+ if obj == nil {
+ w.string("")
+ return
+ }
+
+ name := obj.Name()
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ w.string(name)
+}
+
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func assert(cond bool) {
+ if !cond {
+ panic("internal error: assertion failed")
+ }
+}
+
+// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
+
+// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
+// a ready-to-use empty queue.
+type objQueue struct {
+ ring []types.Object
+ head, tail int
+}
+
+// empty returns true if q contains no objects.
+func (q *objQueue) empty() bool {
+ return q.head == q.tail
+}
+
+// pushTail appends n to the tail of the queue.
+func (q *objQueue) pushTail(obj types.Object) {
+ if len(q.ring) == 0 {
+ q.ring = make([]types.Object, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]types.Object, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = obj
+ q.tail++
+}
+
+// popHead pops a node from the head of the queue. It panics if q is empty.
+func (q *objQueue) popHead() types.Object {
+ if q.empty() {
+ panic("dequeue empty")
+ }
+ obj := q.ring[q.head%len(q.ring)]
+ q.head++
+ return obj
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+// TODO(adonovan): make this call panic, so that it's symmetric with errorf.
+// Otherwise it's easy to forget to do anything with the error.
+//
+// TODO(adonovan): also, consider switching the names "errorf" and
+// "internalErrorf" as the former is used for bugs, whose cause is
+// internal inconsistency, whereas the latter is used for ordinary
+// situations like bad input, whose cause is external.
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
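
A round-trip sketch combining this file's IExportData with IImportData from
iimport.go below (the package being exported is illustrative, and neither
function reads or writes the leading 'i' format byte that compiler-produced
files carry):

    package main

    import (
        "bytes"
        "fmt"
        "go/importer"
        "go/token"
        "go/types"
        "log"

        "golang.org/x/tools/internal/gcimporter"
    )

    func main() {
        // Obtain a *types.Package to export.
        pkg, err := importer.Default().Import("strings")
        if err != nil {
            log.Fatal(err)
        }

        var buf bytes.Buffer
        if err := gcimporter.IExportData(&buf, token.NewFileSet(), pkg); err != nil {
            log.Fatal(err)
        }

        // Re-import the indexed export data we just produced.
        imports := make(map[string]*types.Package)
        _, pkg2, err := gcimporter.IImportData(token.NewFileSet(), imports, buf.Bytes(), "strings")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(pkg2.Path(), "complete:", pkg2.Complete())
    }
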
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
new file mode 100644
index 0000000..136aa03
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -0,0 +1,1100 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/gc/iexport.go for the export data format.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+type intReader struct {
+ *bytes.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGo1_18 = 2
+ iexportVersionGenerics = 2
+
+ iexportVersionCurrent = 2
+)
+
+type ident struct {
+ pkg *types.Package
+ name string
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+ typeParamType
+ instanceType
+ unionType
+ aliasType
+)
+
+// Object tags
+const (
+ varTag = 'V'
+ funcTag = 'F'
+ genericFuncTag = 'G'
+ constTag = 'C'
+ aliasTag = 'A'
+ genericAliasTag = 'B'
+ typeParamTag = 'P'
+ typeTag = 'T'
+ genericTypeTag = 'U'
+)
+
+// IImportData imports a package from the serialized package data
+// and returns 0 and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
+ pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)
+ if err != nil {
+ return 0, nil, err
+ }
+ return 0, pkgs[0], nil
+}
+
+// IImportBundle imports a set of packages from the serialized package bundle.
+func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
+ return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)
+}
+
+// A GetPackagesFunc function obtains the non-nil symbols for a set of
+// packages, creating and recursively importing them as needed. An
+// implementation should store each package symbol in the Pkg
+// field of the items array.
+//
+// Any error causes importing to fail. This can be used to quickly read
+// the import manifest of an export data file without fully decoding it.
+type GetPackagesFunc = func(items []GetPackagesItem) error
+
+// A GetPackagesItem is a request from the importer for the package
+// symbol of the specified name and path.
+type GetPackagesItem struct {
+ Name, Path string
+ Pkg *types.Package // to be filled in by GetPackagesFunc call
+
+ // private importer state
+ pathOffset uint64
+ nameIndex map[string]uint64
+}
+
+// GetPackagesFromMap returns a GetPackagesFunc that retrieves
+// packages from the given map of package path to package.
+//
+// The returned function may mutate m: each requested package that is not
+// found is created with types.NewPackage and inserted into m.
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc {
+ return func(items []GetPackagesItem) error {
+ for i, item := range items {
+ pkg, ok := m[item.Path]
+ if !ok {
+ pkg = types.NewPackage(item.Path, item.Name)
+ m[item.Path] = pkg
+ }
+ items[i].Pkg = pkg
+ }
+ return nil
+ }
+}
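+
+// A custom GetPackagesFunc could instead serve requests from a fixed cache
+// and reject anything unexpected, for example (cache is illustrative):
+//
+//	getPackages := func(items []GetPackagesItem) error {
+//		for i, item := range items {
+//			pkg, ok := cache[item.Path]
+//			if !ok {
+//				return fmt.Errorf("unexpected package %q", item.Path)
+//			}
+//			items[i].Pkg = pkg
+//		}
+//		return nil
+//	}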
+
+func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) {
+ const currentVersion = iexportVersionCurrent
+ version := int64(-1)
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if bundle {
+ err = fmt.Errorf("%v", e)
+ } else if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
+ }
+ }
+ }()
+ }
+
+ r := &intReader{bytes.NewReader(data), path}
+
+ if bundle {
+ if v := r.uint64(); v != bundleVersion {
+ errorf("unknown bundle format version %d", v)
+ }
+ }
+
+ version = int64(r.uint64())
+ switch version {
+ case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ if version > iexportVersionGo1_18 {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
+ }
+
+ sLen := int64(r.uint64())
+ var fLen int64
+ var fileOffset []uint64
+ if shallow {
+ // Shallow mode uses a different position encoding.
+ fLen = int64(r.uint64())
+ fileOffset = make([]uint64, r.uint64())
+ for i := range fileOffset {
+ fileOffset[i] = r.uint64()
+ }
+ }
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io.SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ fileData := data[whence+sLen : whence+sLen+fLen]
+ declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
+ r.Seek(sLen+fLen+dLen, io.SeekCurrent)
+
+ p := iimporter{
+ version: int(version),
+ ipath: path,
+ aliases: aliases.Enabled(),
+ shallow: shallow,
+ reportf: reportf,
+
+ stringData: stringData,
+ stringCache: make(map[uint64]string),
+ fileOffset: fileOffset,
+ fileData: fileData,
+ fileCache: make([]*token.File, len(fileOffset)),
+ pkgCache: make(map[uint64]*types.Package),
+
+ declData: declData,
+ pkgIndex: make(map[*types.Package]map[string]uint64),
+ typCache: make(map[uint64]types.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name.
+ tparamIndex: make(map[ident]types.Type),
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+ }
+ defer p.fake.setLines() // set lines for files in fset
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ // Gather the relevant packages from the manifest.
+ items := make([]GetPackagesItem, r.uint64())
+ uniquePkgPaths := make(map[string]bool)
+ for i := range items {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ _ = r.uint64() // package height; unused by go/types
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ items[i].Name = pkgName
+ items[i].Path = pkgPath
+ items[i].pathOffset = pkgPathOff
+
+ // Read index for package.
+ nameIndex := make(map[string]uint64)
+ nSyms := r.uint64()
+ // In shallow mode, only the current package (i=0) has an index.
+ assert(!(shallow && i > 0 && nSyms != 0))
+ for ; nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ items[i].nameIndex = nameIndex
+
+ uniquePkgPaths[pkgPath] = true
+ }
+ // Debugging #63822; hypothesis: there are duplicate PkgPaths.
+ if len(uniquePkgPaths) != len(items) {
+ if reportf != nil { // reportf may be nil, e.g. when reached via IImportData
+ reportf("found duplicate PkgPaths while reading export data manifest: %v", items)
+ }
+ }
+
+ // Request packages all at once from the client,
+ // enabling a parallel implementation.
+ if err := getPackages(items); err != nil {
+ return nil, err // don't wrap this error
+ }
+
+ // Check the results and complete the index.
+ pkgList := make([]*types.Package, len(items))
+ for i, item := range items {
+ pkg := item.Pkg
+ if pkg == nil {
+ errorf("internal error: getPackages returned nil package for %q", item.Path)
+ } else if pkg.Path() != item.Path {
+ errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path)
+ } else if pkg.Name() != item.Name {
+ errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name)
+ }
+ p.pkgCache[item.pathOffset] = pkg
+ p.pkgIndex[pkg] = item.nameIndex
+ pkgList[i] = pkg
+ }
+
+ if bundle {
+ pkgs = make([]*types.Package, r.uint64())
+ for i := range pkgs {
+ pkg := p.pkgAt(r.uint64())
+ imps := make([]*types.Package, r.uint64())
+ for j := range imps {
+ imps[j] = p.pkgAt(r.uint64())
+ }
+ pkg.SetImports(imps)
+ pkgs[i] = pkg
+ }
+ } else {
+ if len(pkgList) == 0 {
+ errorf("no packages found for %s", path)
+ panic("unreachable")
+ }
+ pkgs = pkgList[:1]
+
+ // record all referenced packages as imports
+ list := append(([]*types.Package)(nil), pkgList[1:]...)
+ sort.Sort(byPath(list))
+ pkgs[0].SetImports(list)
+ }
+
+ for _, pkg := range pkgs {
+ if pkg.Complete() {
+ continue
+ }
+
+ names := make([]string, 0, len(p.pkgIndex[pkg]))
+ for name := range p.pkgIndex[pkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(pkg, name)
+ }
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+ }
+
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the typeParamTag case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ d.t.SetConstraint(d.constraint)
+ }
+
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ // Workaround for golang/go#61561. See the doc for instanceList for details.
+ for _, typ := range p.instanceList {
+ if iface, _ := typ.Underlying().(*types.Interface); iface != nil {
+ iface.Complete()
+ }
+ }
+
+ return pkgs, nil
+}
+
+type setConstraintArgs struct {
+ t *types.TypeParam
+ constraint types.Type
+}
+
+type iimporter struct {
+ version int
+ ipath string
+
+ aliases bool
+ shallow bool
+ reportf ReportFunc // if non-nil, used to report bugs
+
+ stringData []byte
+ stringCache map[uint64]string
+ fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
+ fileData []byte
+ fileCache []*token.File // memoized decoding of file encoded as i
+ pkgCache map[uint64]*types.Package
+
+ declData []byte
+ pkgIndex map[*types.Package]map[string]uint64
+ typCache map[uint64]types.Type
+ tparamIndex map[ident]types.Type
+
+ fake fakeFileSet
+ interfaceList []*types.Interface
+
+ // Workaround for the go/types bug golang/go#61561: instances produced during
+ // instantiation may contain incomplete interfaces. Here we only complete the
+ // underlying type of the instance, which is the most common case but doesn't
+ // handle parameterized interface literals defined deeper in the type.
+ instanceList []types.Type // instances for later completion (see golang/go#61561)
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
+
+ indent int // for tracing support
+}
+
+func (p *iimporter) trace(format string, args ...interface{}) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+ if debug {
+ p.trace("import decl %s", name)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", name)
+ }()
+ }
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ // In deep mode, the index should be complete. In shallow
+ // mode, we should have already recursively loaded necessary
+ // dependencies so the above Lookup succeeds.
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ if s, ok := p.stringCache[off]; ok {
+ return s
+ }
+
+ slen, n := binary.Uvarint(p.stringData[off:])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ s := string(p.stringData[spos : spos+slen])
+ p.stringCache[off] = s
+ return s
+}
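+
+// For illustration: if stringData begins with the bytes 0x03 'f' 'o' 'o',
+// then stringAt(0) reads the uvarint length 3, slices out "foo", and
+// memoizes the result in stringCache under offset 0.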
+
+func (p *iimporter) fileAt(index uint64) *token.File {
+ file := p.fileCache[index]
+ if file == nil {
+ off := p.fileOffset[index]
+ file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
+ p.fileCache[index] = file
+ }
+ return file
+}
+
+func (p *iimporter) decodeFile(rd intReader) *token.File {
+ filename := p.stringAt(rd.uint64())
+ size := int(rd.uint64())
+ file := p.fake.fset.AddFile(filename, -1, size)
+
+ // SetLines requires a nondecreasing sequence.
+ // Because it is common for clients to derive the interval
+ // [start, start+len(name)] from a start position, and we
+ // want to ensure that the end offset is on the same line,
+ // we fill in the gaps of the sparse encoding with values
+ // that strictly increase by the largest possible amount.
+ // This allows us to avoid having to record the actual end
+ // offset of each needed line.
+
+ lines := make([]int, int(rd.uint64()))
+ var index, offset int
+ for i, n := 0, int(rd.uint64()); i < n; i++ {
+ index += int(rd.uint64())
+ offset += int(rd.uint64())
+ lines[index] = offset
+
+ // Ensure monotonicity between points.
+ for j := index - 1; j > 0 && lines[j] == 0; j-- {
+ lines[j] = lines[j+1] - 1
+ }
+ }
+
+ // Ensure monotonicity after last point.
+ for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
+ size--
+ lines[j] = size
+ }
+
+ if !file.SetLines(lines) {
+ errorf("SetLines failed: %d", lines) // can't happen
+ }
+ return file
+}
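+
+// An illustrative trace (assumed values, not real encoder output): with 10
+// lines, size 100, and a single explicit point {index: 4, offset: 40}, the
+// loop sets lines[4] = 40 and back-fills lines[1..3] as 37, 38, 39; the
+// tail loop then fills lines[5..9] downward from size-1 as 95..99, giving
+// the strictly increasing sequence that SetLines requires.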
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
+ if t, ok := p.typCache[off]; ok && canReuse(base, t) {
+ return t
+ }
+
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ r.declReader.Reset(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if canReuse(base, t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types.Named, rhs types.Type) bool {
+ if def == nil {
+ return true
+ }
+ iface, _ := aliases.Unalias(rhs).(*types.Interface)
+ if iface == nil {
+ return true
+ }
+ // Don't use iface.Empty() here as iface may not be complete.
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
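+
+// For example, the RHS interface of "type T interface{ M() }" records T as
+// the receiver type of M, so it cannot be cached and reused as the RHS of
+// another declaration; an empty interface, or any non-interface RHS, can.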
+
+type importReader struct {
+ p *iimporter
+ declReader bytes.Reader
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case aliasTag:
+ typ := r.typ()
+ // TODO(adonovan): support generic aliases:
+ // if tag == genericAliasTag {
+ // tparams := r.tparamList()
+ // alias.SetTypeParams(tparams)
+ // }
+ r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ))
+
+ case constTag:
+ typ, val := r.value()
+
+ r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+ case funcTag, genericFuncTag:
+ var tparams []*types.TypeParam
+ if tag == genericFuncTag {
+ tparams = r.tparamList()
+ }
+ sig := r.signature(nil, nil, tparams)
+ r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+ case typeTag, genericTypeTag:
+ // Types can be recursive. We need to set up a stub
+ // declaration before recursing.
+ obj := types.NewTypeName(pos, r.currPkg, name, nil)
+ named := types.NewNamed(obj, nil, nil)
+ // Declare obj before calling r.tparamList, so the new type name is recognized
+ // if used in the constraint of one of its own typeparams (see #48280).
+ r.declare(obj)
+ if tag == genericTypeTag {
+ tparams := r.tparamList()
+ named.SetTypeParams(tparams)
+ }
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ _, recvNamed := typesinternal.ReceiverNamed(recv)
+ targs := recvNamed.TypeArgs()
+ var rparams []*types.TypeParam
+ if targs.Len() > 0 {
+ rparams = make([]*types.TypeParam, targs.Len())
+ for i := range rparams {
+ rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam)
+ }
+ }
+ msig := r.signature(recv, rparams, nil)
+
+ named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case typeParamTag:
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ name0 := tparamName(name)
+ tn := types.NewTypeName(pos, r.currPkg, name0, nil)
+ t := types.NewTypeParam(tn, nil)
+
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg, name}
+ r.p.tparamIndex[id] = t
+ var implicit bool
+ if r.p.version >= iexportVersionGo1_18 {
+ implicit = r.bool()
+ }
+ constraint := r.typ()
+ if implicit {
+ iface, _ := aliases.Unalias(constraint).(*types.Interface)
+ if iface == nil {
+ errorf("non-interface constraint marked implicit")
+ }
+ iface.MarkImplicit()
+ }
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
+ case varTag:
+ typ := r.typ()
+
+ r.declare(types.NewVar(pos, r.currPkg, name, typ))
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+ typ = r.typ()
+ if r.p.version >= iexportVersionGo1_18 {
+ // TODO: add support for using the kind.
+ _ = constant.Kind(r.int64())
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types.IsString:
+ val = constant.MakeString(r.string())
+
+ case types.IsInteger:
+ var x big.Int
+ r.mpint(&x, b)
+ val = constant.Make(&x)
+
+ case types.IsFloat:
+ val = r.mpfloat(b)
+
+ case types.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ if b.Kind() == types.Invalid {
+ val = constant.MakeUnknown()
+ return
+ }
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types.Float32, types.Complex64:
+ return true, 3
+ case types.Float64, types.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types.IsUnsigned) == 0
+ switch b.Kind() {
+ case types.Int8, types.Uint8:
+ maxBytes = 1
+ case types.Int16, types.Uint16:
+ maxBytes = 2
+ case types.Int32, types.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(x *big.Int, typ *types.Basic) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ io.ReadFull(&r.declReader, b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
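+
+// A worked example (for a signed 8-byte kind, where maxSmall = 256-2*8 =
+// 240): the byte 0x07 is "small" and decodes as ^(7>>1) = -4, while the
+// byte 254 marks a "large" value whose magnitude follows in -(254&^1)>>1 =
+// 1 byte (byte arithmetic mod 256), negated only if the low bit is set.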
+
+func (r *importReader) mpfloat(typ *types.Basic) constant.Value {
+ var mant big.Int
+ r.mpint(&mant, typ)
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(r.int64()))
+ }
+ return constant.Make(&f)
+}
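+
+// For example, a mantissa of 3 followed by exponent 5 decodes via
+// SetMantExp to 3 * 2^5 = 96; a zero mantissa short-circuits, and no
+// exponent varint is read at all.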
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() token.Pos {
+ if r.p.shallow {
+ // precise offsets are encoded only in shallow mode
+ return r.posv2()
+ }
+ if r.p.version >= iexportVersionPosCol {
+ r.posv1()
+ } else {
+ r.posv0()
+ }
+
+ if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
+ return token.NoPos
+ }
+ return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
+}
+
+func (r *importReader) posv0() {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevFile = r.string()
+ r.prevLine = l
+ }
+}
+
+func (r *importReader) posv1() {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevFile = r.string()
+ }
+ }
+}
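+
+// A worked example (illustrative deltas): delta = 6 has low bit 0, so only
+// the column advances, by 6>>1 = 3; delta = 5 has low bit 1, so the column
+// advances by 2 and a second varint is read for the line, whose own low
+// bit, if set, means a new file name string follows.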
+
+func (r *importReader) posv2() token.Pos {
+ file := r.uint64()
+ if file == 0 {
+ return token.NoPos
+ }
+ tf := r.p.fileAt(file - 1)
+ return tf.Pos(int(r.uint64()))
+}
+
+func (r *importReader) typ() types.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+ _, ok := aliases.Unalias(t).(*types.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+
+func (r *importReader) doType(base *types.Named) (res types.Type) {
+ k := r.kind()
+ if debug {
+ r.p.trace("importing type %d (base: %s)", k, base)
+ r.p.indent++
+ defer func() {
+ r.p.indent--
+ r.p.trace("=> %s", res)
+ }()
+ }
+ switch k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case aliasType, definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types.TypeName).Type()
+ case pointerType:
+ return types.NewPointer(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types.NewChan(dir, r.typ())
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil, nil, nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ var field *types.Var
+ if r.p.shallow {
+ field, _ = r.objectPathObject().(*types.Var)
+ }
+
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ // Either this is not a shallow import, the field is local, or the
+ // encoded objectPath failed to produce an object (a bug).
+ //
+ // Even in this last, buggy case, fall back on creating a new field. As
+ // discussed in iexport.go, this is not correct, but mostly works and is
+ // preferable to failing (for now at least).
+ if field == nil {
+ field = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ }
+
+ fields[i] = field
+ tags[i] = tag
+ }
+ return types.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types.Func, r.uint64())
+ for i := range methods {
+ var method *types.Func
+ if r.p.shallow {
+ method, _ = r.objectPathObject().(*types.Func)
+ }
+
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types.Var
+ if base != nil {
+ recv = types.NewVar(token.NoPos, r.currPkg, "", base)
+ }
+ msig := r.signature(recv, nil, nil)
+
+ if method == nil {
+ method = types.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+ methods[i] = method
+ }
+
+ typ := newInterface(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+
+ case typeParamType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg, name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instanceType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ // pos does not matter for instances: they are positioned on the original
+ // type.
+ _ = r.pos()
+ n := r.uint64()
+ targs := make([]types.Type, n)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ // TODO provide a non-nil *types.Context
+ t, _ := types.Instantiate(nil, baseType, targs, false)
+
+ // Workaround for golang/go#61561. See the doc for instanceList for details.
+ r.p.instanceList = append(r.p.instanceList, t)
+ return t
+
+ case unionType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ terms := make([]*types.Term, r.uint64())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+// objectPathObject is the inverse of exportWriter.objectPath.
+//
+// In shallow mode, certain fields and methods may need to be looked up in an
+// imported package. See the doc for exportWriter.objectPath for a full
+// explanation.
+func (r *importReader) objectPathObject() types.Object {
+ objPath := objectpath.Path(r.string())
+ if objPath == "" {
+ return nil
+ }
+ pkg := r.pkg()
+ obj, err := objectpath.Object(pkg, objPath)
+ if err != nil {
+ if r.p.reportf != nil {
+ r.p.reportf("failed to find object for objectPath %q: %v", objPath, err)
+ }
+ }
+ return obj
+}
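+
+// For illustration, an objectpath value is a package-relative symbol path;
+// e.g. "T.UF0" names field 0 of the underlying struct of type T (see
+// golang.org/x/tools/go/types/objectpath for the path grammar).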
+
+func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return types.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*types.TypeParam {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types.TypeParam, n)
+ for i := range xs {
+ // Note: the standard library importer is tolerant of nil types here,
+ // though it would then panic in SetTypeParams.
+ xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam)
+ }
+ return xs
+}
+
+func (r *importReader) paramList() *types.Tuple {
+ xs := make([]*types.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
new file mode 100644
index 0000000..8b163e3
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.11
+// +build !go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ named := make([]*types.Named, len(embeddeds))
+ for i, e := range embeddeds {
+ var ok bool
+ named[i], ok = e.(*types.Named)
+ if !ok {
+ panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
+ }
+ }
+ return types.NewInterface(methods, named)
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
new file mode 100644
index 0000000..49984f4
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.11
+// +build go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ return types.NewInterfaceType(methods, embeddeds)
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
new file mode 100644
index 0000000..0cd3b91
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import "go/types"
+
+const iexportVersion = iexportVersionGenerics
+
+// additionalPredeclared returns additional predeclared types in Go 1.18.
+func additionalPredeclared() []types.Type {
+ return []types.Type{
+ // comparable
+ types.Universe.Lookup("comparable").Type(),
+
+ // any
+ types.Universe.Lookup("any").Type(),
+ }
+}
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+ i := len(name)
+ for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && name[i-len(dot):i] == dot {
+ i -= len(dot)
+ return name[:i], name[i:]
+ }
+ return name, ""
+}
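+
+// For example, splitVargenSuffix("x·3") returns ("x", "·3"), because the
+// trailing digits are preceded by the '·' marker, whereas
+// splitVargenSuffix("x3") returns ("x3", "") since plain digits carry no
+// vargen suffix.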
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
new file mode 100644
index 0000000..38b624c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !goexperiment.unified
+// +build !goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = false
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
new file mode 100644
index 0000000..b5118d0
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.unified
+// +build goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = true
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
new file mode 100644
index 0000000..2c07706
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -0,0 +1,728 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Derived from go/internal/gcimporter/ureader.go
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/pkgbits"
+)
+
+// A pkgReader holds the shared state for reading a unified IR package
+// description.
+type pkgReader struct {
+ pkgbits.PkgDecoder
+
+ fake fakeFileSet
+
+ ctxt *types.Context
+ imports map[string]*types.Package // previously imported packages, indexed by path
+ aliases bool // create types.Alias nodes
+
+ // lazily initialized arrays corresponding to the unified IR
+ // PosBase, Pkg, and Type sections, respectively.
+ posBases []string // position bases (i.e., file names)
+ pkgs []*types.Package
+ typs []types.Type
+
+ // laterFns holds functions that need to be invoked at the end of
+ // import reading.
+ laterFns []func()
+ // laterFors is used in case of 'type A B' to ensure that B is processed before A.
+ laterFors map[types.Type]int
+
+ // ifaces holds a list of constructed Interfaces, which need to have
+ // Complete called after importing is done.
+ ifaces []*types.Interface
+}
+
+// later adds a function to be invoked at the end of import reading.
+func (pr *pkgReader) later(fn func()) {
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+ idx pkgbits.Index
+ needed bool
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+ idx pkgbits.Index
+ derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ if !debug {
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
+ }
+ }()
+ }
+
+ s := string(data)
+ s = s[:strings.LastIndex(s, "\n$$\n")]
+ input := pkgbits.NewPkgDecoder(path, s)
+ pkg = readUnifiedPackage(fset, nil, imports, input)
+ return
+}
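+
+// For illustration, a minimal caller sketch (data is assumed to be unified
+// export data whose payload ends with the "\n$$\n" terminator):
+//
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	_, pkg, err := UImportData(fset, imports, data, "example.com/m/p")
+//	if err != nil {
+//		log.Fatal(err)
+//	}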
+
+// laterFor adds a function to be invoked at the end of import reading, and records the type that the function completes.
+func (pr *pkgReader) laterFor(t types.Type, fn func()) {
+ if pr.laterFors == nil {
+ pr.laterFors = make(map[types.Type]int)
+ }
+ pr.laterFors[t] = len(pr.laterFns)
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+ pr := pkgReader{
+ PkgDecoder: input,
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+
+ ctxt: ctxt,
+ imports: imports,
+ aliases: aliases.Enabled(),
+
+ posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+ typs: make([]types.Type, input.NumElems(pkgbits.RelocType)),
+ }
+ defer pr.fake.setLines()
+
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+ pkg := r.pkg()
+ r.Bool() // has init
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ // As if r.obj(), but avoiding the Scope.Lookup call,
+ // to avoid eager loading of imports.
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ assert(r.Len() == 0)
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+
+ for _, fn := range pr.laterFns {
+ fn()
+ }
+
+ for _, iface := range pr.ifaces {
+ iface.Complete()
+ }
+
+ // Set pkg.Imports() to all of the transitive packages that were loaded.
+ var imps []*types.Package
+ for _, imp := range pr.pkgs {
+ if imp != nil && imp != pkg {
+ imps = append(imps, imp)
+ }
+ }
+ sort.Sort(byPath(imps))
+ pkg.SetImports(imps)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+ pkgbits.Decoder
+
+ p *pkgReader
+
+ dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+ // bounds is a slice of typeInfos corresponding to the underlying
+ // bounds of the element's type parameters.
+ bounds []typeInfo
+
+ // tparams is a slice of the constructed TypeParams for the element.
+ tparams []*types.TypeParam
+
+ // derived is a slice of types derived from tparams, which may be
+ // instantiated while reading the current element.
+ derived []derivedInfo
+ derivedTypes []types.Type // lazily instantiated from derived
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.NewDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.TempDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) retireReader(r *reader) {
+ pr.RetireDecoder(&r.Decoder)
+}
+
+// @@@ Positions
+
+func (r *reader) pos() token.Pos {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
+ return token.NoPos
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.Uint()
+ col := r.Uint()
+ return r.p.fake.pos(posBase, int(line), int(col))
+}
+
+func (r *reader) posBase() string {
+ return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
+}
+
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
+ if b := pr.posBases[idx]; b != "" {
+ return b
+ }
+
+ var filename string
+ {
+ r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+
+ // Within types2, position bases have a lot more details (e.g.,
+ // keeping track of where //line directives appeared exactly).
+ //
+ // For go/types, we just track the file name.
+
+ filename = r.String()
+
+ if r.Bool() { // file base
+ // Was: "b = token.NewTrimmedFileBase(filename, true)"
+ } else { // line base
+ pos := r.pos()
+ line := r.Uint()
+ col := r.Uint()
+
+ // Was: "b = token.NewLineBase(pos, filename, true, line, col)"
+ _, _, _ = pos, line, col
+ }
+ pr.retireReader(r)
+ }
+ b := filename
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Package {
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types.Package {
+ path := r.String()
+ switch path {
+ case "":
+ path = r.p.PkgPath()
+ case "builtin":
+ return nil // universe
+ case "unsafe":
+ return types.Unsafe
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.String()
+
+ pkg := types.NewPackage(path, name)
+ r.p.imports[path] = pkg
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() types.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
+ }
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
+}
+
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
+ idx := info.idx
+ var where *types.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ var typ types.Type
+ {
+ r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
+ r.dict = dict
+
+ typ = r.doTyp()
+ assert(typ != nil)
+ pr.retireReader(r)
+ }
+ // doTyp above may have re-entered typIdx for this same index and
+ // already stored a type; if so, return the earlier type to preserve
+ // identity.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader) doTyp() (res types.Type) {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
+ default:
+ errorf("unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case pkgbits.TypeBasic:
+ return types.Typ[r.Len()]
+
+ case pkgbits.TypeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types.TypeName)
+ if len(targs) != 0 {
+ t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false)
+ return t
+ }
+ return name.Type()
+
+ case pkgbits.TypeTypeParam:
+ return r.dict.tparams[r.Len()]
+
+ case pkgbits.TypeArray:
+ n := int64(r.Uint64())
+ return types.NewArray(r.typ(), n)
+ case pkgbits.TypeChan:
+ dir := types.ChanDir(r.Len())
+ return types.NewChan(dir, r.typ())
+ case pkgbits.TypeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case pkgbits.TypePointer:
+ return types.NewPointer(r.typ())
+ case pkgbits.TypeSignature:
+ return r.signature(nil, nil, nil)
+ case pkgbits.TypeSlice:
+ return types.NewSlice(r.typ())
+ case pkgbits.TypeStruct:
+ return r.structType()
+ case pkgbits.TypeInterface:
+ return r.interfaceType()
+ case pkgbits.TypeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader) structType() *types.Struct {
+ fields := make([]*types.Var, r.Len())
+ var tags []string
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.String()
+ embedded := r.Bool()
+
+ fields[i] = types.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types.NewStruct(fields, tags)
+}
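+
+// For illustration: decoding a struct like "struct{ A int; B int `json:\"b\"` }"
+// pads tags to {"", "json:\"b\""} because only B carries a tag; go/types
+// tolerates a tags slice shorter than fields, treating missing tags as "".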
+
+func (r *reader) unionType() *types.Union {
+ terms := make([]*types.Term, r.Len())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.Bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+}
+
+func (r *reader) interfaceType() *types.Interface {
+ methods := make([]*types.Func, r.Len())
+ embeddeds := make([]types.Type, r.Len())
+ // The exporter writes the implicit bit only when there are no methods
+ // and exactly one embedded type, so the short-circuited read below
+ // consumes the bool in exactly those cases, keeping the decoder in sync.
+ implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil, nil, nil)
+ methods[i] = types.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ iface := types.NewInterfaceType(methods, embeddeds)
+ if implicit {
+ iface.MarkImplicit()
+ }
+
+ // We need to call iface.Complete(), but if there are any embedded
+ // defined types, then we may not have set their underlying
+ // interface type yet. So we need to defer calling Complete until
+ // after we've called SetUnderlying everywhere.
+ //
+ // TODO(mdempsky): After CL 424876 lands, it should be safe to call
+ // iface.Complete() immediately.
+ r.p.ifaces = append(r.p.ifaces, iface)
+
+ return iface
+}
+
+func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature {
+ r.Sync(pkgbits.SyncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.Bool()
+
+ return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
+}
+
+func (r *reader) params() *types.Tuple {
+ r.Sync(pkgbits.SyncParams)
+
+ params := make([]*types.Var, r.Len())
+ for i := range params {
+ params[i] = r.param()
+ }
+
+ return types.NewTuple(params...)
+}
+
+func (r *reader) param() *types.Var {
+ r.Sync(pkgbits.SyncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types.Object, []types.Type) {
+ r.Sync(pkgbits.SyncObject)
+
+ assert(!r.Bool())
+
+ pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ obj := pkgScope(pkg).Lookup(name)
+
+ targs := make([]types.Type, r.Len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
+
+ var objPkg *types.Package
+ var objName string
+ var tag pkgbits.CodeObj
+ {
+ rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+ objPkg, objName = rname.qualifiedIdent()
+ assert(objName != "")
+
+ tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+ pr.retireReader(rname)
+ }
+
+ if tag == pkgbits.ObjStub {
+ assert(objPkg == nil || objPkg == types.Unsafe)
+ return objPkg, objName
+ }
+
+ // Ignore local types promoted to global scope (#55110).
+ if _, suffix := splitVargenSuffix(objName); suffix != "" {
+ return objPkg, objName
+ }
+
+ if objPkg.Scope().Lookup(objName) == nil {
+ dict := pr.objDictIdx(idx)
+
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ r.dict = dict
+
+ declare := func(obj types.Object) {
+ objPkg.Scope().Insert(obj)
+ }
+
+ switch tag {
+ default:
+ panic("weird")
+
+ case pkgbits.ObjAlias:
+ pos := r.pos()
+ typ := r.typ()
+ declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ))
+
+ case pkgbits.ObjConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := r.Value()
+ declare(types.NewConst(pos, objPkg, objName, typ, val))
+
+ case pkgbits.ObjFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil, nil, tparams)
+ declare(types.NewFunc(pos, objPkg, objName, sig))
+
+ case pkgbits.ObjType:
+ pos := r.pos()
+
+ obj := types.NewTypeName(pos, objPkg, objName, nil)
+ named := types.NewNamed(obj, nil, nil)
+ declare(obj)
+
+ named.SetTypeParams(r.typeParamNames())
+
+ setUnderlying := func(underlying types.Type) {
+ // If the underlying type is an interface, we need to
+ // duplicate its methods so we can replace the receiver
+ // parameter's type (#49906).
+ if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+ methods := make([]*types.Func, iface.NumExplicitMethods())
+ for i := range methods {
+ fn := iface.ExplicitMethod(i)
+ sig := fn.Type().(*types.Signature)
+
+ recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+ methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
+ }
+
+ embeds := make([]types.Type, iface.NumEmbeddeds())
+ for i := range embeds {
+ embeds[i] = iface.EmbeddedType(i)
+ }
+
+ newIface := types.NewInterfaceType(methods, embeds)
+ r.p.ifaces = append(r.p.ifaces, newIface)
+ underlying = newIface
+ }
+
+ named.SetUnderlying(underlying)
+ }
+
+ // Since go.dev/cl/455279, we can assume rhs.Underlying() will
+ // always be non-nil. However, to temporarily support users of
+ // older snapshot releases, we continue to fall back to the old
+ // behavior for now.
+ //
+ // TODO(mdempsky): Remove fallback code and simplify after
+ // allowing time for snapshot users to upgrade.
+ rhs := r.typ()
+ if underlying := rhs.Underlying(); underlying != nil {
+ setUnderlying(underlying)
+ } else {
+ pk := r.p
+ pk.laterFor(named, func() {
+ // First be sure that the rhs is initialized, if it needs to be initialized.
+ delete(pk.laterFors, named) // prevent cycles
+ if i, ok := pk.laterFors[rhs]; ok {
+ f := pk.laterFns[i]
+ pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+ f() // initialize RHS
+ }
+ setUnderlying(rhs.Underlying())
+ })
+ }
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ named.AddMethod(r.method())
+ }
+
+ case pkgbits.ObjVar:
+ pos := r.pos()
+ typ := r.typ()
+ declare(types.NewVar(pos, objPkg, objName, typ))
+ }
+ }
+
+ return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+
+ var dict readerDict
+
+ {
+ r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+ if implicits := r.Len(); implicits != 0 {
+ errorf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ dict.bounds = make([]typeInfo, r.Len())
+ for i := range dict.bounds {
+ dict.bounds[i] = r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.Len())
+ dict.derivedTypes = make([]types.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+ }
+
+ pr.retireReader(r)
+ }
+ // function references follow, but reader doesn't need those
+
+ return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+ r.Sync(pkgbits.SyncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+ // implicit type parameters. This is currently fine, because
+ // reader is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
+
+ r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ tname := types.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+ }
+
+ typs := make([]types.Type, len(r.dict.bounds))
+ for i, bound := range r.dict.bounds {
+ typs[i] = r.p.typIdx(bound, r.dict)
+ }
+
+ // TODO(mdempsky): This is subtle, elaborate further.
+ //
+ // We have to save tparams outside of the closure, because
+ // typeParamNames() can be called multiple times with the same
+ // dictionary instance.
+ //
+ // Also, this needs to happen later to make sure SetUnderlying has
+ // been called.
+ //
+ // TODO(mdempsky): Is it safe to have a single "later" slice or do
+ // we need to have multiple passes? See comments on CL 386002 and
+ // go.dev/issue/52104.
+ tparams := r.dict.tparams
+ r.p.later(func() {
+ for i, typ := range typs {
+ tparams[i].SetConstraint(typ)
+ }
+ })
+
+ return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+ r.Sync(pkgbits.SyncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rparams := r.typeParamNames()
+ sig := r.signature(r.param(), rparams, nil)
+
+ _ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+ return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+ r.Sync(marker)
+ return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+ if pkg != nil {
+ return pkg.Scope()
+ }
+ return types.Universe
+}