From 548090e67f66acf84385c4152ca464e52d3e3319 Mon Sep 17 00:00:00 2001 From: Thomas Voss Date: Fri, 13 Sep 2024 13:01:48 +0200 Subject: Migrate away from templ and towards html/template --- .../golang.org/x/tools/go/ast/astutil/enclosing.go | 654 ++++ .../golang.org/x/tools/go/ast/astutil/imports.go | 485 +++ .../golang.org/x/tools/go/ast/astutil/rewrite.go | 486 +++ vendor/golang.org/x/tools/go/ast/astutil/util.go | 19 + .../golang.org/x/tools/go/buildutil/allpackages.go | 195 ++ .../golang.org/x/tools/go/buildutil/fakecontext.go | 111 + vendor/golang.org/x/tools/go/buildutil/overlay.go | 101 + vendor/golang.org/x/tools/go/buildutil/tags.go | 100 + vendor/golang.org/x/tools/go/buildutil/util.go | 209 ++ .../golang.org/x/tools/go/callgraph/callgraph.go | 129 + vendor/golang.org/x/tools/go/callgraph/cha/cha.go | 164 + vendor/golang.org/x/tools/go/callgraph/util.go | 180 ++ .../x/tools/go/gcexportdata/gcexportdata.go | 186 ++ .../golang.org/x/tools/go/gcexportdata/importer.go | 75 + vendor/golang.org/x/tools/go/internal/cgo/cgo.go | 219 ++ .../x/tools/go/internal/cgo/cgo_pkgconfig.go | 42 + vendor/golang.org/x/tools/go/loader/doc.go | 202 ++ vendor/golang.org/x/tools/go/loader/loader.go | 1066 +++++++ vendor/golang.org/x/tools/go/loader/util.go | 123 + vendor/golang.org/x/tools/go/packages/doc.go | 242 ++ vendor/golang.org/x/tools/go/packages/external.go | 156 + vendor/golang.org/x/tools/go/packages/golist.go | 1066 +++++++ .../x/tools/go/packages/golist_overlay.go | 83 + .../x/tools/go/packages/loadmode_string.go | 57 + vendor/golang.org/x/tools/go/packages/packages.go | 1515 +++++++++ vendor/golang.org/x/tools/go/packages/visit.go | 68 + vendor/golang.org/x/tools/go/ssa/TODO | 16 + vendor/golang.org/x/tools/go/ssa/block.go | 113 + vendor/golang.org/x/tools/go/ssa/blockopt.go | 183 ++ vendor/golang.org/x/tools/go/ssa/builder.go | 3276 ++++++++++++++++++++ vendor/golang.org/x/tools/go/ssa/const.go | 232 ++ vendor/golang.org/x/tools/go/ssa/coretype.go | 161 + vendor/golang.org/x/tools/go/ssa/create.go | 318 ++ vendor/golang.org/x/tools/go/ssa/doc.go | 122 + vendor/golang.org/x/tools/go/ssa/dom.go | 340 ++ vendor/golang.org/x/tools/go/ssa/emit.go | 614 ++++ vendor/golang.org/x/tools/go/ssa/func.go | 816 +++++ vendor/golang.org/x/tools/go/ssa/instantiate.go | 131 + vendor/golang.org/x/tools/go/ssa/lift.go | 688 ++++ vendor/golang.org/x/tools/go/ssa/lvalue.go | 155 + vendor/golang.org/x/tools/go/ssa/methods.go | 281 ++ vendor/golang.org/x/tools/go/ssa/mode.go | 111 + vendor/golang.org/x/tools/go/ssa/print.go | 470 +++ vendor/golang.org/x/tools/go/ssa/sanity.go | 560 ++++ vendor/golang.org/x/tools/go/ssa/source.go | 288 ++ vendor/golang.org/x/tools/go/ssa/ssa.go | 1871 +++++++++++ vendor/golang.org/x/tools/go/ssa/ssautil/load.go | 214 ++ vendor/golang.org/x/tools/go/ssa/ssautil/switch.go | 230 ++ vendor/golang.org/x/tools/go/ssa/ssautil/visit.go | 157 + vendor/golang.org/x/tools/go/ssa/subst.go | 642 ++++ vendor/golang.org/x/tools/go/ssa/task.go | 103 + vendor/golang.org/x/tools/go/ssa/util.go | 430 +++ vendor/golang.org/x/tools/go/ssa/util_go120.go | 17 + vendor/golang.org/x/tools/go/ssa/wrappers.go | 348 +++ .../x/tools/go/types/objectpath/objectpath.go | 788 +++++ .../golang.org/x/tools/go/types/typeutil/callee.go | 69 + .../x/tools/go/types/typeutil/imports.go | 30 + vendor/golang.org/x/tools/go/types/typeutil/map.go | 518 ++++ .../x/tools/go/types/typeutil/methodsetcache.go | 73 + vendor/golang.org/x/tools/go/types/typeutil/ui.go | 55 + 60 files changed, 22353 insertions(+) create mode 
100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go create mode 100644 vendor/golang.org/x/tools/go/buildutil/allpackages.go create mode 100644 vendor/golang.org/x/tools/go/buildutil/fakecontext.go create mode 100644 vendor/golang.org/x/tools/go/buildutil/overlay.go create mode 100644 vendor/golang.org/x/tools/go/buildutil/tags.go create mode 100644 vendor/golang.org/x/tools/go/buildutil/util.go create mode 100644 vendor/golang.org/x/tools/go/callgraph/callgraph.go create mode 100644 vendor/golang.org/x/tools/go/callgraph/cha/cha.go create mode 100644 vendor/golang.org/x/tools/go/callgraph/util.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go create mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo.go create mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go create mode 100644 vendor/golang.org/x/tools/go/loader/doc.go create mode 100644 vendor/golang.org/x/tools/go/loader/loader.go create mode 100644 vendor/golang.org/x/tools/go/loader/util.go create mode 100644 vendor/golang.org/x/tools/go/packages/doc.go create mode 100644 vendor/golang.org/x/tools/go/packages/external.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go create mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go create mode 100644 vendor/golang.org/x/tools/go/packages/packages.go create mode 100644 vendor/golang.org/x/tools/go/packages/visit.go create mode 100644 vendor/golang.org/x/tools/go/ssa/TODO create mode 100644 vendor/golang.org/x/tools/go/ssa/block.go create mode 100644 vendor/golang.org/x/tools/go/ssa/blockopt.go create mode 100644 vendor/golang.org/x/tools/go/ssa/builder.go create mode 100644 vendor/golang.org/x/tools/go/ssa/const.go create mode 100644 vendor/golang.org/x/tools/go/ssa/coretype.go create mode 100644 vendor/golang.org/x/tools/go/ssa/create.go create mode 100644 vendor/golang.org/x/tools/go/ssa/doc.go create mode 100644 vendor/golang.org/x/tools/go/ssa/dom.go create mode 100644 vendor/golang.org/x/tools/go/ssa/emit.go create mode 100644 vendor/golang.org/x/tools/go/ssa/func.go create mode 100644 vendor/golang.org/x/tools/go/ssa/instantiate.go create mode 100644 vendor/golang.org/x/tools/go/ssa/lift.go create mode 100644 vendor/golang.org/x/tools/go/ssa/lvalue.go create mode 100644 vendor/golang.org/x/tools/go/ssa/methods.go create mode 100644 vendor/golang.org/x/tools/go/ssa/mode.go create mode 100644 vendor/golang.org/x/tools/go/ssa/print.go create mode 100644 vendor/golang.org/x/tools/go/ssa/sanity.go create mode 100644 vendor/golang.org/x/tools/go/ssa/source.go create mode 100644 vendor/golang.org/x/tools/go/ssa/ssa.go create mode 100644 vendor/golang.org/x/tools/go/ssa/ssautil/load.go create mode 100644 vendor/golang.org/x/tools/go/ssa/ssautil/switch.go create mode 100644 vendor/golang.org/x/tools/go/ssa/ssautil/visit.go create mode 100644 vendor/golang.org/x/tools/go/ssa/subst.go create mode 100644 vendor/golang.org/x/tools/go/ssa/task.go create mode 100644 vendor/golang.org/x/tools/go/ssa/util.go create mode 100644 vendor/golang.org/x/tools/go/ssa/util_go120.go create mode 100644 vendor/golang.org/x/tools/go/ssa/wrappers.go create mode 
100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui.go (limited to 'vendor/golang.org/x/tools/go') diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 0000000..6e34df4 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,654 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// The resulting path is never empty; it always contains at least the +// 'root' *ast.File. Ideally PathEnclosingInterval would reject +// intervals that lie wholly or partially outside the range of the +// file, but unfortunately ast.File records only the token.Pos of +// the 'package' keyword, but not of the start of the file itself. +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). 
+ children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + if is[tokenNode](child) { + return true + } + + // childrenOf elides the FuncType node beneath FuncDecl. + // Add it back here for TypeParams, Params, Results, + // all FieldLists). But we don't add it back for the "func" token + // even though it is is the tree at FuncDecl.Type.Func. + if decl, ok := node.(*ast.FuncDecl); ok { + if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { + path = append(path, decl.Type) + } + } + + return visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) + if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + // Ensure [start,end) is nondecreasing. + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. 
+ switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case *ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), // or len("[") + tok(n.Closing, len(")"))) // or len("]") + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // We also need to insert the elided FuncType just + // before the 'visit' recursion. 
+ // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if tparams := n.Type.TypeParams; tparams != nil { + children = append(children, tparams) + } + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? + + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.IndexListExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. 
+ sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.IndexListExpr: + return "index list expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + 
return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 0000000..18d1adb --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,485 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// +// AddNamedImport(fset, f, "pathpkg", "path") +// +// adds +// +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. 
+ // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. 
+ return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.PositionFor(lastImpspec.Path.ValuePos, false).Line + line := fset.PositionFor(impspec.Path.ValuePos, false).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 || !gen.Rparen.IsValid() { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. 
+ for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. + for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. 
+func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. +func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 0000000..58934f7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,486 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. 
+// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. +func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. 
+func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.IndexListExpr: + a.apply(n, "X", nil, n.X) + a.applyList(n, "Indices") + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, 
n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + if tparams := n.TypeParams; tparams != nil { + a.apply(n, "TypeParams", nil, tparams) + } + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 0000000..6bdcf70 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,19 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +// TODO(adonovan): use go1.22's ast.Unparen. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go new file mode 100644 index 0000000..dfb8cd6 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/allpackages.go @@ -0,0 +1,195 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buildutil provides utilities related to the go/build +// package in the standard library. +// +// All I/O is done via the build.Context file system interface, which must +// be concurrency-safe. +package buildutil // import "golang.org/x/tools/go/buildutil" + +import ( + "go/build" + "os" + "path/filepath" + "sort" + "strings" + "sync" +) + +// AllPackages returns the package path of each Go package in any source +// directory of the specified build context (e.g. $GOROOT or an element +// of $GOPATH). Errors are ignored. The results are sorted. +// All package paths are canonical, and thus may contain "/vendor/". +// +// The result may include import paths for directories that contain no +// *.go files, such as "archive" (in $GOROOT/src). +// +// All I/O is done via the build.Context file system interface, +// which must be concurrency-safe. +func AllPackages(ctxt *build.Context) []string { + var list []string + ForEachPackage(ctxt, func(pkg string, _ error) { + list = append(list, pkg) + }) + sort.Strings(list) + return list +} + +// ForEachPackage calls the found function with the package path of +// each Go package it finds in any source directory of the specified +// build context (e.g. $GOROOT or an element of $GOPATH). +// All package paths are canonical, and thus may contain "/vendor/". +// +// If the package directory exists but could not be read, the second +// argument to the found function provides the error. +// +// All I/O is done via the build.Context file system interface, +// which must be concurrency-safe. 
+func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) { + ch := make(chan item) + + var wg sync.WaitGroup + for _, root := range ctxt.SrcDirs() { + root := root + wg.Add(1) + go func() { + allPackages(ctxt, root, ch) + wg.Done() + }() + } + go func() { + wg.Wait() + close(ch) + }() + + // All calls to found occur in the caller's goroutine. + for i := range ch { + found(i.importPath, i.err) + } +} + +type item struct { + importPath string + err error // (optional) +} + +// We use a process-wide counting semaphore to limit +// the number of parallel calls to ReadDir. +var ioLimit = make(chan bool, 20) + +func allPackages(ctxt *build.Context, root string, ch chan<- item) { + root = filepath.Clean(root) + string(os.PathSeparator) + + var wg sync.WaitGroup + + var walkDir func(dir string) + walkDir = func(dir string) { + // Avoid .foo, _foo, and testdata directory trees. + base := filepath.Base(dir) + if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" { + return + } + + pkg := filepath.ToSlash(strings.TrimPrefix(dir, root)) + + // Prune search if we encounter any of these import paths. + switch pkg { + case "builtin": + return + } + + ioLimit <- true + files, err := ReadDir(ctxt, dir) + <-ioLimit + if pkg != "" || err != nil { + ch <- item{pkg, err} + } + for _, fi := range files { + fi := fi + if fi.IsDir() { + wg.Add(1) + go func() { + walkDir(filepath.Join(dir, fi.Name())) + wg.Done() + }() + } + } + } + + walkDir(root) + wg.Wait() +} + +// ExpandPatterns returns the set of packages matched by patterns, +// which may have the following forms: +// +// golang.org/x/tools/cmd/guru # a single package +// golang.org/x/tools/... # all packages beneath dir +// ... # the entire workspace. +// +// Order is significant: a pattern preceded by '-' removes matching +// packages from the set. For example, these patterns match all encoding +// packages except encoding/xml: +// +// encoding/... -encoding/xml +// +// A trailing slash in a pattern is ignored. (Path components of Go +// package names are separated by slash, not the platform's path separator.) +func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { + // TODO(adonovan): support other features of 'go list': + // - "std"/"cmd"/"all" meta-packages + // - "..." not at the end of a pattern + // - relative patterns using "./" or "../" prefix + + pkgs := make(map[string]bool) + doPkg := func(pkg string, neg bool) { + if neg { + delete(pkgs, pkg) + } else { + pkgs[pkg] = true + } + } + + // Scan entire workspace if wildcards are present. + // TODO(adonovan): opt: scan only the necessary subtrees of the workspace. + var all []string + for _, arg := range patterns { + if strings.HasSuffix(arg, "...") { + all = AllPackages(ctxt) + break + } + } + + for _, arg := range patterns { + if arg == "" { + continue + } + + neg := arg[0] == '-' + if neg { + arg = arg[1:] + } + + if arg == "..." { + // ... matches all packages + for _, pkg := range all { + doPkg(pkg, neg) + } + } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { + // dir/... 
matches all packages beneath dir + for _, pkg := range all { + if strings.HasPrefix(pkg, dir) && + (len(pkg) == len(dir) || pkg[len(dir)] == '/') { + doPkg(pkg, neg) + } + } + } else { + // single package + doPkg(strings.TrimSuffix(arg, "/"), neg) + } + } + + return pkgs +} diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go new file mode 100644 index 0000000..763d188 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go @@ -0,0 +1,111 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +import ( + "fmt" + "go/build" + "io" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" +) + +// FakeContext returns a build.Context for the fake file tree specified +// by pkgs, which maps package import paths to a mapping from file base +// names to contents. +// +// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides +// the necessary file access methods to read from memory instead of the +// real file system. +// +// Unlike a real file tree, the fake one has only two levels---packages +// and files---so ReadDir("/go/src/") returns all packages under +// /go/src/ including, for instance, "math" and "math/big". +// ReadDir("/go/src/math/big") would return all the files in the +// "math/big" package. +func FakeContext(pkgs map[string]map[string]string) *build.Context { + clean := func(filename string) string { + f := path.Clean(filepath.ToSlash(filename)) + // Removing "/go/src" while respecting segment + // boundaries has this unfortunate corner case: + if f == "/go/src" { + return "" + } + return strings.TrimPrefix(f, "/go/src/") + } + + ctxt := build.Default // copy + ctxt.GOROOT = "/go" + ctxt.GOPATH = "" + ctxt.Compiler = "gc" + ctxt.IsDir = func(dir string) bool { + dir = clean(dir) + if dir == "" { + return true // needed by (*build.Context).SrcDirs + } + return pkgs[dir] != nil + } + ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { + dir = clean(dir) + var fis []os.FileInfo + if dir == "" { + // enumerate packages + for importPath := range pkgs { + fis = append(fis, fakeDirInfo(importPath)) + } + } else { + // enumerate files of package + for basename := range pkgs[dir] { + fis = append(fis, fakeFileInfo(basename)) + } + } + sort.Sort(byName(fis)) + return fis, nil + } + ctxt.OpenFile = func(filename string) (io.ReadCloser, error) { + filename = clean(filename) + dir, base := path.Split(filename) + content, ok := pkgs[path.Clean(dir)][base] + if !ok { + return nil, fmt.Errorf("file not found: %s", filename) + } + return io.NopCloser(strings.NewReader(content)), nil + } + ctxt.IsAbsPath = func(path string) bool { + path = filepath.ToSlash(path) + // Don't rely on the default (filepath.Path) since on + // Windows, it reports virtual paths as non-absolute. 
+ return strings.HasPrefix(path, "/") + } + return &ctxt +} + +type byName []os.FileInfo + +func (s byName) Len() int { return len(s) } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } + +type fakeFileInfo string + +func (fi fakeFileInfo) Name() string { return string(fi) } +func (fakeFileInfo) Sys() interface{} { return nil } +func (fakeFileInfo) ModTime() time.Time { return time.Time{} } +func (fakeFileInfo) IsDir() bool { return false } +func (fakeFileInfo) Size() int64 { return 0 } +func (fakeFileInfo) Mode() os.FileMode { return 0644 } + +type fakeDirInfo string + +func (fd fakeDirInfo) Name() string { return string(fd) } +func (fakeDirInfo) Sys() interface{} { return nil } +func (fakeDirInfo) ModTime() time.Time { return time.Time{} } +func (fakeDirInfo) IsDir() bool { return true } +func (fakeDirInfo) Size() int64 { return 0 } +func (fakeDirInfo) Mode() os.FileMode { return 0755 } diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go new file mode 100644 index 0000000..7e37165 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/overlay.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "io" + "path/filepath" + "strconv" + "strings" +) + +// OverlayContext overlays a build.Context with additional files from +// a map. Files in the map take precedence over other files. +// +// In addition to plain string comparison, two file names are +// considered equal if their base names match and their directory +// components point at the same directory on the file system. That is, +// symbolic links are followed for directories, but not files. +// +// A common use case for OverlayContext is to allow editors to pass in +// a set of unsaved, modified files. +// +// Currently, only the Context.OpenFile function will respect the +// overlay. This may change in the future. +func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context { + // TODO(dominikh): Implement IsDir, HasSubdir and ReadDir + + rc := func(data []byte) (io.ReadCloser, error) { + return io.NopCloser(bytes.NewBuffer(data)), nil + } + + copy := *orig // make a copy + ctxt := &copy + ctxt.OpenFile = func(path string) (io.ReadCloser, error) { + // Fast path: names match exactly. + if content, ok := overlay[path]; ok { + return rc(content) + } + + // Slow path: check for same file under a different + // alias, perhaps due to a symbolic link. + for filename, content := range overlay { + if sameFile(path, filename) { + return rc(content) + } + } + + return OpenFile(orig, path) + } + return ctxt +} + +// ParseOverlayArchive parses an archive containing Go files and their +// contents. The result is intended to be used with OverlayContext. +// +// # Archive format +// +// The archive consists of a series of files. Each file consists of a +// name, a decimal file size and the file contents, separated by +// newlines. No newline follows after the file contents. +func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) { + overlay := make(map[string][]byte) + r := bufio.NewReader(archive) + for { + // Read file name.
+ filename, err := r.ReadString('\n') + if err != nil { + if err == io.EOF { + break // OK + } + return nil, fmt.Errorf("reading archive file name: %v", err) + } + filename = filepath.Clean(strings.TrimSpace(filename)) + + // Read file size. + sz, err := r.ReadString('\n') + if err != nil { + return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err) + } + sz = strings.TrimSpace(sz) + size, err := strconv.ParseUint(sz, 10, 32) + if err != nil { + return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err) + } + + // Read file content. + content := make([]byte, size) + if _, err := io.ReadFull(r, content); err != nil { + return nil, fmt.Errorf("reading archive file %s: %v", filename, err) + } + overlay[filename] = content + } + + return overlay, nil +} diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go new file mode 100644 index 0000000..32c8d14 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +// This duplicated logic must be kept in sync with that from go build: +// $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) +// $GOROOT/src/cmd/go/internal/base/flag.go (StringsFlag.Set) +// $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) + +import ( + "fmt" + "strings" +) + +const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " + + "For more information about build tags, see the description of " + + "build constraints in the documentation for the go/build package" + +// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses +// a flag value the same as go build's -tags flag and populates a []string slice. +// +// See $GOROOT/src/go/build/doc.go for description of build tags. +// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag. +// +// Example: +// +// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc) +type TagsFlag []string + +func (v *TagsFlag) Set(s string) error { + // See $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) + // For compatibility with Go 1.12 and earlier, allow "-tags='a b c'" or even just "-tags='a'". + if strings.Contains(s, " ") || strings.Contains(s, "'") { + var err error + *v, err = splitQuotedFields(s) + if *v == nil { + *v = []string{} + } + return err + } + + // Starting in Go 1.13, the -tags flag is a comma-separated list of build tags. + *v = []string{} + for _, s := range strings.Split(s, ",") { + if s != "" { + *v = append(*v, s) + } + } + return nil +} + +func (v *TagsFlag) Get() interface{} { return *v } + +func splitQuotedFields(s string) ([]string, error) { + // See $GOROOT/src/cmd/internal/quoted/quoted.go (Split) + // This must remain in sync with that logic. + var f []string + for len(s) > 0 { + for len(s) > 0 && isSpaceByte(s[0]) { + s = s[1:] + } + if len(s) == 0 { + break + } + // Accepted quoted string. No unescaping inside. 
+ if s[0] == '"' || s[0] == '\'' { + quote := s[0] + s = s[1:] + i := 0 + for i < len(s) && s[i] != quote { + i++ + } + if i >= len(s) { + return nil, fmt.Errorf("unterminated %c string", quote) + } + f = append(f, s[:i]) + s = s[i+1:] + continue + } + i := 0 + for i < len(s) && !isSpaceByte(s[i]) { + i++ + } + f = append(f, s[:i]) + s = s[i:] + } + return f, nil +} + +func (v *TagsFlag) String() string { + return "" +} + +func isSpaceByte(c byte) bool { + // See $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) + // This list must remain in sync with that. + return c == ' ' || c == '\t' || c == '\n' || c == '\r' +} diff --git a/vendor/golang.org/x/tools/go/buildutil/util.go b/vendor/golang.org/x/tools/go/buildutil/util.go new file mode 100644 index 0000000..bee6390 --- /dev/null +++ b/vendor/golang.org/x/tools/go/buildutil/util.go @@ -0,0 +1,209 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildutil + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" +) + +// ParseFile behaves like parser.ParseFile, +// but uses the build context's file system interface, if any. +// +// If file is not absolute (as defined by IsAbsPath), the (dir, file) +// components are joined using JoinPath; dir must be absolute. +// +// The displayPath function, if provided, is used to transform the +// filename that will be attached to the ASTs. +// +// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws. +func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) { + if !IsAbsPath(ctxt, file) { + file = JoinPath(ctxt, dir, file) + } + rd, err := OpenFile(ctxt, file) + if err != nil { + return nil, err + } + defer rd.Close() // ignore error + if displayPath != nil { + file = displayPath(file) + } + return parser.ParseFile(fset, file, rd, mode) +} + +// ContainingPackage returns the package containing filename. +// +// If filename is not absolute, it is interpreted relative to working directory dir. +// All I/O is via the build context's file system interface, if any. +// +// The '...Files []string' fields of the resulting build.Package are not +// populated (build.FindOnly mode). +func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) { + if !IsAbsPath(ctxt, filename) { + filename = JoinPath(ctxt, dir, filename) + } + + // We must not assume the file tree uses + // "/" always, + // `\` always, + // or os.PathSeparator (which varies by platform), + // but to make any progress, we are forced to assume that + // paths will not use `\` unless the PathSeparator + // is also `\`, thus we can rely on filepath.ToSlash for some sanity. + + dirSlash := path.Dir(filepath.ToSlash(filename)) + "/" + + // We assume that no source root (GOPATH[i] or GOROOT) contains any other. + for _, srcdir := range ctxt.SrcDirs() { + srcdirSlash := filepath.ToSlash(srcdir) + "/" + if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok { + return ctxt.Import(importPath, dir, build.FindOnly) + } + } + + return nil, fmt.Errorf("can't find package containing %s", filename) +} + +// -- Effective methods of file system interface ------------------------- + +// (go/build.Context defines these as methods, but does not export them.) 
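// Editor's aside: illustrative sketch, not part of the vendored upstream
// file. It shows how ParseFile and ContainingPackage (defined above) might
// be combined; the directory "/home/user/proj" and file "main.go" are
// hypothetical, and the snippet assumes the imports "go/build", "go/parser",
// "go/token", "log" and this package.
//
//    fset := token.NewFileSet()
//    f, err := buildutil.ParseFile(fset, &build.Default, nil,
//        "/home/user/proj", "main.go", parser.ParseComments)
//    if err != nil {
//        log.Fatal(err)
//    }
//
//    bp, err := buildutil.ContainingPackage(&build.Default, "/home/user/proj", "main.go")
//    if err != nil {
//        log.Fatal(err)
//    }
//    log.Printf("%s is part of %s (dir %s)", f.Name, bp.ImportPath, bp.Dir)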
+ +// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses +// the local file system to answer the question. +func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) { + if f := ctxt.HasSubdir; f != nil { + return f(root, dir) + } + + // Try using paths we received. + if rel, ok = hasSubdir(root, dir); ok { + return + } + + // Try expanding symlinks and comparing + // expanded against unexpanded and + // expanded against expanded. + rootSym, _ := filepath.EvalSymlinks(root) + dirSym, _ := filepath.EvalSymlinks(dir) + + if rel, ok = hasSubdir(rootSym, dir); ok { + return + } + if rel, ok = hasSubdir(root, dirSym); ok { + return + } + return hasSubdir(rootSym, dirSym) +} + +func hasSubdir(root, dir string) (rel string, ok bool) { + const sep = string(filepath.Separator) + root = filepath.Clean(root) + if !strings.HasSuffix(root, sep) { + root += sep + } + + dir = filepath.Clean(dir) + if !strings.HasPrefix(dir, root) { + return "", false + } + + return filepath.ToSlash(dir[len(root):]), true +} + +// FileExists returns true if the specified file exists, +// using the build context's file system interface. +func FileExists(ctxt *build.Context, path string) bool { + if ctxt.OpenFile != nil { + r, err := ctxt.OpenFile(path) + if err != nil { + return false + } + r.Close() // ignore error + return true + } + _, err := os.Stat(path) + return err == nil +} + +// OpenFile behaves like os.Open, +// but uses the build context's file system interface, if any. +func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) { + if ctxt.OpenFile != nil { + return ctxt.OpenFile(path) + } + return os.Open(path) +} + +// IsAbsPath behaves like filepath.IsAbs, +// but uses the build context's file system interface, if any. +func IsAbsPath(ctxt *build.Context, path string) bool { + if ctxt.IsAbsPath != nil { + return ctxt.IsAbsPath(path) + } + return filepath.IsAbs(path) +} + +// JoinPath behaves like filepath.Join, +// but uses the build context's file system interface, if any. +func JoinPath(ctxt *build.Context, path ...string) string { + if ctxt.JoinPath != nil { + return ctxt.JoinPath(path...) + } + return filepath.Join(path...) +} + +// IsDir behaves like os.Stat plus IsDir, +// but uses the build context's file system interface, if any. +func IsDir(ctxt *build.Context, path string) bool { + if ctxt.IsDir != nil { + return ctxt.IsDir(path) + } + fi, err := os.Stat(path) + return err == nil && fi.IsDir() +} + +// ReadDir behaves like ioutil.ReadDir, +// but uses the build context's file system interface, if any. +func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) { + if ctxt.ReadDir != nil { + return ctxt.ReadDir(path) + } + return ioutil.ReadDir(path) +} + +// SplitPathList behaves like filepath.SplitList, +// but uses the build context's file system interface, if any. +func SplitPathList(ctxt *build.Context, s string) []string { + if ctxt.SplitPathList != nil { + return ctxt.SplitPathList(s) + } + return filepath.SplitList(s) +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. 
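// Editor's aside: illustrative sketch, not part of the vendored upstream
// file. The wrappers above fall back to the real file system only when the
// context does not override the corresponding hook, which is what lets them
// operate on a FakeContext (or OverlayContext). Assumes the import "fmt"
// and this package.
//
//    ctxt := buildutil.FakeContext(map[string]map[string]string{
//        "a/b": {"x.go": "package b\n"},
//    })
//    fmt.Println(buildutil.IsDir(ctxt, "/go/src/a/b"))           // true
//    fmt.Println(buildutil.FileExists(ctxt, "/go/src/a/b/x.go")) // true
//    fis, _ := buildutil.ReadDir(ctxt, "/go/src/a/b")
//    fmt.Println(len(fis)) // 1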
+func sameFile(x, y string) bool { + if path.Clean(x) == path.Clean(y) { + return true + } + if filepath.Base(x) == filepath.Base(y) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/callgraph/callgraph.go b/vendor/golang.org/x/tools/go/callgraph/callgraph.go new file mode 100644 index 0000000..a1b0ca5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/callgraph/callgraph.go @@ -0,0 +1,129 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package callgraph defines the call graph and various algorithms +and utilities to operate on it. + +A call graph is a labelled directed graph whose nodes represent +functions and whose edge labels represent syntactic function call +sites. The presence of a labelled edge (caller, site, callee) +indicates that caller may call callee at the specified call site. + +A call graph is a multigraph: it may contain multiple edges (caller, +*, callee) connecting the same pair of nodes, so long as the edges +differ by label; this occurs when one function calls another function +from multiple call sites. Also, it may contain multiple edges +(caller, site, *) that differ only by callee; this indicates a +polymorphic call. + +A SOUND call graph is one that overapproximates the dynamic calling +behaviors of the program in all possible executions. One call graph +is more PRECISE than another if it is a smaller overapproximation of +the dynamic behavior. + +All call graphs have a synthetic root node which is responsible for +calling main() and init(). + +Calls to built-in functions (e.g. panic, println) are not represented +in the call graph; they are treated like built-in operators of the +language. +*/ +package callgraph // import "golang.org/x/tools/go/callgraph" + +// TODO(adonovan): add a function to eliminate wrappers from the +// callgraph, preserving topology. +// More generally, we could eliminate "uninteresting" nodes such as +// nodes from packages we don't care about. + +// TODO(zpavlinovic): decide how callgraphs handle calls to and from generic function bodies. + +import ( + "fmt" + "go/token" + + "golang.org/x/tools/go/ssa" +) + +// A Graph represents a call graph. +// +// A graph may contain nodes that are not reachable from the root. +// If the call graph is sound, such nodes indicate unreachable +// functions. +type Graph struct { + Root *Node // the distinguished root node + Nodes map[*ssa.Function]*Node // all nodes by function +} + +// New returns a new Graph with the specified root node. +func New(root *ssa.Function) *Graph { + g := &Graph{Nodes: make(map[*ssa.Function]*Node)} + g.Root = g.CreateNode(root) + return g +} + +// CreateNode returns the Node for fn, creating it if not present. +// The root node may have fn=nil. +func (g *Graph) CreateNode(fn *ssa.Function) *Node { + n, ok := g.Nodes[fn] + if !ok { + n = &Node{Func: fn, ID: len(g.Nodes)} + g.Nodes[fn] = n + } + return n +} + +// A Node represents a node in a call graph. 
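// Editor's aside: illustrative sketch, not part of the vendored upstream
// file. Given a *Node n in a Graph (the Node and Edge types are defined
// just below), its direct callees can be enumerated from the outgoing
// edges, or collected as a set with CalleesOf from this package's util.go.
// Assumes the import "fmt".
//
//    for _, e := range n.Out {
//        fmt.Printf("%s --> %s\n", e.Caller.Func, e.Callee.Func)
//    }
//
//    callees := callgraph.CalleesOf(n) // map[*callgraph.Node]bool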
+type Node struct { + Func *ssa.Function // the function this node represents + ID int // 0-based sequence number + In []*Edge // unordered set of incoming call edges (n.In[*].Callee == n) + Out []*Edge // unordered set of outgoing call edges (n.Out[*].Caller == n) +} + +func (n *Node) String() string { + return fmt.Sprintf("n%d:%s", n.ID, n.Func) +} + +// A Edge represents an edge in the call graph. +// +// Site is nil for edges originating in synthetic or intrinsic +// functions, e.g. reflect.Value.Call or the root of the call graph. +type Edge struct { + Caller *Node + Site ssa.CallInstruction + Callee *Node +} + +func (e Edge) String() string { + return fmt.Sprintf("%s --> %s", e.Caller, e.Callee) +} + +func (e Edge) Description() string { + var prefix string + switch e.Site.(type) { + case nil: + return "synthetic call" + case *ssa.Go: + prefix = "concurrent " + case *ssa.Defer: + prefix = "deferred " + } + return prefix + e.Site.Common().Description() +} + +func (e Edge) Pos() token.Pos { + if e.Site == nil { + return token.NoPos + } + return e.Site.Pos() +} + +// AddEdge adds the edge (caller, site, callee) to the call graph. +// Elimination of duplicate edges is the caller's responsibility. +func AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) { + e := &Edge{caller, site, callee} + callee.In = append(callee.In, e) + caller.Out = append(caller.Out, e) +} diff --git a/vendor/golang.org/x/tools/go/callgraph/cha/cha.go b/vendor/golang.org/x/tools/go/callgraph/cha/cha.go new file mode 100644 index 0000000..3040f3d --- /dev/null +++ b/vendor/golang.org/x/tools/go/callgraph/cha/cha.go @@ -0,0 +1,164 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cha computes the call graph of a Go program using the Class +// Hierarchy Analysis (CHA) algorithm. +// +// CHA was first described in "Optimization of Object-Oriented Programs +// Using Static Class Hierarchy Analysis", Jeffrey Dean, David Grove, +// and Craig Chambers, ECOOP'95. +// +// CHA is related to RTA (see go/callgraph/rta); the difference is that +// CHA conservatively computes the entire "implements" relation between +// interfaces and concrete types ahead of time, whereas RTA uses dynamic +// programming to construct it on the fly as it encounters new functions +// reachable from main. CHA may thus include spurious call edges for +// types that haven't been instantiated yet, or types that are never +// instantiated. +// +// Since CHA conservatively assumes that all functions are address-taken +// and all concrete types are put into interfaces, it is sound to run on +// partial programs, such as libraries without a main or test function. +package cha // import "golang.org/x/tools/go/callgraph/cha" + +// TODO(zpavlinovic): update CHA for how it handles generic function bodies. + +import ( + "go/types" + + "golang.org/x/tools/go/callgraph" + "golang.org/x/tools/go/ssa" + "golang.org/x/tools/go/ssa/ssautil" + "golang.org/x/tools/go/types/typeutil" +) + +// CallGraph computes the call graph of the specified program using the +// Class Hierarchy Analysis algorithm. 
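// Editor's aside: illustrative sketch, not part of the vendored upstream
// file. A typical pipeline for building a CHA call graph from source, using
// the likewise-vendored packages, ssa/ssautil and callgraph packages; the
// "./..." pattern is hypothetical and error handling is abbreviated.
// Assumes the imports "fmt" and "log" plus the x/tools packages named below.
//
//    cfg := &packages.Config{Mode: packages.LoadAllSyntax}
//    initial, err := packages.Load(cfg, "./...")
//    if err != nil {
//        log.Fatal(err)
//    }
//    prog, _ := ssautil.AllPackages(initial, ssa.InstantiateGenerics)
//    prog.Build()
//
//    cg := cha.CallGraph(prog)
//    cg.DeleteSyntheticNodes() // optional: drop wrapper nodes, keep reachability
//    _ = callgraph.GraphVisitEdges(cg, func(e *callgraph.Edge) error {
//        fmt.Println(e)
//        return nil
//    })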
+func CallGraph(prog *ssa.Program) *callgraph.Graph { + cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph + + allFuncs := ssautil.AllFunctions(prog) + + calleesOf := lazyCallees(allFuncs) + + addEdge := func(fnode *callgraph.Node, site ssa.CallInstruction, g *ssa.Function) { + gnode := cg.CreateNode(g) + callgraph.AddEdge(fnode, site, gnode) + } + + addEdges := func(fnode *callgraph.Node, site ssa.CallInstruction, callees []*ssa.Function) { + // Because every call to a highly polymorphic and + // frequently used abstract method such as + // (io.Writer).Write is assumed to call every concrete + // Write method in the program, the call graph can + // contain a lot of duplication. + // + // TODO(taking): opt: consider making lazyCallees public. + // Using the same benchmarks as callgraph_test.go, removing just + // the explicit callgraph.Graph construction is 4x less memory + // and is 37% faster. + // CHA 86 ms/op 16 MB/op + // lazyCallees 63 ms/op 4 MB/op + for _, g := range callees { + addEdge(fnode, site, g) + } + } + + for f := range allFuncs { + fnode := cg.CreateNode(f) + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if site, ok := instr.(ssa.CallInstruction); ok { + if g := site.Common().StaticCallee(); g != nil { + addEdge(fnode, site, g) + } else { + addEdges(fnode, site, calleesOf(site)) + } + } + } + } + } + + return cg +} + +// lazyCallees returns a function that maps a call site (in a function in fns) +// to its callees within fns. +// +// The resulting function is not concurrency safe. +func lazyCallees(fns map[*ssa.Function]bool) func(site ssa.CallInstruction) []*ssa.Function { + // funcsBySig contains all functions, keyed by signature. It is + // the effective set of address-taken functions used to resolve + // a dynamic call of a particular signature. + var funcsBySig typeutil.Map // value is []*ssa.Function + + // methodsByID contains all methods, grouped by ID for efficient + // lookup. + // + // We must key by ID, not name, for correct resolution of interface + // calls to a type with two (unexported) methods spelled the same but + // from different packages. The fact that the concrete type implements + // the interface does not mean the call dispatches to both methods. + methodsByID := make(map[string][]*ssa.Function) + + // An imethod represents an interface method I.m. + // (There's no go/types object for it; + // a *types.Func may be shared by many interfaces due to interface embedding.) + type imethod struct { + I *types.Interface + id string + } + // methodsMemo records, for every abstract method call I.m on + // interface type I, the set of concrete methods C.m of all + // types C that satisfy interface I. + // + // Abstract methods may be shared by several interfaces, + // hence we must pass I explicitly, not guess from m. + // + // methodsMemo is just a cache, so it needn't be a typeutil.Map. + methodsMemo := make(map[imethod][]*ssa.Function) + lookupMethods := func(I *types.Interface, m *types.Func) []*ssa.Function { + id := m.Id() + methods, ok := methodsMemo[imethod{I, id}] + if !ok { + for _, f := range methodsByID[id] { + C := f.Signature.Recv().Type() // named or *named + if types.Implements(C, I) { + methods = append(methods, f) + } + } + methodsMemo[imethod{I, id}] = methods + } + return methods + } + + for f := range fns { + if f.Signature.Recv() == nil { + // Package initializers can never be address-taken. 
+ if f.Name() == "init" && f.Synthetic == "package initializer" { + continue + } + funcs, _ := funcsBySig.At(f.Signature).([]*ssa.Function) + funcs = append(funcs, f) + funcsBySig.Set(f.Signature, funcs) + } else if obj := f.Object(); obj != nil { + id := obj.(*types.Func).Id() + methodsByID[id] = append(methodsByID[id], f) + } + } + + return func(site ssa.CallInstruction) []*ssa.Function { + call := site.Common() + if call.IsInvoke() { + tiface := call.Value.Type().Underlying().(*types.Interface) + return lookupMethods(tiface, call.Method) + } else if g := call.StaticCallee(); g != nil { + return []*ssa.Function{g} + } else if _, ok := call.Value.(*ssa.Builtin); !ok { + fns, _ := funcsBySig.At(call.Signature()).([]*ssa.Function) + return fns + } + return nil + } +} diff --git a/vendor/golang.org/x/tools/go/callgraph/util.go b/vendor/golang.org/x/tools/go/callgraph/util.go new file mode 100644 index 0000000..5499320 --- /dev/null +++ b/vendor/golang.org/x/tools/go/callgraph/util.go @@ -0,0 +1,180 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package callgraph + +import "golang.org/x/tools/go/ssa" + +// This file provides various utilities over call graphs, such as +// visitation and path search. + +// CalleesOf returns a new set containing all direct callees of the +// caller node. +func CalleesOf(caller *Node) map[*Node]bool { + callees := make(map[*Node]bool) + for _, e := range caller.Out { + callees[e.Callee] = true + } + return callees +} + +// GraphVisitEdges visits all the edges in graph g in depth-first order. +// The edge function is called for each edge in postorder. If it +// returns non-nil, visitation stops and GraphVisitEdges returns that +// value. +func GraphVisitEdges(g *Graph, edge func(*Edge) error) error { + seen := make(map[*Node]bool) + var visit func(n *Node) error + visit = func(n *Node) error { + if !seen[n] { + seen[n] = true + for _, e := range n.Out { + if err := visit(e.Callee); err != nil { + return err + } + if err := edge(e); err != nil { + return err + } + } + } + return nil + } + for _, n := range g.Nodes { + if err := visit(n); err != nil { + return err + } + } + return nil +} + +// PathSearch finds an arbitrary path starting at node start and +// ending at some node for which isEnd() returns true. On success, +// PathSearch returns the path as an ordered list of edges; on +// failure, it returns nil. +func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge { + stack := make([]*Edge, 0, 32) + seen := make(map[*Node]bool) + var search func(n *Node) []*Edge + search = func(n *Node) []*Edge { + if !seen[n] { + seen[n] = true + if isEnd(n) { + return stack + } + for _, e := range n.Out { + stack = append(stack, e) // push + if found := search(e.Callee); found != nil { + return found + } + stack = stack[:len(stack)-1] // pop + } + } + return nil + } + return search(start) +} + +// DeleteSyntheticNodes removes from call graph g all nodes for +// functions that do not correspond to source syntax. For historical +// reasons, nodes for g.Root and package initializers are always +// kept. +// +// As nodes are removed, edges are created to preserve the +// reachability relation of the remaining nodes. +func (g *Graph) DeleteSyntheticNodes() { + // Measurements on the standard library and go.tools show that + // resulting graph has ~15% fewer nodes and 4-8% fewer edges + // than the input. 
+ // + // Inlining a wrapper of in-degree m, out-degree n adds m*n + // and removes m+n edges. Since most wrappers are monomorphic + // (n=1) this results in a slight reduction. Polymorphic + // wrappers (n>1), e.g. from embedding an interface value + // inside a struct to satisfy some interface, cause an + // increase in the graph, but they seem to be uncommon. + + // Hash all existing edges to avoid creating duplicates. + edges := make(map[Edge]bool) + for _, cgn := range g.Nodes { + for _, e := range cgn.Out { + edges[*e] = true + } + } + for fn, cgn := range g.Nodes { + if cgn == g.Root || isInit(cgn.Func) || fn.Syntax() != nil { + continue // keep + } + for _, eIn := range cgn.In { + for _, eOut := range cgn.Out { + newEdge := Edge{eIn.Caller, eIn.Site, eOut.Callee} + if edges[newEdge] { + continue // don't add duplicate + } + AddEdge(eIn.Caller, eIn.Site, eOut.Callee) + edges[newEdge] = true + } + } + g.DeleteNode(cgn) + } +} + +func isInit(fn *ssa.Function) bool { + return fn.Pkg != nil && fn.Pkg.Func("init") == fn +} + +// DeleteNode removes node n and its edges from the graph g. +// (NB: not efficient for batch deletion.) +func (g *Graph) DeleteNode(n *Node) { + n.deleteIns() + n.deleteOuts() + delete(g.Nodes, n.Func) +} + +// deleteIns deletes all incoming edges to n. +func (n *Node) deleteIns() { + for _, e := range n.In { + removeOutEdge(e) + } + n.In = nil +} + +// deleteOuts deletes all outgoing edges from n. +func (n *Node) deleteOuts() { + for _, e := range n.Out { + removeInEdge(e) + } + n.Out = nil +} + +// removeOutEdge removes edge.Caller's outgoing edge 'edge'. +func removeOutEdge(edge *Edge) { + caller := edge.Caller + n := len(caller.Out) + for i, e := range caller.Out { + if e == edge { + // Replace it with the final element and shrink the slice. + caller.Out[i] = caller.Out[n-1] + caller.Out[n-1] = nil // aid GC + caller.Out = caller.Out[:n-1] + return + } + } + panic("edge not found: " + edge.String()) +} + +// removeInEdge removes edge.Callee's incoming edge 'edge'. +func removeInEdge(edge *Edge) { + caller := edge.Callee + n := len(caller.In) + for i, e := range caller.In { + if e == edge { + // Replace it with the final element and shrink the slice. + caller.In[i] = caller.In[n-1] + caller.In[n-1] = nil // aid GC + caller.In = caller.In[:n-1] + return + } + } + panic("edge not found: " + edge.String()) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 0000000..137cc8d --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,186 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. 
Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/token" + "go/types" + "io" + "os/exec" + + "golang.org/x/tools/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the go command. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func Find(importPath, srcDir string) (filename, path string) { + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.Output() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, size, err := gcimporter.FindExportData(buf) + if err != nil { + return nil, err + } + + if size >= 0 { + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil + } else { + // We were given an object file. As such, we don't know how large + // the export data is and must return the entire file. + return buf, nil + } +} + +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. 
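// Editor's aside: illustrative sketch, not part of the vendored upstream
// file. Find, NewReader and Read (documented above) are typically composed
// as follows; the package "fmt" being looked up is arbitrary, error handling
// is abbreviated, and the snippet assumes the imports "go/token", "go/types",
// "log" and "os".
//
//    filename, path := gcexportdata.Find("fmt", ".")
//    if filename == "" {
//        log.Fatal("no export data found")
//    }
//    f, err := os.Open(filename)
//    if err != nil {
//        log.Fatal(err)
//    }
//    defer f.Close()
//
//    r, _ := gcexportdata.NewReader(f)
//    fset := token.NewFileSet()
//    imports := make(map[string]*types.Package)
//    pkg, _ := gcexportdata.Read(r, fset, imports, path)
//    log.Printf("%s exports %d objects", pkg.Path(), pkg.Scope().Len())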
+func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!<arch>")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'u': // unified, from go1.20 + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. +// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. +func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. +func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 0000000..37a7247 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context.
+// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go new file mode 100644 index 0000000..697974b --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -0,0 +1,219 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cgo handles cgo preprocessing of files containing `import "C"`. +// +// DESIGN +// +// The approach taken is to run the cgo processor on the package's +// CgoFiles and parse the output, faking the filenames of the +// resulting ASTs so that the synthetic file containing the C types is +// called "C" (e.g. "~/go/src/net/C") and the preprocessed files +// have their original names (e.g. "~/go/src/net/cgo_unix.go"), +// not the names of the actual temporary files. +// +// The advantage of this approach is its fidelity to 'go build'. The +// downside is that the token.Position.Offset for each AST node is +// incorrect, being an offset within the temporary file. Line numbers +// should still be correct because of the //line comments. +// +// The logic of this file is mostly plundered from the 'go build' +// tool, which also invokes the cgo preprocessor. +// +// +// REJECTED ALTERNATIVE +// +// An alternative approach that we explored is to extend go/types' +// Importer mechanism to provide the identity of the importing package +// so that each time `import "C"` appears it resolves to a different +// synthetic package containing just the objects needed in that case. +// The loader would invoke cgo but parse only the cgo_types.go file +// defining the package-level objects, discarding the other files +// resulting from preprocessing. +// +// The benefit of this approach would have been that source-level +// syntax information would correspond exactly to the original cgo +// file, with no preprocessing involved, making source tools like +// godoc, guru, and eg happy. 
However, the approach was rejected +// due to the additional complexity it would impose on go/types. (It +// made for a beautiful demo, though.) +// +// cgo files, despite their *.go extension, are not legal Go source +// files per the specification since they may refer to unexported +// members of package "C" such as C.int. Also, a function such as +// C.getpwent has in effect two types, one matching its C type and one +// which additionally returns (errno C.int). The cgo preprocessor +// uses name mangling to distinguish these two functions in the +// processed code, but go/types would need to duplicate this logic in +// its handling of function calls, analogous to the treatment of map +// lookups in which y=m[k] and y,ok=m[k] are both legal. + +package cgo + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" +) + +// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses +// the output and returns the resulting ASTs. +func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { + tmpdir, err := os.MkdirTemp("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmpdir) + + pkgdir := bp.Dir + if DisplayPath != nil { + pkgdir = DisplayPath(pkgdir) + } + + cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false) + if err != nil { + return nil, err + } + var files []*ast.File + for i := range cgoFiles { + rd, err := os.Open(cgoFiles[i]) + if err != nil { + return nil, err + } + display := filepath.Join(bp.Dir, cgoDisplayFiles[i]) + f, err := parser.ParseFile(fset, display, rd, mode) + rd.Close() + if err != nil { + return nil, err + } + files = append(files, f) + } + return files, nil +} + +var cgoRe = regexp.MustCompile(`[/\\:]`) + +// Run invokes the cgo preprocessor on bp.CgoFiles and returns two +// lists of files: the resulting processed files (in temporary +// directory tmpdir) and the corresponding names of the unprocessed files. +// +// Run is adapted from (*builder).cgo in +// $GOROOT/src/cmd/go/build.go, but these features are unsupported: +// Objective C, CGOPKGPATH, CGO_FLAGS. +// +// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in +// to the cgo preprocessor. This in turn will set the // line comments +// referring to those files to use absolute paths. This is needed for +// go/packages using the legacy go list support so it is able to find +// the original files. +func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) { + cgoCPPFLAGS, _, _, _ := cflags(bp, true) + _, cgoexeCFLAGS, _, _ := cflags(bp, false) + + if len(bp.CgoPkgConfig) > 0 { + pcCFLAGS, err := pkgConfigFlags(bp) + if err != nil { + return nil, nil, err + } + cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) + } + + // Allows including _cgo_export.h from .[ch] files in the package. + cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir) + + // _cgo_gotypes.go (displayed "C") contains the type definitions. + files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go")) + displayFiles = append(displayFiles, "C") + for _, fn := range bp.CgoFiles { + // "foo.cgo1.go" (displayed "foo.go") is the processed Go source. 
+ f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_") + files = append(files, filepath.Join(tmpdir, f+"cgo1.go")) + displayFiles = append(displayFiles, fn) + } + + var cgoflags []string + if bp.Goroot && bp.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_runtime_cgo=false") + } + if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_syscall=false") + } + + var cgoFiles []string = bp.CgoFiles + if useabs { + cgoFiles = make([]string, len(bp.CgoFiles)) + for i := range cgoFiles { + cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i]) + } + } + + args := stringList( + "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--", + cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles, + ) + if false { + log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir) + } + cmd := exec.Command(args[0], args[1:]...) + cmd.Dir = pkgdir + cmd.Env = append(os.Environ(), "PWD="+pkgdir) + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err) + } + + return files, displayFiles, nil +} + +// -- unmodified from 'go build' --------------------------------------- + +// Return the flags to use when invoking the C or C++ compilers, or cgo. +func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { + var defaults string + if def { + defaults = "-g -O2" + } + + cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) + cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) + cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) + ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) + return +} + +// envList returns the value of the given environment variable broken +// into fields, using the default value when the variable is empty. +func envList(key, def string) []string { + v := os.Getenv(key) + if v == "" { + v = def + } + return strings.Fields(v) +} + +// stringList's arguments should be a sequence of string or []string values. +// stringList flattens them into a single []string. +func stringList(args ...interface{}) []string { + var x []string + for _, arg := range args { + switch arg := arg.(type) { + case []string: + x = append(x, arg...) + case string: + x = append(x, arg) + default: + panic("stringList: invalid argument") + } + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go new file mode 100644 index 0000000..2455be5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go @@ -0,0 +1,42 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgo + +import ( + "errors" + "fmt" + "go/build" + "os/exec" + "strings" +) + +// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. +func pkgConfig(mode string, pkgs []string) (flags []string, err error) { + cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) 
+ out, err := cmd.Output() + if err != nil { + s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) + if len(out) > 0 { + s = fmt.Sprintf("%s: %s", s, out) + } + if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 { + s = fmt.Sprintf("%s\nstderr:\n%s", s, err.Stderr) + } + return nil, errors.New(s) + } + if len(out) > 0 { + flags = strings.Fields(string(out)) + } + return +} + +// pkgConfigFlags calls pkg-config if needed and returns the cflags +// needed to build the package. +func pkgConfigFlags(p *build.Package) (cflags []string, err error) { + if len(p.CgoPkgConfig) == 0 { + return nil, nil + } + return pkgConfig("--cflags", p.CgoPkgConfig) +} diff --git a/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/golang.org/x/tools/go/loader/doc.go new file mode 100644 index 0000000..e35b1fd --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/doc.go @@ -0,0 +1,202 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loader loads a complete Go program from source code, parsing +// and type-checking the initial packages plus their transitive closure +// of dependencies. The ASTs and the derived facts are retained for +// later use. +// +// Deprecated: This is an older API and does not have support +// for modules. Use golang.org/x/tools/go/packages instead. +// +// The package defines two primary types: Config, which specifies a +// set of initial packages to load and various other options; and +// Program, which is the result of successfully loading the packages +// specified by a configuration. +// +// The configuration can be set directly, but *Config provides various +// convenience methods to simplify the common cases, each of which can +// be called any number of times. Finally, these are followed by a +// call to Load() to actually load and type-check the program. +// +// var conf loader.Config +// +// // Use the command-line arguments to specify +// // a set of initial packages to load from source. +// // See FromArgsUsage for help. +// rest, err := conf.FromArgs(os.Args[1:], wantTests) +// +// // Parse the specified files and create an ad hoc package with path "foo". +// // All files must have the same 'package' declaration. +// conf.CreateFromFilenames("foo", "foo.go", "bar.go") +// +// // Create an ad hoc package with path "foo" from +// // the specified already-parsed files. +// // All ASTs must have the same 'package' declaration. +// conf.CreateFromFiles("foo", parsedFiles) +// +// // Add "runtime" to the set of packages to be loaded. +// conf.Import("runtime") +// +// // Adds "fmt" and "fmt_test" to the set of packages +// // to be loaded. "fmt" will include *_test.go files. +// conf.ImportWithTests("fmt") +// +// // Finally, load all the packages specified by the configuration. +// prog, err := conf.Load() +// +// See examples_test.go for examples of API usage. +// +// # CONCEPTS AND TERMINOLOGY +// +// The WORKSPACE is the set of packages accessible to the loader. The +// workspace is defined by Config.Build, a *build.Context. The +// default context treats subdirectories of $GOROOT and $GOPATH as +// packages, but this behavior may be overridden. +// +// An AD HOC package is one specified as a set of source files on the +// command line. In the simplest case, it may consist of a single file +// such as $GOROOT/src/net/http/triv.go. 
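// Editor's aside: illustrative sketch, not part of the vendored upstream
// file. Continuing the configuration example near the top of this comment,
// a successfully loaded Program is typically consumed like this (assumes
// the imports "fmt" and "log"):
//
//    prog, err := conf.Load()
//    if err != nil {
//        log.Fatal(err)
//    }
//    for _, info := range prog.InitialPackages() {
//        fmt.Printf("%s: %d files, %d errors\n",
//            info.Pkg.Path(), len(info.Files), len(info.Errors))
//    }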
+// +// EXTERNAL TEST packages are those comprised of a set of *_test.go +// files all with the same 'package foo_test' declaration, all in the +// same directory. (go/build.Package calls these files XTestFiles.) +// +// An IMPORTABLE package is one that can be referred to by some import +// spec. Every importable package is uniquely identified by its +// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json", +// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path +// typically denotes a subdirectory of the workspace. +// +// An import declaration uses an IMPORT PATH to refer to a package. +// Most import declarations use the package path as the import path. +// +// Due to VENDORING (https://golang.org/s/go15vendor), the +// interpretation of an import path may depend on the directory in which +// it appears. To resolve an import path to a package path, go/build +// must search the enclosing directories for a subdirectory named +// "vendor". +// +// ad hoc packages and external test packages are NON-IMPORTABLE. The +// path of an ad hoc package is inferred from the package +// declarations of its files and is therefore not a unique package key. +// For example, Config.CreatePkgs may specify two initial ad hoc +// packages, both with path "main". +// +// An AUGMENTED package is an importable package P plus all the +// *_test.go files with same 'package foo' declaration as P. +// (go/build.Package calls these files TestFiles.) +// +// The INITIAL packages are those specified in the configuration. A +// DEPENDENCY is a package loaded to satisfy an import in an initial +// package or another dependency. +package loader + +// IMPLEMENTATION NOTES +// +// 'go test', in-package test files, and import cycles +// --------------------------------------------------- +// +// An external test package may depend upon members of the augmented +// package that are not in the unaugmented package, such as functions +// that expose internals. (See bufio/export_test.go for an example.) +// So, the loader must ensure that for each external test package +// it loads, it also augments the corresponding non-test package. +// +// The import graph over n unaugmented packages must be acyclic; the +// import graph over n-1 unaugmented packages plus one augmented +// package must also be acyclic. ('go test' relies on this.) But the +// import graph over n augmented packages may contain cycles. +// +// First, all the (unaugmented) non-test packages and their +// dependencies are imported in the usual way; the loader reports an +// error if it detects an import cycle. +// +// Then, each package P for which testing is desired is augmented by +// the list P' of its in-package test files, by calling +// (*types.Checker).Files. This arrangement ensures that P' may +// reference definitions within P, but P may not reference definitions +// within P'. Furthermore, P' may import any other package, including +// ones that depend upon P, without an import cycle error. +// +// Consider two packages A and B, both of which have lists of +// in-package test files we'll call A' and B', and which have the +// following import graph edges: +// B imports A +// B' imports A +// A' imports B +// This last edge would be expected to create an error were it not +// for the special type-checking discipline above. +// Cycles of size greater than two are possible. 
For example: +// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil" +// io/ioutil/tempfile_test.go (package ioutil) imports "regexp" +// regexp/exec_test.go (package regexp) imports "compress/bzip2" +// +// +// Concurrency +// ----------- +// +// Let us define the import dependency graph as follows. Each node is a +// list of files passed to (Checker).Files at once. Many of these lists +// are the production code of an importable Go package, so those nodes +// are labelled by the package's path. The remaining nodes are +// ad hoc packages and lists of in-package *_test.go files that augment +// an importable package; those nodes have no label. +// +// The edges of the graph represent import statements appearing within a +// file. An edge connects a node (a list of files) to the node it +// imports, which is importable and thus always labelled. +// +// Loading is controlled by this dependency graph. +// +// To reduce I/O latency, we start loading a package's dependencies +// asynchronously as soon as we've parsed its files and enumerated its +// imports (scanImports). This performs a preorder traversal of the +// import dependency graph. +// +// To exploit hardware parallelism, we type-check unrelated packages in +// parallel, where "unrelated" means not ordered by the partial order of +// the import dependency graph. +// +// We use a concurrency-safe non-blocking cache (importer.imported) to +// record the results of type-checking, whether success or failure. An +// entry is created in this cache by startLoad the first time the +// package is imported. The first goroutine to request an entry becomes +// responsible for completing the task and broadcasting completion to +// subsequent requestors, which block until then. +// +// Type checking occurs in (parallel) postorder: we cannot type-check a +// set of files until we have loaded and type-checked all of their +// immediate dependencies (and thus all of their transitive +// dependencies). If the input were guaranteed free of import cycles, +// this would be trivial: we could simply wait for completion of the +// dependencies and then invoke the typechecker. +// +// But as we saw in the 'go test' section above, some cycles in the +// import graph over packages are actually legal, so long as the +// cycle-forming edge originates in the in-package test files that +// augment the package. This explains why the nodes of the import +// dependency graph are not packages, but lists of files: the unlabelled +// nodes avoid the cycles. Consider packages A and B where B imports A +// and A's in-package tests AT import B. The naively constructed import +// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but +// the graph over lists of files is AT --> B --> A, where AT is an +// unlabelled node. +// +// Awaiting completion of the dependencies in a cyclic graph would +// deadlock, so we must materialize the import dependency graph (as +// importer.graph) and check whether each import edge forms a cycle. If +// x imports y, and the graph already contains a path from y to x, then +// there is an import cycle, in which case the processing of x must not +// wait for the completion of processing of y. +// +// When the type-checker makes a callback (doImport) to the loader for a +// given import edge, there are two possible cases. In the normal case, +// the dependency has already been completely type-checked; doImport +// does a cache lookup and returns it. 
In the cyclic case, the entry in +// the cache is still necessarily incomplete, indicating a cycle. We +// perform the cycle check again to obtain the error message, and return +// the error. +// +// The result of using concurrency is about a 2.5x speedup for stdlib_test. diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go new file mode 100644 index 0000000..013c0f5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/loader.go @@ -0,0 +1,1066 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loader + +// See doc.go for package documentation and implementation notes. + +import ( + "errors" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "go/types" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/internal/cgo" + "golang.org/x/tools/internal/versions" +) + +var ignoreVendor build.ImportMode + +const trace = false // show timing info for type-checking + +// Config specifies the configuration for loading a whole program from +// Go source code. +// The zero value for Config is a ready-to-use default configuration. +type Config struct { + // Fset is the file set for the parser to use when loading the + // program. If nil, it may be lazily initialized by any + // method of Config. + Fset *token.FileSet + + // ParserMode specifies the mode to be used by the parser when + // loading source packages. + ParserMode parser.Mode + + // TypeChecker contains options relating to the type checker. + // + // The supplied IgnoreFuncBodies is not used; the effective + // value comes from the TypeCheckFuncBodies func below. + // The supplied Import function is not used either. + TypeChecker types.Config + + // TypeCheckFuncBodies is a predicate over package paths. + // A package for which the predicate is false will + // have its package-level declarations type checked, but not + // its function bodies; this can be used to quickly load + // dependencies from source. If nil, all func bodies are type + // checked. + TypeCheckFuncBodies func(path string) bool + + // If Build is non-nil, it is used to locate source packages. + // Otherwise &build.Default is used. + // + // By default, cgo is invoked to preprocess Go files that + // import the fake package "C". This behaviour can be + // disabled by setting CGO_ENABLED=0 in the environment prior + // to startup, or by setting Build.CgoEnabled=false. + Build *build.Context + + // The current directory, used for resolving relative package + // references such as "./go/loader". If empty, os.Getwd will be + // used instead. + Cwd string + + // If DisplayPath is non-nil, it is used to transform each + // file name obtained from Build.Import(). This can be used + // to prevent a virtualized build.Config's file names from + // leaking into the user interface. + DisplayPath func(path string) string + + // If AllowErrors is true, Load will return a Program even + // if some of the its packages contained I/O, parser or type + // errors; such errors are accessible via PackageInfo.Errors. If + // false, Load will fail if any package had an error. + AllowErrors bool + + // CreatePkgs specifies a list of non-importable initial + // packages to create. The resulting packages will appear in + // the corresponding elements of the Program.Created slice. 
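+	//
+	// For example (a sketch; the file names are hypothetical), a client
+	// can populate this list via the CreateFromFilenames helper instead
+	// of constructing PkgSpec values directly:
+	//
+	//	conf.CreateFromFilenames("main", "a.go", "b.go")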
+ CreatePkgs []PkgSpec + + // ImportPkgs specifies a set of initial packages to load. + // The map keys are package paths. + // + // The map value indicates whether to load tests. If true, Load + // will add and type-check two lists of files to the package: + // non-test files followed by in-package *_test.go files. In + // addition, it will append the external test package (if any) + // to Program.Created. + ImportPkgs map[string]bool + + // FindPackage is called during Load to create the build.Package + // for a given import path from a given directory. + // If FindPackage is nil, (*build.Context).Import is used. + // A client may use this hook to adapt to a proprietary build + // system that does not follow the "go build" layout + // conventions, for example. + // + // It must be safe to call concurrently from multiple goroutines. + FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error) + + // AfterTypeCheck is called immediately after a list of files + // has been type-checked and appended to info.Files. + // + // This optional hook function is the earliest opportunity for + // the client to observe the output of the type checker, + // which may be useful to reduce analysis latency when loading + // a large program. + // + // The function is permitted to modify info.Info, for instance + // to clear data structures that are no longer needed, which can + // dramatically reduce peak memory consumption. + // + // The function may be called twice for the same PackageInfo: + // once for the files of the package and again for the + // in-package test files. + // + // It must be safe to call concurrently from multiple goroutines. + AfterTypeCheck func(info *PackageInfo, files []*ast.File) +} + +// A PkgSpec specifies a non-importable package to be created by Load. +// Files are processed first, but typically only one of Files and +// Filenames is provided. The path needn't be globally unique. +// +// For vendoring purposes, the package's directory is the one that +// contains the first file. +type PkgSpec struct { + Path string // package path ("" => use package declaration) + Files []*ast.File // ASTs of already-parsed files + Filenames []string // names of files to be parsed +} + +// A Program is a Go program loaded from source as specified by a Config. +type Program struct { + Fset *token.FileSet // the file set for this program + + // Created[i] contains the initial package whose ASTs or + // filenames were supplied by Config.CreatePkgs[i], followed by + // the external test package, if any, of each package in + // Config.ImportPkgs ordered by ImportPath. + // + // NOTE: these files must not import "C". Cgo preprocessing is + // only performed on imported packages, not ad hoc packages. + // + // TODO(adonovan): we need to copy and adapt the logic of + // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make + // Config.Import and Config.Create methods return the same kind + // of entity, essentially a build.Package. + // Perhaps we can even reuse that type directly. + Created []*PackageInfo + + // Imported contains the initially imported packages, + // as specified by Config.ImportPkgs. + Imported map[string]*PackageInfo + + // AllPackages contains the PackageInfo of every package + // encountered by Load: all initial packages and all + // dependencies, including incomplete ones. + AllPackages map[*types.Package]*PackageInfo + + // importMap is the canonical mapping of package paths to + // packages. 
It contains all Imported initial packages, but not + // Created ones, and all imported dependencies. + importMap map[string]*types.Package +} + +// PackageInfo holds the ASTs and facts derived by the type-checker +// for a single package. +// +// Not mutated once exposed via the API. +type PackageInfo struct { + Pkg *types.Package + Importable bool // true if 'import "Pkg.Path()"' would resolve to this + TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors + Files []*ast.File // syntax trees for the package's files + Errors []error // non-nil if the package had errors + types.Info // type-checker deductions. + dir string // package directory + + checker *types.Checker // transient type-checker state + errorFunc func(error) +} + +func (info *PackageInfo) String() string { return info.Pkg.Path() } + +func (info *PackageInfo) appendError(err error) { + if info.errorFunc != nil { + info.errorFunc(err) + } else { + fmt.Fprintln(os.Stderr, err) + } + info.Errors = append(info.Errors, err) +} + +func (conf *Config) fset() *token.FileSet { + if conf.Fset == nil { + conf.Fset = token.NewFileSet() + } + return conf.Fset +} + +// ParseFile is a convenience function (intended for testing) that invokes +// the parser using the Config's FileSet, which is initialized if nil. +// +// src specifies the parser input as a string, []byte, or io.Reader, and +// filename is its apparent name. If src is nil, the contents of +// filename are read from the file system. +func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) { + // TODO(adonovan): use conf.build() etc like parseFiles does. + return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode) +} + +// FromArgsUsage is a partial usage message that applications calling +// FromArgs may wish to include in their -help output. +const FromArgsUsage = ` + is a list of arguments denoting a set of initial packages. +It may take one of two forms: + +1. A list of *.go source files. + + All of the specified files are loaded, parsed and type-checked + as a single package. All the files must belong to the same directory. + +2. A list of import paths, each denoting a package. + + The package's directory is found relative to the $GOROOT and + $GOPATH using similar logic to 'go build', and the *.go files in + that directory are loaded, parsed and type-checked as a single + package. + + In addition, all *_test.go files in the directory are then loaded + and parsed. Those files whose package declaration equals that of + the non-*_test.go files are included in the primary package. Test + files whose package declaration ends with "_test" are type-checked + as another package, the 'external' test package, so that a single + import path may denote two packages. (Whether this behaviour is + enabled is tool-specific, and may depend on additional flags.) + +A '--' argument terminates the list of packages. +` + +// FromArgs interprets args as a set of initial packages to load from +// source and updates the configuration. It returns the list of +// unconsumed arguments. +// +// It is intended for use in command-line interfaces that require a +// set of initial packages to be specified; see FromArgsUsage message +// for details. +// +// Only superficial errors are reported at this stage; errors dependent +// on I/O are detected during Load. 
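+//
+// A minimal usage sketch (error handling elided; the argument list and
+// xtest flag are illustrative):
+//
+//	var conf loader.Config
+//	rest, err := conf.FromArgs(os.Args[1:], false)
+//	// ... handle err, use rest ...
+//	prog, err := conf.Load()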
+func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) { + var rest []string + for i, arg := range args { + if arg == "--" { + rest = args[i+1:] + args = args[:i] + break // consume "--" and return the remaining args + } + } + + if len(args) > 0 && strings.HasSuffix(args[0], ".go") { + // Assume args is a list of a *.go files + // denoting a single ad hoc package. + for _, arg := range args { + if !strings.HasSuffix(arg, ".go") { + return nil, fmt.Errorf("named files must be .go files: %s", arg) + } + } + conf.CreateFromFilenames("", args...) + } else { + // Assume args are directories each denoting a + // package and (perhaps) an external test, iff xtest. + for _, arg := range args { + if xtest { + conf.ImportWithTests(arg) + } else { + conf.Import(arg) + } + } + } + + return rest, nil +} + +// CreateFromFilenames is a convenience function that adds +// a conf.CreatePkgs entry to create a package of the specified *.go +// files. +func (conf *Config) CreateFromFilenames(path string, filenames ...string) { + conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames}) +} + +// CreateFromFiles is a convenience function that adds a conf.CreatePkgs +// entry to create package of the specified path and parsed files. +func (conf *Config) CreateFromFiles(path string, files ...*ast.File) { + conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files}) +} + +// ImportWithTests is a convenience function that adds path to +// ImportPkgs, the set of initial source packages located relative to +// $GOPATH. The package will be augmented by any *_test.go files in +// its directory that contain a "package x" (not "package x_test") +// declaration. +// +// In addition, if any *_test.go files contain a "package x_test" +// declaration, an additional package comprising just those files will +// be added to CreatePkgs. +func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) } + +// Import is a convenience function that adds path to ImportPkgs, the +// set of initial packages that will be imported from source. +func (conf *Config) Import(path string) { conf.addImport(path, false) } + +func (conf *Config) addImport(path string, tests bool) { + if path == "C" { + return // ignore; not a real package + } + if conf.ImportPkgs == nil { + conf.ImportPkgs = make(map[string]bool) + } + conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests +} + +// PathEnclosingInterval returns the PackageInfo and ast.Node that +// contain source interval [start, end), and all the node's ancestors +// up to the AST root. It searches all ast.Files of all packages in prog. +// exact is defined as for astutil.PathEnclosingInterval. +// +// The zero value is returned if not found. +func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) { + for _, info := range prog.AllPackages { + for _, f := range info.Files { + if f.Pos() == token.NoPos { + // This can happen if the parser saw + // too many errors and bailed out. + // (Use parser.AllErrors to prevent that.) + continue + } + if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) { + continue + } + if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { + return info, path, exact + } + } + } + return nil, nil, false +} + +// InitialPackages returns a new slice containing the set of initial +// packages (Created + Imported) in unspecified order. 
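+//
+// For example (sketch), a client might enumerate the initial packages
+// after a successful Load:
+//
+//	for _, info := range prog.InitialPackages() {
+//		fmt.Println(info.Pkg.Path(), len(info.Files))
+//	}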
+func (prog *Program) InitialPackages() []*PackageInfo { + infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported)) + infos = append(infos, prog.Created...) + for _, info := range prog.Imported { + infos = append(infos, info) + } + return infos +} + +// Package returns the ASTs and results of type checking for the +// specified package. +func (prog *Program) Package(path string) *PackageInfo { + if info, ok := prog.AllPackages[prog.importMap[path]]; ok { + return info + } + for _, info := range prog.Created { + if path == info.Pkg.Path() { + return info + } + } + return nil +} + +// ---------- Implementation ---------- + +// importer holds the working state of the algorithm. +type importer struct { + conf *Config // the client configuration + start time.Time // for logging + + progMu sync.Mutex // guards prog + prog *Program // the resulting program + + // findpkg is a memoization of FindPackage. + findpkgMu sync.Mutex // guards findpkg + findpkg map[findpkgKey]*findpkgValue + + importedMu sync.Mutex // guards imported + imported map[string]*importInfo // all imported packages (incl. failures) by import path + + // import dependency graph: graph[x][y] => x imports y + // + // Since non-importable packages cannot be cyclic, we ignore + // their imports, thus we only need the subgraph over importable + // packages. Nodes are identified by their import paths. + graphMu sync.Mutex + graph map[string]map[string]bool +} + +type findpkgKey struct { + importPath string + fromDir string + mode build.ImportMode +} + +type findpkgValue struct { + ready chan struct{} // closed to broadcast readiness + bp *build.Package + err error +} + +// importInfo tracks the success or failure of a single import. +// +// Upon completion, exactly one of info and err is non-nil: +// info on successful creation of a package, err otherwise. +// A successful package may still contain type errors. +type importInfo struct { + path string // import path + info *PackageInfo // results of typechecking (including errors) + complete chan struct{} // closed to broadcast that info is set. +} + +// awaitCompletion blocks until ii is complete, +// i.e. the info field is safe to inspect. +func (ii *importInfo) awaitCompletion() { + <-ii.complete // wait for close +} + +// Complete marks ii as complete. +// Its info and err fields will not be subsequently updated. +func (ii *importInfo) Complete(info *PackageInfo) { + if info == nil { + panic("info == nil") + } + ii.info = info + close(ii.complete) +} + +type importError struct { + path string // import path + err error // reason for failure to create a package +} + +// Load creates the initial packages specified by conf.{Create,Import}Pkgs, +// loading their dependencies packages as needed. +// +// On success, Load returns a Program containing a PackageInfo for +// each package. On failure, it returns an error. +// +// If AllowErrors is true, Load will return a Program even if some +// packages contained I/O, parser or type errors, or if dependencies +// were missing. (Such errors are accessible via PackageInfo.Errors. If +// false, Load will fail if any package had an error. +// +// It is an error if no packages were loaded. +func (conf *Config) Load() (*Program, error) { + // Create a simple default error handler for parse/type errors. + if conf.TypeChecker.Error == nil { + conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) } + } + + // Set default working directory for relative package references. 
+ if conf.Cwd == "" { + var err error + conf.Cwd, err = os.Getwd() + if err != nil { + return nil, err + } + } + + // Install default FindPackage hook using go/build logic. + if conf.FindPackage == nil { + conf.FindPackage = (*build.Context).Import + } + + prog := &Program{ + Fset: conf.fset(), + Imported: make(map[string]*PackageInfo), + importMap: make(map[string]*types.Package), + AllPackages: make(map[*types.Package]*PackageInfo), + } + + imp := importer{ + conf: conf, + prog: prog, + findpkg: make(map[findpkgKey]*findpkgValue), + imported: make(map[string]*importInfo), + start: time.Now(), + graph: make(map[string]map[string]bool), + } + + // -- loading proper (concurrent phase) -------------------------------- + + var errpkgs []string // packages that contained errors + + // Load the initially imported packages and their dependencies, + // in parallel. + // No vendor check on packages imported from the command line. + infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor) + for _, ie := range importErrors { + conf.TypeChecker.Error(ie.err) // failed to create package + errpkgs = append(errpkgs, ie.path) + } + for _, info := range infos { + prog.Imported[info.Pkg.Path()] = info + } + + // Augment the designated initial packages by their tests. + // Dependencies are loaded in parallel. + var xtestPkgs []*build.Package + for importPath, augment := range conf.ImportPkgs { + if !augment { + continue + } + + // No vendor check on packages imported from command line. + bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor) + if err != nil { + // Package not found, or can't even parse package declaration. + // Already reported by previous loop; ignore it. + continue + } + + // Needs external test package? + if len(bp.XTestGoFiles) > 0 { + xtestPkgs = append(xtestPkgs, bp) + } + + // Consult the cache using the canonical package path. + path := bp.ImportPath + imp.importedMu.Lock() // (unnecessary, we're sequential here) + ii, ok := imp.imported[path] + // Paranoid checks added due to issue #11012. + if !ok { + // Unreachable. + // The previous loop called importAll and thus + // startLoad for each path in ImportPkgs, which + // populates imp.imported[path] with a non-zero value. + panic(fmt.Sprintf("imported[%q] not found", path)) + } + if ii == nil { + // Unreachable. + // The ii values in this loop are the same as in + // the previous loop, which enforced the invariant + // that at least one of ii.err and ii.info is non-nil. + panic(fmt.Sprintf("imported[%q] == nil", path)) + } + if ii.info == nil { + // Unreachable. + // awaitCompletion has the postcondition + // ii.info != nil. + panic(fmt.Sprintf("imported[%q].info = nil", path)) + } + info := ii.info + imp.importedMu.Unlock() + + // Parse the in-package test files. + files, errs := imp.conf.parsePackageFiles(bp, 't') + for _, err := range errs { + info.appendError(err) + } + + // The test files augmenting package P cannot be imported, + // but may import packages that import P, + // so we must disable the cycle check. + imp.addFiles(info, files, false) + } + + createPkg := func(path, dir string, files []*ast.File, errs []error) { + info := imp.newPackageInfo(path, dir) + for _, err := range errs { + info.appendError(err) + } + + // Ad hoc packages are non-importable, + // so no cycle check is needed. + // addFiles loads dependencies in parallel. + imp.addFiles(info, files, false) + prog.Created = append(prog.Created, info) + } + + // Create packages specified by conf.CreatePkgs. 
+ for _, cp := range conf.CreatePkgs { + files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode) + files = append(files, cp.Files...) + + path := cp.Path + if path == "" { + if len(files) > 0 { + path = files[0].Name.Name + } else { + path = "(unnamed)" + } + } + + dir := conf.Cwd + if len(files) > 0 && files[0].Pos().IsValid() { + dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name()) + } + createPkg(path, dir, files, errs) + } + + // Create external test packages. + sort.Sort(byImportPath(xtestPkgs)) + for _, bp := range xtestPkgs { + files, errs := imp.conf.parsePackageFiles(bp, 'x') + createPkg(bp.ImportPath+"_test", bp.Dir, files, errs) + } + + // -- finishing up (sequential) ---------------------------------------- + + if len(prog.Imported)+len(prog.Created) == 0 { + return nil, errors.New("no initial packages were loaded") + } + + // Create infos for indirectly imported packages. + // e.g. incomplete packages without syntax, loaded from export data. + for _, obj := range prog.importMap { + info := prog.AllPackages[obj] + if info == nil { + prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true} + } else { + // finished + info.checker = nil + info.errorFunc = nil + } + } + + if !conf.AllowErrors { + // Report errors in indirectly imported packages. + for _, info := range prog.AllPackages { + if len(info.Errors) > 0 { + errpkgs = append(errpkgs, info.Pkg.Path()) + } + } + if errpkgs != nil { + var more string + if len(errpkgs) > 3 { + more = fmt.Sprintf(" and %d more", len(errpkgs)-3) + errpkgs = errpkgs[:3] + } + return nil, fmt.Errorf("couldn't load packages due to errors: %s%s", + strings.Join(errpkgs, ", "), more) + } + } + + markErrorFreePackages(prog.AllPackages) + + return prog, nil +} + +type byImportPath []*build.Package + +func (b byImportPath) Len() int { return len(b) } +func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath } +func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +// markErrorFreePackages sets the TransitivelyErrorFree flag on all +// applicable packages. +func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) { + // Build the transpose of the import graph. + importedBy := make(map[*types.Package]map[*types.Package]bool) + for P := range allPackages { + for _, Q := range P.Imports() { + clients, ok := importedBy[Q] + if !ok { + clients = make(map[*types.Package]bool) + importedBy[Q] = clients + } + clients[P] = true + } + } + + // Find all packages reachable from some error package. + reachable := make(map[*types.Package]bool) + var visit func(*types.Package) + visit = func(p *types.Package) { + if !reachable[p] { + reachable[p] = true + for q := range importedBy[p] { + visit(q) + } + } + } + for _, info := range allPackages { + if len(info.Errors) > 0 { + visit(info.Pkg) + } + } + + // Mark the others as "transitively error-free". + for _, info := range allPackages { + if !reachable[info.Pkg] { + info.TransitivelyErrorFree = true + } + } +} + +// build returns the effective build context. +func (conf *Config) build() *build.Context { + if conf.Build != nil { + return conf.Build + } + return &build.Default +} + +// parsePackageFiles enumerates the files belonging to package path, +// then loads, parses and returns them, plus a list of I/O or parse +// errors that were encountered. 
+// +// 'which' indicates which files to include: +// +// 'g': include non-test *.go source files (GoFiles + processed CgoFiles) +// 't': include in-package *_test.go source files (TestGoFiles) +// 'x': include external *_test.go source files. (XTestGoFiles) +func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) { + if bp.ImportPath == "unsafe" { + return nil, nil + } + var filenames []string + switch which { + case 'g': + filenames = bp.GoFiles + case 't': + filenames = bp.TestGoFiles + case 'x': + filenames = bp.XTestGoFiles + default: + panic(which) + } + + files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode) + + // Preprocess CgoFiles and parse the outputs (sequentially). + if which == 'g' && bp.CgoFiles != nil { + cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode) + if err != nil { + errs = append(errs, err) + } else { + files = append(files, cgofiles...) + } + } + + return files, errs +} + +// doImport imports the package denoted by path. +// It implements the types.Importer signature. +// +// It returns an error if a package could not be created +// (e.g. go/build or parse error), but type errors are reported via +// the types.Config.Error callback (the first of which is also saved +// in the package's PackageInfo). +// +// Idempotent. +func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) { + if to == "C" { + // This should be unreachable, but ad hoc packages are + // not currently subject to cgo preprocessing. + // See https://golang.org/issue/11627. + return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`, + from.Pkg.Path()) + } + + bp, err := imp.findPackage(to, from.dir, 0) + if err != nil { + return nil, err + } + + // The standard unsafe package is handled specially, + // and has no PackageInfo. + if bp.ImportPath == "unsafe" { + return types.Unsafe, nil + } + + // Look for the package in the cache using its canonical path. + path := bp.ImportPath + imp.importedMu.Lock() + ii := imp.imported[path] + imp.importedMu.Unlock() + if ii == nil { + panic("internal error: unexpected import: " + path) + } + if ii.info != nil { + return ii.info.Pkg, nil + } + + // Import of incomplete package: this indicates a cycle. + fromPath := from.Pkg.Path() + if cycle := imp.findPath(path, fromPath); cycle != nil { + // Normalize cycle: start from alphabetically largest node. + pos, start := -1, "" + for i, s := range cycle { + if pos < 0 || s > start { + pos, start = i, s + } + } + cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest + cycle = append(cycle, cycle[0]) // add start node to end to show cycliness + return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> ")) + } + + panic("internal error: import of incomplete (yet acyclic) package: " + fromPath) +} + +// findPackage locates the package denoted by the importPath in the +// specified directory. +func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) { + // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7) + // to avoid holding the lock around FindPackage. 
+ key := findpkgKey{importPath, fromDir, mode} + imp.findpkgMu.Lock() + v, ok := imp.findpkg[key] + if ok { + // cache hit + imp.findpkgMu.Unlock() + + <-v.ready // wait for entry to become ready + } else { + // Cache miss: this goroutine becomes responsible for + // populating the map entry and broadcasting its readiness. + v = &findpkgValue{ready: make(chan struct{})} + imp.findpkg[key] = v + imp.findpkgMu.Unlock() + + ioLimit <- true + v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode) + <-ioLimit + + if _, ok := v.err.(*build.NoGoError); ok { + v.err = nil // empty directory is not an error + } + + close(v.ready) // broadcast ready condition + } + return v.bp, v.err +} + +// importAll loads, parses, and type-checks the specified packages in +// parallel and returns their completed importInfos in unspecified order. +// +// fromPath is the package path of the importing package, if it is +// importable, "" otherwise. It is used for cycle detection. +// +// fromDir is the directory containing the import declaration that +// caused these imports. +func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) { + if fromPath != "" { + // We're loading a set of imports. + // + // We must record graph edges from the importing package + // to its dependencies, and check for cycles. + imp.graphMu.Lock() + deps, ok := imp.graph[fromPath] + if !ok { + deps = make(map[string]bool) + imp.graph[fromPath] = deps + } + for importPath := range imports { + deps[importPath] = true + } + imp.graphMu.Unlock() + } + + var pending []*importInfo + for importPath := range imports { + if fromPath != "" { + if cycle := imp.findPath(importPath, fromPath); cycle != nil { + // Cycle-forming import: we must not check it + // since it would deadlock. + if trace { + fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle) + } + continue + } + } + bp, err := imp.findPackage(importPath, fromDir, mode) + if err != nil { + errors = append(errors, importError{ + path: importPath, + err: err, + }) + continue + } + pending = append(pending, imp.startLoad(bp)) + } + + for _, ii := range pending { + ii.awaitCompletion() + infos = append(infos, ii.info) + } + + return infos, errors +} + +// findPath returns an arbitrary path from 'from' to 'to' in the import +// graph, or nil if there was none. +func (imp *importer) findPath(from, to string) []string { + imp.graphMu.Lock() + defer imp.graphMu.Unlock() + + seen := make(map[string]bool) + var search func(stack []string, importPath string) []string + search = func(stack []string, importPath string) []string { + if !seen[importPath] { + seen[importPath] = true + stack = append(stack, importPath) + if importPath == to { + return stack + } + for x := range imp.graph[importPath] { + if p := search(stack, x); p != nil { + return p + } + } + } + return nil + } + return search(make([]string, 0, 20), from) +} + +// startLoad initiates the loading, parsing and type-checking of the +// specified package and its dependencies, if it has not already begun. +// +// It returns an importInfo, not necessarily in a completed state. The +// caller must call awaitCompletion() before accessing its info field. +// +// startLoad is concurrency-safe and idempotent. 
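+//
+// A typical caller therefore looks like (sketch):
+//
+//	ii := imp.startLoad(bp)
+//	ii.awaitCompletion()
+//	info := ii.info // non-nil once awaitCompletion has returned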
+func (imp *importer) startLoad(bp *build.Package) *importInfo { + path := bp.ImportPath + imp.importedMu.Lock() + ii, ok := imp.imported[path] + if !ok { + ii = &importInfo{path: path, complete: make(chan struct{})} + imp.imported[path] = ii + go func() { + info := imp.load(bp) + ii.Complete(info) + }() + } + imp.importedMu.Unlock() + + return ii +} + +// load implements package loading by parsing Go source files +// located by go/build. +func (imp *importer) load(bp *build.Package) *PackageInfo { + info := imp.newPackageInfo(bp.ImportPath, bp.Dir) + info.Importable = true + files, errs := imp.conf.parsePackageFiles(bp, 'g') + for _, err := range errs { + info.appendError(err) + } + + imp.addFiles(info, files, true) + + imp.progMu.Lock() + imp.prog.importMap[bp.ImportPath] = info.Pkg + imp.progMu.Unlock() + + return info +} + +// addFiles adds and type-checks the specified files to info, loading +// their dependencies if needed. The order of files determines the +// package initialization order. It may be called multiple times on the +// same package. Errors are appended to the info.Errors field. +// +// cycleCheck determines whether the imports within files create +// dependency edges that should be checked for potential cycles. +func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) { + // Ensure the dependencies are loaded, in parallel. + var fromPath string + if cycleCheck { + fromPath = info.Pkg.Path() + } + // TODO(adonovan): opt: make the caller do scanImports. + // Callers with a build.Package can skip it. + imp.importAll(fromPath, info.dir, scanImports(files), 0) + + if trace { + fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n", + time.Since(imp.start), info.Pkg.Path(), len(files)) + } + + // Don't call checker.Files on Unsafe, even with zero files, + // because it would mutate the package, which is a global. + if info.Pkg == types.Unsafe { + if len(files) > 0 { + panic(`"unsafe" package contains unexpected files`) + } + } else { + // Ignore the returned (first) error since we + // already collect them all in the PackageInfo. + info.checker.Files(files) + info.Files = append(info.Files, files...) + } + + if imp.conf.AfterTypeCheck != nil { + imp.conf.AfterTypeCheck(info, files) + } + + if trace { + fmt.Fprintf(os.Stderr, "%s: stop %q\n", + time.Since(imp.start), info.Pkg.Path()) + } +} + +func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { + var pkg *types.Package + if path == "unsafe" { + pkg = types.Unsafe + } else { + pkg = types.NewPackage(path, "") + } + info := &PackageInfo{ + Pkg: pkg, + Info: types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + }, + errorFunc: imp.conf.TypeChecker.Error, + dir: dir, + } + versions.InitFileVersions(&info.Info) + + // Copy the types.Config so we can vary it across PackageInfos. 
+ tc := imp.conf.TypeChecker + tc.IgnoreFuncBodies = false + if f := imp.conf.TypeCheckFuncBodies; f != nil { + tc.IgnoreFuncBodies = !f(path) + } + tc.Importer = closure{imp, info} + tc.Error = info.appendError // appendError wraps the user's Error function + + info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info) + imp.progMu.Lock() + imp.prog.AllPackages[pkg] = info + imp.progMu.Unlock() + return info +} + +type closure struct { + imp *importer + info *PackageInfo +} + +func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) } diff --git a/vendor/golang.org/x/tools/go/loader/util.go b/vendor/golang.org/x/tools/go/loader/util.go new file mode 100644 index 0000000..3a80aca --- /dev/null +++ b/vendor/golang.org/x/tools/go/loader/util.go @@ -0,0 +1,123 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package loader + +import ( + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "os" + "strconv" + "sync" + + "golang.org/x/tools/go/buildutil" +) + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 10) + +// parseFiles parses the Go source files within directory dir and +// returns the ASTs of the ones that could be at least partially parsed, +// along with a list of I/O and parse errors encountered. +// +// I/O is done via ctxt, which may specify a virtual file system. +// displayPath is used to transform the filenames attached to the ASTs. +func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) { + if displayPath == nil { + displayPath = func(path string) string { return path } + } + var wg sync.WaitGroup + n := len(files) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range files { + if !buildutil.IsAbsPath(ctxt, file) { + file = buildutil.JoinPath(ctxt, dir, file) + } + wg.Add(1) + go func(i int, file string) { + ioLimit <- true // wait + defer func() { + wg.Done() + <-ioLimit // signal + }() + var rd io.ReadCloser + var err error + if ctxt.OpenFile != nil { + rd, err = ctxt.OpenFile(file) + } else { + rd, err = os.Open(file) + } + if err != nil { + errors[i] = err // open failed + return + } + + // ParseFile may return both an AST and an error. + parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode) + rd.Close() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// scanImports returns the set of all import paths from all +// import specs in the specified files. +func scanImports(files []*ast.File) map[string]bool { + imports := make(map[string]bool) + for _, f := range files { + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { + for _, spec := range decl.Specs { + spec := spec.(*ast.ImportSpec) + + // NB: do not assume the program is well-formed! 
+ path, err := strconv.Unquote(spec.Path.Value) + if err != nil { + continue // quietly ignore the error + } + if path == "C" { + continue // skip pseudopackage + } + imports[path] = true + } + } + } + } + return imports +} + +// ---------- Internal helpers ---------- + +// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos) +func tokenFileContainsPos(f *token.File, pos token.Pos) bool { + p := int(pos) + base := f.Base() + return base <= p && p < base+f.Size() +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 0000000..3531ac8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,242 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. + +All patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypesInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. 
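+
+For example, the following sketch loads the packages matching two
+patterns and prints their files; the chosen Mode bits are illustrative
+and error handling is abbreviated:
+
+	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports}
+	pkgs, err := packages.Load(cfg, "bytes", "unicode...")
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, pkg := range pkgs {
+		fmt.Println(pkg.ID, pkg.GoFiles)
+	}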
+ +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to [Load], so that it can interpret them +according to the conventions of the underlying build system. + +See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) 
+ +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). 
+Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. +Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 0000000..c2b4b71 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,156 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "strings" +) + +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. +// +// See the package documentation for an overview. +type DriverRequest struct { + Mode LoadMode `json:"mode"` + + // Env specifies the environment the underlying build system should be run in. + Env []string `json:"env"` + + // BuildFlags are flags that should be passed to the underlying build system. 
+ BuildFlags []string `json:"build_flags"` + + // Tests specifies whether the patterns should also return test packages. + Tests bool `json:"tests"` + + // Overlay maps file paths (relative to the driver's working directory) + // to the contents of overlay files (see Config.Overlay). + Overlay map[string][]byte `json:"overlay"` +} + +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. +func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + // The cwd gets resolved to the real path. On Darwin, where + // /tmp is a symlink, this breaks anything that expects the + // working directory to keep the original path, including the + // go command when dealing with modules. 
+ // + // os.Getwd stdlib has a special feature where if the + // cwd and the PWD are the same node then it trusts + // the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go + // command. + // + // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) + cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) + } + + var response DriverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} + +// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. +// TODO(adonovan): use go1.21 slices.Clip. +func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 0000000..1a3a5b4 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1066 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" +) + +// debug controls verbose logging. +var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a DriverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *DriverResponse +} + +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &DriverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a DriverResponse. +func (r *responseDeduper) addAll(dr *DriverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } + r.dr.GoVersion = dr.GoVersion +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. 
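+//
+// The variables are obtained from "go env -json GOMOD GOPATH", whose
+// output is a small JSON object along these lines (values illustrative):
+//
+//	{
+//		"GOMOD": "/home/user/project/go.mod",
+//		"GOPATH": "/home/user/go"
+//	}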
+func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + + // Fill in response.Sizes asynchronously if necessary. + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + errCh := make(chan error) + go func() { + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner) + response.dr.Compiler = compiler + response.dr.Arch = arch + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. +extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // (We may yet return an error due to defer.) + return response.dr, nil +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. 
+ fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. 
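The contains-query handling above is what backs the public "file=" pattern form of this package's API. A hedged usage sketch follows; the file path is purely illustrative.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Ask for the package that contains one particular source file.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	pkgs, err := packages.Load(cfg, "file=/home/user/proj/internal/util/util.go") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.GoFiles)
	}
}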
+type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) + if err != nil { + return nil, err + } + + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. + response := &DriverResponse{ + GoVersion: goVersion, + } + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. 
+ if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. 
+ } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + forTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. 
+ // + // TODO(rfindley): remove this heuristic, in favor of considering + // InvalidGoFiles from the list driver. + if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { + addFilenameFromPos := func(pos string) bool { + split := strings.Split(pos, ":") + if len(split) < 1 { + return false + } + filename := strings.TrimSpace(split[0]) + if filename == "" { + return false + } + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + info, _ := os.Stat(filename) + if info == nil { + return false + } + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + return true + } + found := addFilenameFromPos(err.Pos) + // In some cases, go list only reports the error position in the + // error text, not the error position. One such case is when the + // file's package name is a keyword (see golang.org/issue/39763). + if !found { + addFilenameFromPos(err.Err) + } + } + + if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. + // Address golang.org/issue/35964 by appending import stack to error message. + if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, + }) + } + + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { + response.Packages = append(response.Packages, pkg) + } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) + + return response, nil +} + +func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { + if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { + return false + } + + goV, err := state.getGoVersion() + if err != nil { + return false + } + + // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. + // The import stack behaves differently for these versions than newer Go versions. + if goV < 15 { + return len(p.Error.ImportStack) == 0 + } + + // On Go 1.15 and later, only parse filenames out of error if there's no import stack, + // or the current package is at the top of the import stack. This is not guaranteed + // to work perfectly, but should avoid some cases where files in errors don't belong to this + // package. + return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath +} + +// getGoVersion returns the effective minor version of the go command. +func (state *golistState) getGoVersion() (int, error) { + state.goVersionOnce.Do(func() { + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + }) + return state.goVersion, state.goVersionError +} + +// getPkgPath finds the package path of a directory if it's relative to a root +// directory. +func (state *golistState) getPkgPath(dir string) (string, bool, error) { + absDir, err := filepath.Abs(dir) + if err != nil { + return "", false, err + } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { + // Make sure that the directory is in the module, + // to avoid creating a path relative to another module. 
+ if !strings.HasPrefix(absDir, rdir) { + continue + } + // TODO(matloob): This doesn't properly handle symlinks. + r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + if rpath != "" { + // We choose only one root even though the directory even it can belong in multiple modules + // or GOPATH entries. This is okay because we only need to work with absolute dirs when a + // file is missing from disk, for instance when gopls calls go/packages in an overlay. + // Once the file is saved, gopls, or the next invocation of the tool will get the correct + // result straight from golist. + // TODO(matloob): Implement module tiebreaking? + return path.Join(rpath, filepath.ToSlash(r)), true, nil + } + return filepath.ToSlash(r), true, nil + } + return "", false, nil +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&NeedTypes != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. 
+ addFields("Dir", "Export") + } + if cfg.Mode&needInternalForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "-e", jsonFlag(cfg, goVersion), + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), + } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. +func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + Overlay: cfg.goListOverlayFile, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + inv.Verb = verb + inv.Args = args + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. 
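To make golistargs above concrete: assuming usesExportData reports false for this configuration (that helper is defined elsewhere in this package), a Config with Mode = NeedName|NeedImports and Tests = false, running against a Go 1.21+ toolchain, produces an invocation along the lines of

    go list -e -json=Name,ImportPath,Error,DepOnly,Imports,ImportMap -compiled=false -test=false -export=false -deps=true -find=false -pgo=off -- ./...

where "./..." stands in for whatever patterns the caller supplied.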
+ isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + msg := msg[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Similar to the previous error, but currently lacks a fix in Go. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. + const noSuchDirectory = "no such directory" + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { + errstr := stderr.String() + abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + abspath, strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. + // Note that the error message we look for in this case is different that the one looked for above. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a + // directory outside any module. 
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Another variation of the previous error + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // Don't clobber stdout if `go list` actually returned something. + if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} + +// getSizesForArgs queries 'go list' for the appropriate +// Compiler and GOARCH arguments to pass to [types.SizesFor]. 
+func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. + return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 0000000..d823c47 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,83 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "path/filepath" + + "golang.org/x/tools/internal/gocommand" +) + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. + var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. 
+ absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. + if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } + } + i++ + } + return roots, nil +} + +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { + m := map[string]string{} + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" + } + return m, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 0000000..5c080d2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return "LoadMode(0)" + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 0000000..0b6bfaf --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1515 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/sync/errgroup" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// +// ID and Errors (if present) will always be filled. +// [Load] may return more information than requested. 
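A small sketch of how the String method above renders combined mode bits (expected output shown in comments):

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	// The zero value has no bits set and prints as LoadMode(0).
	fmt.Println(packages.LoadMode(0)) // LoadMode(0)

	// Combined bits are rendered in declaration order, joined by '|'.
	fmt.Println(packages.NeedName | packages.NeedFiles | packages.NeedImports) // LoadMode(NeedName|NeedFiles|NeedImports)
}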
+// +// Unfortunately there are a number of open bugs related to +// interactions among the LoadMode bits: +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportFile adds ExportFile. + NeedExportFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax and Fset. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// +// Calls to Load do not modify this struct. +// +// TODO(adonovan): #67702: this is currently false: in fact, +// calls to [Load] do not modify the public fields of this struct, but +// may modify hidden fields, so concurrent calls to [Load] must not +// use the same Config. But perhaps we should reestablish the +// documented invariant. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // Cancelling the context may cause [Load] to abort and + // return an error. 
+ Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. + Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. + // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. + ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay is a mapping from absolute file paths to file contents. + // + // For each map entry, [Load] uses the alternative file + // contents provided by the overlay mapping instead of reading + // from the file system. This mechanism can be used to enable + // editor-integrated tools to correctly analyze the contents + // of modified but unsaved buffers, for example. + // + // The overlay mapping is passed to the build system's driver + // (see "The driver protocol") so that it too can report + // consistent package metadata about unsaved files. However, + // drivers may vary in their level of support for overlays. 
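As a usage sketch of the Overlay and Mode fields described above, together with Load and PrintErrors from this package: the module layout, file path, and file contents below are hypothetical.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedSyntax,
		// Pretend the user has unsaved edits to this (hypothetical) file.
		Overlay: map[string][]byte{
			"/home/user/proj/hello/hello.go": []byte("package hello\n\nconst Greeting = \"hi\"\n"),
		},
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err) // invalid patterns, driver failure, etc.
	}
	// Per-package problems are reported on pkg.Errors, not as a Load error.
	if n := packages.PrintErrors(pkgs); n > 0 {
		log.Fatalf("%d packages had errors", n)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, len(pkg.Syntax), "files parsed")
	}
}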
+ Overlay map[string][]byte + + // goListOverlayFile is the JSON file that encodes the Overlay + // mapping, used by 'go list -overlay=...' + goListOverlayFile string +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// The [Config.Mode] field is a set of bits that determine what kinds +// of information should be computed and returned. Modes that require +// more information tend to be slower. See [LoadMode] for details +// and important caveats. Its zero value is equivalent to +// NeedName | NeedFiles | NeedCompiledGoFiles. +// +// Each call to Load returns a new set of [Package] instances. +// The Packages and their Imports form a directed acyclic graph. +// +// If the [NeedTypes] mode flag was set, each call to Load uses a new +// [types.Importer], so [types.Object] and [types.Type] values from +// different calls to Load must not be mixed as they will have +// inconsistent notions of type identity. +// +// If any of the patterns was invalid as defined by the +// underlying build system, Load returns an error. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + ld := newLoader(cfg) + response, external, err := defaultDriver(&ld.Config, patterns...) + if err != nil { + return nil, err + } + + ld.sizes = types.SizesFor(response.Compiler, response.Arch) + if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { + // Type size information is needed but unavailable. + if external { + // An external driver may fail to populate the Compiler/GOARCH fields, + // especially since they are relatively new (see #63700). + // Provide a sensible fallback in this case. + ld.sizes = types.SizesFor("gc", runtime.GOARCH) + if ld.sizes == nil { // gccgo-only arch + ld.sizes = types.SizesFor("gc", "amd64") + } + } else { + // Go list should never fail to deliver accurate size information. + // Reject the whole Load since the error is the same for every package. + return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", + response.Compiler, response.Arch) + } + } + + return ld.refine(response) +} + +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. +// The boolean result indicates that an external driver handled the request. +func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { + const ( + // windowsArgMax specifies the maximum command line length for + // the Windows' CreateProcess function. + windowsArgMax = 32767 + // maxEnvSize is a very rough estimation of the maximum environment + // size of a user. + maxEnvSize = 16384 + // safeArgMax specifies the maximum safe command line length to use + // by the underlying driver excl. the environment. 
We choose the Windows' + // ARG_MAX as the starting point because it's one of the lowest ARG_MAX + // constants out of the different supported platforms, + // e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results. + safeArgMax = windowsArgMax - maxEnvSize + ) + chunks, err := splitIntoChunks(patterns, safeArgMax) + if err != nil { + return nil, false, err + } + + if driver := findExternalDriver(cfg); driver != nil { + response, err := callDriverOnChunks(driver, cfg, chunks) + if err != nil { + return nil, false, err + } else if !response.NotHandled { + return response, true, nil + } + // (fall through) + } + + // go list fallback + // + // Write overlays once, as there are many calls + // to 'go list' (one per chunk plus others too). + overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + if err != nil { + return nil, false, err + } + defer cleanupOverlay() + cfg.goListOverlayFile = overlay + + response, err := callDriverOnChunks(goListDriver, cfg, chunks) + if err != nil { + return nil, false, err + } + return response, false, err +} + +// splitIntoChunks chunks the slice so that the total number of characters +// in a chunk is no longer than argMax. +func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { + if argMax <= 0 { + return nil, errors.New("failed to split patterns into chunks, negative safe argMax value") + } + var chunks [][]string + charsInChunk := 0 + nextChunkStart := 0 + for i, v := range patterns { + vChars := len(v) + if vChars > argMax { + // a single pattern is longer than the maximum safe ARG_MAX, hardly should happen + return nil, errors.New("failed to split patterns into chunks, a pattern is too long") + } + charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too + if charsInChunk > argMax { + chunks = append(chunks, patterns[nextChunkStart:i]) + nextChunkStart = i + charsInChunk = vChars + } + } + // add the last chunk + if nextChunkStart < len(patterns) { + chunks = append(chunks, patterns[nextChunkStart:]) + } + return chunks, nil +} + +func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { + if len(chunks) == 0 { + return driver(cfg) + } + responses := make([]*DriverResponse, len(chunks)) + errNotHandled := errors.New("driver returned NotHandled") + var g errgroup.Group + for i, chunk := range chunks { + i := i + chunk := chunk + g.Go(func() (err error) { + responses[i], err = driver(cfg, chunk...) + if responses[i] != nil && responses[i].NotHandled { + err = errNotHandled + } + return err + }) + } + if err := g.Wait(); err != nil { + if errors.Is(err, errNotHandled) { + return &DriverResponse{NotHandled: true}, nil + } + return nil, err + } + return mergeResponses(responses...), nil +} + +func mergeResponses(responses ...*DriverResponse) *DriverResponse { + if len(responses) == 0 { + return nil + } + response := newDeduper() + response.dr.NotHandled = false + response.dr.Compiler = responses[0].Compiler + response.dr.Arch = responses[0].Arch + response.dr.GoVersion = responses[0].GoVersion + for _, v := range responses { + response.addAll(v) + } + return response.dr +} + +// A Package describes a loaded Go package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. 
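To illustrate the chunking above with toy numbers (a real argMax is tens of thousands of characters), a hedged in-package trace; splitIntoChunks is unexported, so this fragment only makes sense inside the packages package itself.

	// Each pattern contributes len(pattern)+1 characters to the running count.
	chunks, _ := splitIntoChunks([]string{"aaa", "bbb", "ccc"}, 7)
	// "aaa" brings the count to 4; adding "bbb" would make it 8 > 7,
	// so the first chunk closes and counting restarts at "bbb".
	// Result: [["aaa"] ["bbb" "ccc"]]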
+ // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + + // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. + EmbedPatterns []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. + IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Module is the module information for the package if it exists. + // + // Note: it may be missing for std and cmd; see Go issue #65816. + Module *Module + + // -- The following fields are not part of the driver JSON schema. -- + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + // + // Each call to [Load] returns a consistent set of type + // symbols, as defined by the comment at [types.Identical]. + // Avoid mixing type information from two or more calls to [Load]. + Types *types.Package `json:"-"` + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet `json:"-"` + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool `json:"-"` + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + // + // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are + // removed. 
If parsing returned nil, Syntax may be shorter than CompiledGoFiles. + Syntax []*ast.File `json:"-"` + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info `json:"-"` + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes `json:"-"` + + // -- internal -- + + // forTest is the package under test, if any. + forTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError +} + +// Module provides module information for a package. +// +// It also defines part of the JSON schema of [DriverResponse]. +// See the package documentation for an overview. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. +type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } + packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. 
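As a usage sketch of the fields documented above: the LoadMode bits in Config decide which Package fields Load fills in. The "./..." pattern is an example, not part of this package.

package main

import (
    "fmt"
    "log"

    "golang.org/x/tools/go/packages"
)

func main() {
    cfg := &packages.Config{
        // Request names, file lists and parsed syntax; fields tied to
        // other bits (Types, TypesInfo, ...) are left zeroed.
        Mode: packages.NeedName | packages.NeedFiles | packages.NeedSyntax,
    }
    pkgs, err := packages.Load(cfg, "./...") // example pattern
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range pkgs {
        fmt.Println(p.ID, p.Name, len(p.GoFiles), "Go files,", len(p.Syntax), "parsed files")
    }
}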
+// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. +func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, + IgnoredFiles: flat.IgnoredFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes // non-nil if needed by mode + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. 
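The MarshalJSON/UnmarshalJSON pair described here flattens Imports into a map of import path to package ID, so a round trip yields stub imports that carry only IDs. A minimal sketch with a hypothetical package ID:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "golang.org/x/tools/go/packages"
)

func main() {
    p := &packages.Package{
        ID:      "example.com/m", // hypothetical ID
        Name:    "m",
        PkgPath: "example.com/m",
        Imports: map[string]*packages.Package{
            "fmt": {ID: "fmt", Name: "fmt"},
        },
    }
    data, err := json.Marshal(p) // uses Package.MarshalJSON
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", data) // Imports is flattened to {"fmt":"fmt"}

    var q packages.Package
    if err := json.Unmarshal(data, &q); err != nil { // uses Package.UnmarshalJSON
        log.Fatal(err)
    }
    // After unmarshalling, imports are stubs carrying only the ID.
    fmt.Println(q.Imports["fmt"].ID, q.Imports["fmt"].Name == "")
}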
+ if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. + if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + + return ld +} + +// refine connects the supplied packages into a graph and then adds type +// and syntax information as requested by the LoadMode. +func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { + roots := response.Roots + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range response.Packages { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + + // Overlays can invalidate export data. + // TODO(matloob): make this check fine-grained based on dependencies on overlaid files + exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" + // This package needs type information if the caller requested types and the package is + // either a root, or it's a non-root and the user requested dependencies ... + needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + // This package needs source if the call requested source (or types info, which implies source) + // and the package is either a root, or itas a non- root and the user requested dependencies... + needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || + // ... or if we need types and the exportData is invalid. We fall back to (incompletely) + // typechecking packages from source if they fail to compile. + (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" + lpkg := &loaderPackage{ + Package: pkg, + needtypes: needtypes, + needsrc: needsrc, + goVersion: response.GoVersion, + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + if ld.Mode&NeedImports != 0 { + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. 
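Two of the defaults applied by newLoader above are visible to callers: a zero Config.Mode behaves like NeedName|NeedFiles|NeedCompiledGoFiles, and driver logging goes through Config.Logf when one is provided (otherwise GOPACKAGESDEBUG=true falls back to log.Printf). A small sketch:

package main

import (
    "fmt"
    "log"
    "os"

    "golang.org/x/tools/go/packages"
)

func main() {
    cfg := &packages.Config{
        // Supplying Logf routes go/packages debug output through our logger;
        // without it, GOPACKAGESDEBUG=true would default to log.Printf.
        Logf: log.New(os.Stderr, "packages: ", 0).Printf,
        // Mode left as zero: behaves like NeedName|NeedFiles|NeedCompiledGoFiles.
    }
    pkgs, err := packages.Load(cfg, "fmt")
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range pkgs {
        fmt.Println(p.Name, len(p.GoFiles), len(p.CompiledGoFiles))
    }
}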
+ // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, + // the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } + } + + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + + } else { + // !NeedImports: drop the stub (ID-only) import packages + // that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } + + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + // If the context is done, return its error and + // throw out [likely] incomplete packages. + if err := ld.Context.Err(); err != nil { + return nil, err + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, + // to catch programs that use more than they request. 
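The white/grey/black traversal above is a standard DFS colouring. The toy sketch below (an arbitrary string graph, not loaderPackages) shows how meeting a grey neighbour identifies an import cycle while the materialized graph stays a DAG.

package main

import "fmt"

const (
    white = 0 // not yet visited
    grey  = 1 // on the DFS stack (in progress)
    black = 2 // fully visited
)

// visitAll colours the nodes of graph and reports any edge that would
// close a cycle, mirroring the grey-node check used by refine.
func visitAll(graph map[string][]string) {
    color := map[string]int{}
    var visit func(n string)
    visit = func(n string) {
        switch color[n] {
        case black:
            return
        case grey:
            panic("internal error: grey node") // defensive, as above
        }
        color[n] = grey
        for _, m := range graph[n] {
            if color[m] == grey {
                fmt.Printf("import cycle through %s -> %s\n", n, m)
                continue // record the error, keep the graph acyclic
            }
            visit(m)
        }
        color[n] = black
    }
    for n := range graph {
        visit(n)
    }
}

func main() {
    visitAll(map[string][]string{
        "a": {"b"},
        "b": {"c"},
        "c": {"a"}, // back edge: cycle a -> b -> c -> a
    })
}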
+ if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Fset = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Start shutting down if the context is done and do not load + // source or export data files. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. 
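Because of the field-clearing loop above, data not requested in Mode is deliberately zeroed even when the loader computed it internally. A hedged sketch of what a caller observes:

package main

import (
    "fmt"
    "log"

    "golang.org/x/tools/go/packages"
)

func main() {
    // Only NeedName is requested, so fields tied to other mode bits
    // (files, syntax, type information, ...) are cleared before return.
    pkgs, err := packages.Load(&packages.Config{Mode: packages.NeedName}, "fmt")
    if err != nil {
        log.Fatal(err)
    }
    p := pkgs[0]
    fmt.Println(p.Name)             // "fmt"
    fmt.Println(p.GoFiles == nil)   // true: NeedFiles not requested
    fmt.Println(p.Syntax == nil)    // true: NeedSyntax not requested
    fmt.Println(p.TypesInfo == nil) // true: NeedTypesInfo not requested
}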
+ var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? + var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + // Start shutting down if the context is done and do not type check. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. 
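The appendError conversion above funnels driver, parser, and type-checker failures into packages.Error values distinguished by Kind. A short consumer-side sketch (the "./..." pattern is an example):

package main

import (
    "fmt"
    "log"

    "golang.org/x/tools/go/packages"
)

func main() {
    mode := packages.NeedName | packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo
    pkgs, err := packages.Load(&packages.Config{Mode: mode}, "./...") // example pattern
    if err != nil {
        log.Fatal(err) // Load itself failed (e.g. bad pattern or go list failure)
    }
    for _, p := range pkgs {
        for _, e := range p.Errors {
            switch e.Kind {
            case packages.ListError:
                fmt.Println("driver/metadata error:", e)
            case packages.ParseError:
                fmt.Println("parse error:", e)
            case packages.TypeError:
                fmt.Println("type error:", e)
            default:
                fmt.Println("other error:", e)
            }
        }
    }
}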
+ if ld.Context.Err() != nil { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + versions.InitFileVersions(lpkg.TypesInfo) + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, // may be nil + } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + tc.GoVersion = "go" + lpkg.Module.GoVersion + } + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { + appendError(Error{ + Msg: "typecheckCgo requires Go 1.15+", + Kind: ListError, + }) + return + } + } + + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + lpkg.importErrors = nil // no longer needed + + // In go/types go1.21 and go1.22, Checker.Files failed fast with a + // a "too new" error, without calling tc.Error and without + // proceeding to type-check the package (#66525). + // We rely on the runtimeVersion error to give the suggested remedy. + if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 { + if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") { + appendError(types.Error{ + Fset: ld.Fset, + Pos: lpkg.Syntax[0].Package, + Msg: msg, + }) + } + } + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. + if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // If types.Checker.Files had an error that was unreported, + // make sure to report the unknown error so the package is illTyped. + if typErr != nil && len(lpkg.Errors) == 0 { + appendError(typErr) + } + + // Record accumulated errors. 
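The types.Info/types.Config/NewChecker plumbing above is the ordinary go/types API, and it can be driven directly outside the loader. A minimal single-file sketch using the default importer:

package main

import (
    "fmt"
    "go/ast"
    "go/importer"
    "go/parser"
    "go/token"
    "go/types"
    "log"
)

func main() {
    const src = `package p
import "fmt"
func Hello() { fmt.Println("hi") }`

    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, parser.AllErrors|parser.ParseComments)
    if err != nil {
        log.Fatal(err)
    }

    info := &types.Info{
        Types: make(map[ast.Expr]types.TypeAndValue),
        Defs:  make(map[*ast.Ident]types.Object),
        Uses:  make(map[*ast.Ident]types.Object),
    }
    conf := &types.Config{Importer: importer.Default()}
    pkg := types.NewPackage("example.com/p", "p") // hypothetical path
    check := types.NewChecker(conf, fset, pkg, info)
    if err := check.Files([]*ast.File{f}); err != nil {
        log.Fatal(err)
    }
    fmt.Println(pkg.Path(), "type-checked;", len(info.Defs), "definitions recorded")
}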
+ illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = os.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData ensures that type information is present for the specified +// package, loading it from an export data file on the first request. +// On success it sets lpkg.Types to a new Package. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. 
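The ioLimit channel above is the usual counting-semaphore idiom: a buffered channel whose sends acquire a slot and whose receives release it. A self-contained sketch of the same pattern with hypothetical file names:

package main

import (
    "fmt"
    "os"
    "sync"
)

// ioLimit bounds the number of concurrent reads, like the package-level
// semaphore used by the loader above.
var ioLimit = make(chan bool, 20)

func readAll(filenames []string) ([][]byte, []error) {
    var wg sync.WaitGroup
    data := make([][]byte, len(filenames))
    errs := make([]error, len(filenames))
    for i, name := range filenames {
        wg.Add(1)
        go func(i int, name string) {
            defer wg.Done()
            ioLimit <- true // acquire a slot
            data[i], errs[i] = os.ReadFile(name)
            <-ioLimit // release the slot
        }(i, name)
    }
    wg.Wait()
    return data, errs
}

func main() {
    _, errs := readAll([]string{"a.go", "b.go"}) // hypothetical files
    fmt.Println(errs)
}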
(Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. + viewLen++ + } + if viewLen != len(view) { + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + return nil +} + +// impliedLoadMode returns loadMode with its dependencies. +func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. + loadMode |= NeedImports + } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. 
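loadFromExportData drives golang.org/x/tools/go/gcexportdata, which is also vendored by this change. A stand-alone sketch of the same read path; note that recent toolchains may not ship pre-built export data for the standard library, in which case Find reports nothing:

package main

import (
    "fmt"
    "go/token"
    "go/types"
    "log"
    "os"

    "golang.org/x/tools/go/gcexportdata"
)

func main() {
    // Locate compiled export data for "fmt" using go/build conventions.
    filename, path := gcexportdata.Find("fmt", "")
    if filename == "" {
        log.Fatal("no export data found for fmt")
    }
    f, err := os.Open(filename)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    r, err := gcexportdata.NewReader(f)
    if err != nil {
        log.Fatalf("reading %s: %v", filename, err)
    }

    // view maps package paths to packages already known to the caller;
    // Read records the packages it creates or completes here.
    fset := token.NewFileSet()
    view := make(map[string]*types.Package)
    pkg, err := gcexportdata.Read(r, fset, view, path)
    if err != nil {
        log.Fatalf("reading %s: %v", filename, err)
    }
    fmt.Println(pkg.Path(), "complete:", pkg.Complete())
}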
+ loadMode |= NeedModule + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} + +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 0000000..df14ffd --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + errModules := make(map[*Module]bool) + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + + // Print pkg.Module.Error once if present. + mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/go/ssa/TODO b/vendor/golang.org/x/tools/go/ssa/TODO new file mode 100644 index 0000000..6c35253 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/TODO @@ -0,0 +1,16 @@ +-*- text -*- + +SSA Generics to-do list +=========================== + +DOCUMENTATION: +- Read me for internals + +TYPE PARAMETERIZED GENERIC FUNCTIONS: +- sanity.go updates. +- Check source functions going to generics. +- Tests, tests, tests... + +USAGE: +- Back fill users for handling ssa.InstantiateGenerics being off. + diff --git a/vendor/golang.org/x/tools/go/ssa/block.go b/vendor/golang.org/x/tools/go/ssa/block.go new file mode 100644 index 0000000..28170c7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/block.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import "fmt" + +// This file implements the BasicBlock type. + +// addEdge adds a control-flow graph edge from from to to. +func addEdge(from, to *BasicBlock) { + from.Succs = append(from.Succs, to) + to.Preds = append(to.Preds, from) +} + +// Parent returns the function that contains block b. 
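Visit and PrintErrors above are the usual way to walk the import graph and report accumulated errors. A short usage sketch (the pattern is an example):

package main

import (
    "fmt"
    "log"
    "os"

    "golang.org/x/tools/go/packages"
)

func main() {
    cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps}
    pkgs, err := packages.Load(cfg, "golang.org/x/tools/go/ssa") // example pattern
    if err != nil {
        log.Fatal(err)
    }
    if packages.PrintErrors(pkgs) > 0 {
        os.Exit(1)
    }
    // Postorder: dependencies are printed before the packages that import them.
    packages.Visit(pkgs, nil, func(p *packages.Package) {
        fmt.Println(p.ID)
    })
}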
+func (b *BasicBlock) Parent() *Function { return b.parent } + +// String returns a human-readable label of this block. +// It is not guaranteed unique within the function. +func (b *BasicBlock) String() string { + return fmt.Sprintf("%d", b.Index) +} + +// emit appends an instruction to the current basic block. +// If the instruction defines a Value, it is returned. +func (b *BasicBlock) emit(i Instruction) Value { + i.setBlock(b) + b.Instrs = append(b.Instrs, i) + v, _ := i.(Value) + return v +} + +// predIndex returns the i such that b.Preds[i] == c or panics if +// there is none. +func (b *BasicBlock) predIndex(c *BasicBlock) int { + for i, pred := range b.Preds { + if pred == c { + return i + } + } + panic(fmt.Sprintf("no edge %s -> %s", c, b)) +} + +// hasPhi returns true if b.Instrs contains φ-nodes. +func (b *BasicBlock) hasPhi() bool { + _, ok := b.Instrs[0].(*Phi) + return ok +} + +// phis returns the prefix of b.Instrs containing all the block's φ-nodes. +func (b *BasicBlock) phis() []Instruction { + for i, instr := range b.Instrs { + if _, ok := instr.(*Phi); !ok { + return b.Instrs[:i] + } + } + return nil // unreachable in well-formed blocks +} + +// replacePred replaces all occurrences of p in b's predecessor list with q. +// Ordinarily there should be at most one. +func (b *BasicBlock) replacePred(p, q *BasicBlock) { + for i, pred := range b.Preds { + if pred == p { + b.Preds[i] = q + } + } +} + +// replaceSucc replaces all occurrences of p in b's successor list with q. +// Ordinarily there should be at most one. +func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { + for i, succ := range b.Succs { + if succ == p { + b.Succs[i] = q + } + } +} + +// removePred removes all occurrences of p in b's +// predecessor list and φ-nodes. +// Ordinarily there should be at most one. +func (b *BasicBlock) removePred(p *BasicBlock) { + phis := b.phis() + + // We must preserve edge order for φ-nodes. + j := 0 + for i, pred := range b.Preds { + if pred != p { + b.Preds[j] = b.Preds[i] + // Strike out φ-edge too. + for _, instr := range phis { + phi := instr.(*Phi) + phi.Edges[j] = phi.Edges[i] + } + j++ + } + } + // Nil out b.Preds[j:] and φ-edges[j:] to aid GC. + for i := j; i < len(b.Preds); i++ { + b.Preds[i] = nil + for _, instr := range phis { + instr.(*Phi).Edges[i] = nil + } + } + b.Preds = b.Preds[:j] + for _, instr := range phis { + phi := instr.(*Phi) + phi.Edges = phi.Edges[:j] + } +} diff --git a/vendor/golang.org/x/tools/go/ssa/blockopt.go b/vendor/golang.org/x/tools/go/ssa/blockopt.go new file mode 100644 index 0000000..7dabce8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/blockopt.go @@ -0,0 +1,183 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// Simple block optimizations to simplify the control flow graph. + +// TODO(adonovan): opt: instead of creating several "unreachable" blocks +// per function in the Builder, reuse a single one (e.g. at Blocks[1]) +// to reduce garbage. + +import ( + "fmt" + "os" +) + +// If true, perform sanity checking and show progress at each +// successive iteration of optimizeBlocks. Very verbose. +const debugBlockOpt = false + +// markReachable sets Index=-1 for all blocks reachable from b. 
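The BasicBlock helpers here maintain the exported Preds/Succs/Instrs fields, so a built function's CFG can be inspected from the public API. A small library-style sketch; it assumes an *ssa.Function produced elsewhere (for example by ssautil.Packages followed by Program.Build):

package cfgdump

import (
    "fmt"

    "golang.org/x/tools/go/ssa"
)

// PrintCFG walks a built function's control-flow graph using the
// exported BasicBlock fields maintained by the helpers above.
func PrintCFG(fn *ssa.Function) {
    for _, b := range fn.Blocks {
        fmt.Printf("block %s (%s): %d instrs, preds=%v succs=%v\n",
            b, b.Comment, len(b.Instrs), b.Preds, b.Succs)
    }
}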
+func markReachable(b *BasicBlock) { + b.Index = -1 + for _, succ := range b.Succs { + if succ.Index == 0 { + markReachable(succ) + } + } +} + +// deleteUnreachableBlocks marks all reachable blocks of f and +// eliminates (nils) all others, including possibly cyclic subgraphs. +func deleteUnreachableBlocks(f *Function) { + const white, black = 0, -1 + // We borrow b.Index temporarily as the mark bit. + for _, b := range f.Blocks { + b.Index = white + } + markReachable(f.Blocks[0]) + if f.Recover != nil { + markReachable(f.Recover) + } + for i, b := range f.Blocks { + if b.Index == white { + for _, c := range b.Succs { + if c.Index == black { + c.removePred(b) // delete white->black edge + } + } + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "unreachable", b) + } + f.Blocks[i] = nil // delete b + } + } + f.removeNilBlocks() +} + +// jumpThreading attempts to apply simple jump-threading to block b, +// in which a->b->c become a->c if b is just a Jump. +// The result is true if the optimization was applied. +func jumpThreading(f *Function, b *BasicBlock) bool { + if b.Index == 0 { + return false // don't apply to entry block + } + if b.Instrs == nil { + return false + } + if _, ok := b.Instrs[0].(*Jump); !ok { + return false // not just a jump + } + c := b.Succs[0] + if c == b { + return false // don't apply to degenerate jump-to-self. + } + if c.hasPhi() { + return false // not sound without more effort + } + for j, a := range b.Preds { + a.replaceSucc(b, c) + + // If a now has two edges to c, replace its degenerate If by Jump. + if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c { + jump := new(Jump) + jump.setBlock(a) + a.Instrs[len(a.Instrs)-1] = jump + a.Succs = a.Succs[:1] + c.removePred(b) + } else { + if j == 0 { + c.replacePred(b, a) + } else { + c.Preds = append(c.Preds, a) + } + } + + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c) + } + } + f.Blocks[b.Index] = nil // delete b + return true +} + +// fuseBlocks attempts to apply the block fusion optimization to block +// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1. +// The result is true if the optimization was applied. +func fuseBlocks(f *Function, a *BasicBlock) bool { + if len(a.Succs) != 1 { + return false + } + b := a.Succs[0] + if len(b.Preds) != 1 { + return false + } + + // Degenerate &&/|| ops may result in a straight-line CFG + // containing φ-nodes. (Ideally we'd replace such them with + // their sole operand but that requires Referrers, built later.) + if b.hasPhi() { + return false // not sound without further effort + } + + // Eliminate jump at end of A, then copy all of B across. + a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...) + for _, instr := range b.Instrs { + instr.setBlock(a) + } + + // A inherits B's successors + a.Succs = append(a.succs2[:0], b.Succs...) + + // Fix up Preds links of all successors of B. + for _, c := range b.Succs { + c.replacePred(b, a) + } + + if debugBlockOpt { + fmt.Fprintln(os.Stderr, "fuseBlocks", a, b) + } + + f.Blocks[b.Index] = nil // delete b + return true +} + +// optimizeBlocks() performs some simple block optimizations on a +// completed function: dead block elimination, block fusion, jump +// threading. +func optimizeBlocks(f *Function) { + deleteUnreachableBlocks(f) + + // Loop until no further progress. 
+ changed := true + for changed { + changed = false + + if debugBlockOpt { + f.WriteTo(os.Stderr) + mustSanityCheck(f, nil) + } + + for _, b := range f.Blocks { + // f.Blocks will temporarily contain nils to indicate + // deleted blocks; we remove them at the end. + if b == nil { + continue + } + + // Fuse blocks. b->c becomes bc. + if fuseBlocks(f, b) { + changed = true + } + + // a->b->c becomes a->c if b contains only a Jump. + if jumpThreading(f, b) { + changed = true + continue // (b was disconnected) + } + } + } + f.removeNilBlocks() +} diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go new file mode 100644 index 0000000..55943e4 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -0,0 +1,3276 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines the builder, which builds SSA-form IR for function bodies. +// +// SSA construction has two phases, "create" and "build". First, one +// or more packages are created in any order by a sequence of calls to +// CreatePackage, either from syntax or from mere type information. +// Each created package has a complete set of Members (const, var, +// type, func) that can be accessed through methods like +// Program.FuncValue. +// +// It is not necessary to call CreatePackage for all dependencies of +// each syntax package, only for its direct imports. (In future +// perhaps even this restriction may be lifted.) +// +// Second, packages created from syntax are built, by one or more +// calls to Package.Build, which may be concurrent; or by a call to +// Program.Build, which builds all packages in parallel. Building +// traverses the type-annotated syntax tree of each function body and +// creates SSA-form IR, a control-flow graph of instructions, +// populating fields such as Function.Body, .Params, and others. +// +// Building may create additional methods, including: +// - wrapper methods (e.g. for embeddding, or implicit &recv) +// - bound method closures (e.g. for use(recv.f)) +// - thunks (e.g. for use(I.f) or use(T.f)) +// - generic instances (e.g. to produce f[int] from f[any]). +// As these methods are created, they are added to the build queue, +// and then processed in turn, until a fixed point is reached, +// Since these methods might belong to packages that were not +// created (by a call to CreatePackage), their Pkg field is unset. +// +// Instances of generic functions may be either instantiated (f[int] +// is a copy of f[T] with substitutions) or wrapped (f[int] delegates +// to f[T]), depending on the availability of generic syntax and the +// InstantiateGenerics mode flag. +// +// Each package has an initializer function named "init" that calls +// the initializer functions of each direct import, computes and +// assigns the initial value of each global variable, and calls each +// source-level function named "init". (These generate SSA functions +// named "init#1", "init#2", etc.) +// +// Runtime types +// +// Each MakeInterface operation is a conversion from a non-interface +// type to an interface type. The semantics of this operation requires +// a runtime type descriptor, which is the type portion of an +// interface, and the value abstracted by reflect.Type. 
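The create/build phases described above are normally driven through golang.org/x/tools/go/ssa/ssautil. A sketch of the typical pipeline, loading "fmt" as an example:

package main

import (
    "fmt"
    "log"

    "golang.org/x/tools/go/packages"
    "golang.org/x/tools/go/ssa"
    "golang.org/x/tools/go/ssa/ssautil"
)

func main() {
    // Load source, types and syntax; this is the input the create phase needs.
    mode := packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles |
        packages.NeedImports | packages.NeedDeps | packages.NeedTypes |
        packages.NeedSyntax | packages.NeedTypesInfo
    pkgs, err := packages.Load(&packages.Config{Mode: mode}, "fmt")
    if err != nil {
        log.Fatal(err)
    }
    if packages.PrintErrors(pkgs) > 0 {
        log.Fatal("packages contain errors")
    }

    // Create phase: one ssa.Package per loaded package.
    prog, ssaPkgs := ssautil.Packages(pkgs, ssa.SanityCheckFunctions)

    // Build phase: construct SSA-form IR for all function bodies.
    prog.Build()

    for _, p := range ssaPkgs {
        if p != nil { // nil for packages that failed to load
            fmt.Println(p.Pkg.Path(), "has", len(p.Members), "members")
        }
    }
}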
+// +// The program accumulates all non-parameterized types that are +// encountered as MakeInterface operands, along with all types that +// may be derived from them using reflection. This set is available as +// Program.RuntimeTypes, and the methods of these types may be +// reachable via interface calls or reflection even if they are never +// referenced from the SSA IR. (In practice, algorithms such as RTA +// that compute reachability from package main perform their own +// tracking of runtime types at a finer grain, so this feature is not +// very useful.) +// +// Function literals +// +// Anonymous functions must be built as soon as they are encountered, +// as it may affect locals of the enclosing function, but they are not +// marked 'built' until the end of the outermost enclosing function. +// (Among other things, this causes them to be logged in top-down order.) +// +// The Function.build fields determines the algorithm for building the +// function body. It is cleared to mark that building is complete. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "os" + "runtime" + "sync" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" +) + +type opaqueType struct{ name string } + +func (t *opaqueType) String() string { return t.name } +func (t *opaqueType) Underlying() types.Type { return t } + +var ( + varOk = newVar("ok", tBool) + varIndex = newVar("index", tInt) + + // Type constants. + tBool = types.Typ[types.Bool] + tByte = types.Typ[types.Byte] + tInt = types.Typ[types.Int] + tInvalid = types.Typ[types.Invalid] + tString = types.Typ[types.String] + tUntypedNil = types.Typ[types.UntypedNil] + + tRangeIter = &opaqueType{"iter"} // the type of all "range" iterators + tDeferStack = types.NewPointer(&opaqueType{"deferStack"}) // the type of a "deferStack" from ssa:deferstack() + tEface = types.NewInterfaceType(nil, nil).Complete() + + // SSA Value constants. + vZero = intConst(0) + vOne = intConst(1) + vTrue = NewConst(constant.MakeBool(true), tBool) + vFalse = NewConst(constant.MakeBool(false), tBool) + + jReady = intConst(0) // range-over-func jump is READY + jBusy = intConst(-1) // range-over-func jump is BUSY + jDone = intConst(-2) // range-over-func jump is DONE + + // The ssa:deferstack intrinsic returns the current function's defer stack. + vDeferStack = &Builtin{ + name: "ssa:deferstack", + sig: types.NewSignatureType(nil, nil, nil, nil, types.NewTuple(anonVar(tDeferStack)), false), + } +) + +// builder holds state associated with the package currently being built. +// Its methods contain all the logic for AST-to-SSA conversion. +// +// All Functions belong to the same Program. +// +// builders are not thread-safe. +type builder struct { + fns []*Function // Functions that have finished their CREATE phases. + + finished int // finished is the length of the prefix of fns containing built functions. + + // The task of building shared functions within the builder. + // Shared functions are ones the the builder may either create or lookup. + // These may be built by other builders in parallel. + // The task is done when the builder has finished iterating, and it + // waits for all shared functions to finish building. + // nil implies there are no hared functions to wait on. + buildshared *task +} + +// shared is done when the builder has built all of the +// enqueued functions to a fixed-point. 
+func (b *builder) shared() *task { + if b.buildshared == nil { // lazily-initialize + b.buildshared = &task{done: make(chan unit)} + } + return b.buildshared +} + +// enqueue fn to be built by the builder. +func (b *builder) enqueue(fn *Function) { + b.fns = append(b.fns, fn) +} + +// waitForSharedFunction indicates that the builder should wait until +// the potentially shared function fn has finished building. +// +// This should include any functions that may be built by other +// builders. +func (b *builder) waitForSharedFunction(fn *Function) { + if fn.buildshared != nil { // maybe need to wait? + s := b.shared() + s.addEdge(fn.buildshared) + } +} + +// cond emits to fn code to evaluate boolean condition e and jump +// to t or f depending on its value, performing various simplifications. +// +// Postcondition: fn.currentBlock is nil. +func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) { + switch e := e.(type) { + case *ast.ParenExpr: + b.cond(fn, e.X, t, f) + return + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND: + ltrue := fn.newBasicBlock("cond.true") + b.cond(fn, e.X, ltrue, f) + fn.currentBlock = ltrue + b.cond(fn, e.Y, t, f) + return + + case token.LOR: + lfalse := fn.newBasicBlock("cond.false") + b.cond(fn, e.X, t, lfalse) + fn.currentBlock = lfalse + b.cond(fn, e.Y, t, f) + return + } + + case *ast.UnaryExpr: + if e.Op == token.NOT { + b.cond(fn, e.X, f, t) + return + } + } + + // A traditional compiler would simplify "if false" (etc) here + // but we do not, for better fidelity to the source code. + // + // The value of a constant condition may be platform-specific, + // and may cause blocks that are reachable in some configuration + // to be hidden from subsequent analyses such as bug-finding tools. + emitIf(fn, b.expr(fn, e), t, f) +} + +// logicalBinop emits code to fn to evaluate e, a &&- or +// ||-expression whose reified boolean value is wanted. +// The value is returned. +func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { + rhs := fn.newBasicBlock("binop.rhs") + done := fn.newBasicBlock("binop.done") + + // T(e) = T(e.X) = T(e.Y) after untyped constants have been + // eliminated. + // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool. + t := fn.typeOf(e) + + var short Value // value of the short-circuit path + switch e.Op { + case token.LAND: + b.cond(fn, e.X, rhs, done) + short = NewConst(constant.MakeBool(false), t) + + case token.LOR: + b.cond(fn, e.X, done, rhs) + short = NewConst(constant.MakeBool(true), t) + } + + // Is rhs unreachable? + if rhs.Preds == nil { + // Simplify false&&y to false, true||y to true. + fn.currentBlock = done + return short + } + + // Is done unreachable? + if done.Preds == nil { + // Simplify true&&y (or false||y) to y. + fn.currentBlock = rhs + return b.expr(fn, e.Y) + } + + // All edges from e.X to done carry the short-circuit value. + var edges []Value + for range done.Preds { + edges = append(edges, short) + } + + // The edge from e.Y to done carries the value of e.Y. + fn.currentBlock = rhs + edges = append(edges, b.expr(fn, e.Y)) + emitJump(fn, done) + fn.currentBlock = done + + phi := &Phi{Edges: edges, Comment: e.Op.String()} + phi.pos = e.OpPos + phi.typ = t + return done.emit(phi) +} + +// exprN lowers a multi-result expression e to SSA form, emitting code +// to fn and returning a single Value whose type is a *types.Tuple. +// The caller must access the components via Extract. 
+// +// Multi-result expressions include CallExprs in a multi-value +// assignment or return statement, and "value,ok" uses of +// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op +// is token.ARROW). +func (b *builder) exprN(fn *Function, e ast.Expr) Value { + typ := fn.typeOf(e).(*types.Tuple) + switch e := e.(type) { + case *ast.ParenExpr: + return b.exprN(fn, e.X) + + case *ast.CallExpr: + // Currently, no built-in function nor type conversion + // has multiple results, so we can avoid some of the + // cases for single-valued CallExpr. + var c Call + b.setCall(fn, e, &c.Call) + c.typ = typ + return fn.emit(&c) + + case *ast.IndexExpr: + mapt := typeparams.CoreType(fn.typeOf(e.X)).(*types.Map) // ,ok must be a map. + lookup := &Lookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), + CommaOk: true, + } + lookup.setType(typ) + lookup.setPos(e.Lbrack) + return fn.emit(lookup) + + case *ast.TypeAssertExpr: + return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e.Lparen) + + case *ast.UnaryExpr: // must be receive <- + unop := &UnOp{ + Op: token.ARROW, + X: b.expr(fn, e.X), + CommaOk: true, + } + unop.setType(typ) + unop.setPos(e.OpPos) + return fn.emit(unop) + } + panic(fmt.Sprintf("exprN(%T) in %s", e, fn)) +} + +// builtin emits to fn SSA instructions to implement a call to the +// built-in function obj with the specified arguments +// and return type. It returns the value defined by the result. +// +// The result is nil if no special handling was required; in this case +// the caller should treat this like an ordinary library function +// call. +func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value { + typ = fn.typ(typ) + switch obj.Name() { + case "make": + switch ct := typeparams.CoreType(typ).(type) { + case *types.Slice: + n := b.expr(fn, args[1]) + m := n + if len(args) == 3 { + m = b.expr(fn, args[2]) + } + if m, ok := m.(*Const); ok { + // treat make([]T, n, m) as new([m]T)[:n] + cap := m.Int64() + at := types.NewArray(ct.Elem(), cap) + v := &Slice{ + X: emitNew(fn, at, pos, "makeslice"), + High: n, + } + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + } + v := &MakeSlice{ + Len: n, + Cap: m, + } + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + + case *types.Map: + var res Value + if len(args) == 2 { + res = b.expr(fn, args[1]) + } + v := &MakeMap{Reserve: res} + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + + case *types.Chan: + var sz Value = vZero + if len(args) == 2 { + sz = b.expr(fn, args[1]) + } + v := &MakeChan{Size: sz} + v.setPos(pos) + v.setType(typ) + return fn.emit(v) + } + + case "new": + return emitNew(fn, typeparams.MustDeref(typ), pos, "new") + + case "len", "cap": + // Special case: len or cap of an array or *array is + // based on the type, not the value which may be nil. + // We must still evaluate the value, though. (If it + // was side-effect free, the whole call would have + // been constant-folded.) + t := typeparams.Deref(fn.typeOf(args[0])) + if at, ok := typeparams.CoreType(t).(*types.Array); ok { + b.expr(fn, args[0]) // for effects only + return intConst(at.Len()) + } + // Otherwise treat as normal. 
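For reference, the source forms that exprN handles correspond to ordinary Go comma-ok and multi-result expressions. The snippet below is plain Go, annotated with the SSA instructions the builder emits for each form:

package main

import "fmt"

func pair() (int, bool) { return 1, true }

func main() {
    m := map[string]int{"a": 1}
    ch := make(chan int, 1)
    ch <- 42
    var x interface{} = "hello"

    v, ok := m["a"]      // map lookup, comma-ok: Lookup with CommaOk=true
    s, ok2 := x.(string) // type assertion, comma-ok: TypeAssert (emitTypeTest)
    r, ok3 := <-ch       // channel receive, comma-ok: UnOp(ARROW) with CommaOk=true
    a, b := pair()       // multi-result call: Call, components read via Extract

    fmt.Println(v, ok, s, ok2, r, ok3, a, b)
}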
+ + case "panic": + fn.emit(&Panic{ + X: emitConv(fn, b.expr(fn, args[0]), tEface), + pos: pos, + }) + fn.currentBlock = fn.newBasicBlock("unreachable") + return vTrue // any non-nil Value will do + } + return nil // treat all others as a regular function call +} + +// addr lowers a single-result addressable expression e to SSA form, +// emitting code to fn and returning the location (an lvalue) defined +// by the expression. +// +// If escaping is true, addr marks the base variable of the +// addressable expression e as being a potentially escaping pointer +// value. For example, in this code: +// +// a := A{ +// b: [1]B{B{c: 1}} +// } +// return &a.b[0].c +// +// the application of & causes a.b[0].c to have its address taken, +// which means that ultimately the local variable a must be +// heap-allocated. This is a simple but very conservative escape +// analysis. +// +// Operations forming potentially escaping pointers include: +// - &x, including when implicit in method call or composite literals. +// - a[:] iff a is an array (not *array) +// - references to variables in lexically enclosing functions. +func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { + switch e := e.(type) { + case *ast.Ident: + if isBlankIdent(e) { + return blank{} + } + obj := fn.objectOf(e).(*types.Var) + var v Value + if g := fn.Prog.packageLevelMember(obj); g != nil { + v = g.(*Global) // var (address) + } else { + v = fn.lookup(obj, escaping) + } + return &address{addr: v, pos: e.Pos(), expr: e} + + case *ast.CompositeLit: + typ := typeparams.Deref(fn.typeOf(e)) + var v *Alloc + if escaping { + v = emitNew(fn, typ, e.Lbrace, "complit") + } else { + v = emitLocal(fn, typ, e.Lbrace, "complit") + } + var sb storebuf + b.compLit(fn, v, e, true, &sb) + sb.emit(fn) + return &address{addr: v, pos: e.Lbrace, expr: e} + + case *ast.ParenExpr: + return b.addr(fn, e.X, escaping) + + case *ast.SelectorExpr: + sel := fn.selection(e) + if sel == nil { + // qualified identifier + return b.addr(fn, e.Sel, escaping) + } + if sel.kind != types.FieldVal { + panic(sel) + } + wantAddr := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel) + index := sel.index[len(sel.index)-1] + fld := fieldOf(typeparams.MustDeref(v.Type()), index) // v is an addr. + + // Due to the two phases of resolving AssignStmt, a panic from x.f = p() + // when x is nil is required to come after the side-effects of + // evaluating x and p(). + emit := func(fn *Function) Value { + return emitFieldSelection(fn, v, index, true, e.Sel) + } + return &lazyAddress{addr: emit, t: fld.Type(), pos: e.Sel.Pos(), expr: e.Sel} + + case *ast.IndexExpr: + xt := fn.typeOf(e.X) + elem, mode := indexType(xt) + var x Value + var et types.Type + switch mode { + case ixArrVar: // array, array|slice, array|*array, or array|*array|slice. 
+ x = b.addr(fn, e.X, escaping).address(fn) + et = types.NewPointer(elem) + case ixVar: // *array, slice, *array|slice + x = b.expr(fn, e.X) + et = types.NewPointer(elem) + case ixMap: + mt := typeparams.CoreType(xt).(*types.Map) + return &element{ + m: b.expr(fn, e.X), + k: emitConv(fn, b.expr(fn, e.Index), mt.Key()), + t: mt.Elem(), + pos: e.Lbrack, + } + default: + panic("unexpected container type in IndexExpr: " + xt.String()) + } + index := b.expr(fn, e.Index) + if isUntyped(index.Type()) { + index = emitConv(fn, index, tInt) + } + // Due to the two phases of resolving AssignStmt, a panic from x[i] = p() + // when x is nil or i is out-of-bounds is required to come after the + // side-effects of evaluating x, i and p(). + emit := func(fn *Function) Value { + v := &IndexAddr{ + X: x, + Index: index, + } + v.setPos(e.Lbrack) + v.setType(et) + return fn.emit(v) + } + return &lazyAddress{addr: emit, t: typeparams.MustDeref(et), pos: e.Lbrack, expr: e} + + case *ast.StarExpr: + return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e} + } + + panic(fmt.Sprintf("unexpected address expression: %T", e)) +} + +type store struct { + lhs lvalue + rhs Value +} + +type storebuf struct{ stores []store } + +func (sb *storebuf) store(lhs lvalue, rhs Value) { + sb.stores = append(sb.stores, store{lhs, rhs}) +} + +func (sb *storebuf) emit(fn *Function) { + for _, s := range sb.stores { + s.lhs.store(fn, s.rhs) + } +} + +// assign emits to fn code to initialize the lvalue loc with the value +// of expression e. If isZero is true, assign assumes that loc holds +// the zero value for its type. +// +// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate +// better code in some cases, e.g., for composite literals in an +// addressable location. +// +// If sb is not nil, assign generates code to evaluate expression e, but +// not to update loc. Instead, the necessary stores are appended to the +// storebuf sb so that they can be executed later. This allows correct +// in-place update of existing variables when the RHS is a composite +// literal that may reference parts of the LHS. +func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) { + // Can we initialize it in place? + if e, ok := unparen(e).(*ast.CompositeLit); ok { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. + if !is[blank](loc) && isPointerCore(loc.typ()) { // avoid calling blank.typ() + ptr := b.addr(fn, e, true).address(fn) + // copy address + if sb != nil { + sb.store(loc, ptr) + } else { + loc.store(fn, ptr) + } + return + } + + if _, ok := loc.(*address); ok { + if isNonTypeParamInterface(loc.typ()) { + // e.g. var x interface{} = T{...} + // Can't in-place initialize an interface value. + // Fall back to copying. + } else { + // x = T{...} or x := T{...} + addr := loc.address(fn) + if sb != nil { + b.compLit(fn, addr, e, isZero, sb) + } else { + var sb storebuf + b.compLit(fn, addr, e, isZero, &sb) + sb.emit(fn) + } + + // Subtle: emit debug ref for aggregate types only; + // slice and map are handled by store ops in compLit. 
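+				// For example (hypothetically), in
+				//
+				//	var s struct{ a, b int }
+				//	s = struct{ a, b int }{a: 1}
+				//
+				// the fields are written directly into s's cell and, when
+				// debug info is enabled, a single DebugRef for the whole
+				// literal is emitted by the switch below.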
+ switch typeparams.CoreType(loc.typ()).(type) { + case *types.Struct, *types.Array: + emitDebugRef(fn, e, addr, true) + } + + return + } + } + } + + // simple case: just copy + rhs := b.expr(fn, e) + if sb != nil { + sb.store(loc, rhs) + } else { + loc.store(fn, rhs) + } +} + +// expr lowers a single-result expression e to SSA form, emitting code +// to fn and returning the Value defined by the expression. +func (b *builder) expr(fn *Function, e ast.Expr) Value { + e = unparen(e) + + tv := fn.info.Types[e] + + // Is expression a constant? + if tv.Value != nil { + return NewConst(tv.Value, fn.typ(tv.Type)) + } + + var v Value + if tv.Addressable() { + // Prefer pointer arithmetic ({Index,Field}Addr) followed + // by Load over subelement extraction (e.g. Index, Field), + // to avoid large copies. + v = b.addr(fn, e, false).load(fn) + } else { + v = b.expr0(fn, e, tv) + } + if fn.debugInfo() { + emitDebugRef(fn, e, v, false) + } + return v +} + +func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { + switch e := e.(type) { + case *ast.BasicLit: + panic("non-constant BasicLit") // unreachable + + case *ast.FuncLit: + /* function literal */ + anon := &Function{ + name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), + Signature: fn.typeOf(e.Type).(*types.Signature), + pos: e.Type.Func, + parent: fn, + anonIdx: int32(len(fn.AnonFuncs)), + Pkg: fn.Pkg, + Prog: fn.Prog, + syntax: e, + info: fn.info, + goversion: fn.goversion, + build: (*builder).buildFromSyntax, + topLevelOrigin: nil, // use anonIdx to lookup an anon instance's origin. + typeparams: fn.typeparams, // share the parent's type parameters. + typeargs: fn.typeargs, // share the parent's type arguments. + subst: fn.subst, // share the parent's type substitutions. + uniq: fn.uniq, // start from parent's unique values + } + fn.AnonFuncs = append(fn.AnonFuncs, anon) + // Build anon immediately, as it may cause fn's locals to escape. + // (It is not marked 'built' until the end of the enclosing FuncDecl.) + anon.build(b, anon) + fn.uniq = anon.uniq // resume after anon's unique values + if anon.FreeVars == nil { + return anon + } + v := &MakeClosure{Fn: anon} + v.setType(fn.typ(tv.Type)) + for _, fv := range anon.FreeVars { + v.Bindings = append(v.Bindings, fv.outer) + fv.outer = nil + } + return fn.emit(v) + + case *ast.TypeAssertExpr: // single-result form only + return emitTypeAssert(fn, b.expr(fn, e.X), fn.typ(tv.Type), e.Lparen) + + case *ast.CallExpr: + if fn.info.Types[e.Fun].IsType() { + // Explicit type conversion, e.g. string(x) or big.Int(x) + x := b.expr(fn, e.Args[0]) + y := emitConv(fn, x, fn.typ(tv.Type)) + if y != x { + switch y := y.(type) { + case *Convert: + y.pos = e.Lparen + case *ChangeType: + y.pos = e.Lparen + case *MakeInterface: + y.pos = e.Lparen + case *SliceToArrayPointer: + y.pos = e.Lparen + case *UnOp: // conversion from slice to array. + y.pos = e.Lparen + } + } + return y + } + // Call to "intrinsic" built-ins, e.g. new, make, panic. + if id, ok := unparen(e.Fun).(*ast.Ident); ok { + if obj, ok := fn.info.Uses[id].(*types.Builtin); ok { + if v := b.builtin(fn, obj, e.Args, fn.typ(tv.Type), e.Lparen); v != nil { + return v + } + } + } + // Regular function call. + var v Call + b.setCall(fn, e, &v.Call) + v.setType(fn.typ(tv.Type)) + return fn.emit(&v) + + case *ast.UnaryExpr: + switch e.Op { + case token.AND: // &X --- potentially escaping. 
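+			// e.g. (hypothetically)
+			//
+			//	x := 1
+			//	p := &x // x is marked escaping; its cell becomes a heap Alloc
+			//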
+ addr := b.addr(fn, e.X, true) + if _, ok := unparen(e.X).(*ast.StarExpr); ok { + // &*p must panic if p is nil (http://golang.org/s/go12nil). + // For simplicity, we'll just (suboptimally) rely + // on the side effects of a load. + // TODO(adonovan): emit dedicated nilcheck. + addr.load(fn) + } + return addr.address(fn) + case token.ADD: + return b.expr(fn, e.X) + case token.NOT, token.ARROW, token.SUB, token.XOR: // ! <- - ^ + v := &UnOp{ + Op: e.Op, + X: b.expr(fn, e.X), + } + v.setPos(e.OpPos) + v.setType(fn.typ(tv.Type)) + return fn.emit(v) + default: + panic(e.Op) + } + + case *ast.BinaryExpr: + switch e.Op { + case token.LAND, token.LOR: + return b.logicalBinop(fn, e) + case token.SHL, token.SHR: + fallthrough + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), fn.typ(tv.Type), e.OpPos) + + case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ: + cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos) + // The type of x==y may be UntypedBool. + return emitConv(fn, cmp, types.Default(fn.typ(tv.Type))) + default: + panic("illegal op in BinaryExpr: " + e.Op.String()) + } + + case *ast.SliceExpr: + var low, high, max Value + var x Value + xtyp := fn.typeOf(e.X) + switch typeparams.CoreType(xtyp).(type) { + case *types.Array: + // Potentially escaping. + x = b.addr(fn, e.X, true).address(fn) + case *types.Basic, *types.Slice, *types.Pointer: // *array + x = b.expr(fn, e.X) + default: + // core type exception? + if isBytestring(xtyp) { + x = b.expr(fn, e.X) // bytestring is handled as string and []byte. + } else { + panic("unexpected sequence type in SliceExpr") + } + } + if e.Low != nil { + low = b.expr(fn, e.Low) + } + if e.High != nil { + high = b.expr(fn, e.High) + } + if e.Slice3 { + max = b.expr(fn, e.Max) + } + v := &Slice{ + X: x, + Low: low, + High: high, + Max: max, + } + v.setPos(e.Lbrack) + v.setType(fn.typ(tv.Type)) + return fn.emit(v) + + case *ast.Ident: + obj := fn.info.Uses[e] + // Universal built-in or nil? + switch obj := obj.(type) { + case *types.Builtin: + return &Builtin{name: obj.Name(), sig: fn.instanceType(e).(*types.Signature)} + case *types.Nil: + return zeroConst(fn.instanceType(e)) + } + + // Package-level func or var? + // (obj must belong to same package or a direct import.) + if v := fn.Prog.packageLevelMember(obj); v != nil { + if g, ok := v.(*Global); ok { + return emitLoad(fn, g) // var (address) + } + callee := v.(*Function) // (func) + if callee.typeparams.Len() > 0 { + targs := fn.subst.types(instanceArgs(fn.info, e)) + callee = callee.instance(targs, b) + } + return callee + } + // Local var. + return emitLoad(fn, fn.lookup(obj.(*types.Var), false)) // var (address) + + case *ast.SelectorExpr: + sel := fn.selection(e) + if sel == nil { + // builtin unsafe.{Add,Slice} + if obj, ok := fn.info.Uses[e.Sel].(*types.Builtin); ok { + return &Builtin{name: obj.Name(), sig: fn.typ(tv.Type).(*types.Signature)} + } + // qualified identifier + return b.expr(fn, e.Sel) + } + switch sel.kind { + case types.MethodExpr: + // (*T).f or T.f, the method f from the method-set of type T. + // The result is a "thunk". + thunk := createThunk(fn.Prog, sel) + b.enqueue(thunk) + return emitConv(fn, thunk, fn.typ(tv.Type)) + + case types.MethodVal: + // e.f where e is an expression and f is a method. + // The result is a "bound". 
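+			// For instance (hypothetical source):
+			//
+			//	var buf bytes.Buffer
+			//	w := buf.WriteString
+			//
+			// lowers, roughly, to a MakeClosure over a synthetic bound-method
+			// wrapper whose single binding is the receiver &buf.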
+ obj := sel.obj.(*types.Func) + rt := fn.typ(recvType(obj)) + wantAddr := isPointer(rt) + escaping := true + v := b.receiver(fn, e.X, wantAddr, escaping, sel) + + if types.IsInterface(rt) { + // If v may be an interface type I (after instantiating), + // we must emit a check that v is non-nil. + if recv, ok := aliases.Unalias(sel.recv).(*types.TypeParam); ok { + // Emit a nil check if any possible instantiation of the + // type parameter is an interface type. + if typeSetOf(recv).Len() > 0 { + // recv has a concrete term its typeset. + // So it cannot be instantiated as an interface. + // + // Example: + // func _[T interface{~int; Foo()}] () { + // var v T + // _ = v.Foo // <-- MethodVal + // } + } else { + // rt may be instantiated as an interface. + // Emit nil check: typeassert (any(v)).(any). + emitTypeAssert(fn, emitConv(fn, v, tEface), tEface, token.NoPos) + } + } else { + // non-type param interface + // Emit nil check: typeassert v.(I). + emitTypeAssert(fn, v, rt, e.Sel.Pos()) + } + } + if targs := receiverTypeArgs(obj); len(targs) > 0 { + // obj is generic. + obj = fn.Prog.canon.instantiateMethod(obj, fn.subst.types(targs), fn.Prog.ctxt) + } + bound := createBound(fn.Prog, obj) + b.enqueue(bound) + + c := &MakeClosure{ + Fn: bound, + Bindings: []Value{v}, + } + c.setPos(e.Sel.Pos()) + c.setType(fn.typ(tv.Type)) + return fn.emit(c) + + case types.FieldVal: + indices := sel.index + last := len(indices) - 1 + v := b.expr(fn, e.X) + v = emitImplicitSelections(fn, v, indices[:last], e.Pos()) + v = emitFieldSelection(fn, v, indices[last], false, e.Sel) + return v + } + + panic("unexpected expression-relative selector") + + case *ast.IndexListExpr: + // f[X, Y] must be a generic function + if !instance(fn.info, e.X) { + panic("unexpected expression-could not match index list to instantiation") + } + return b.expr(fn, e.X) // Handle instantiation within the *Ident or *SelectorExpr cases. + + case *ast.IndexExpr: + if instance(fn.info, e.X) { + return b.expr(fn, e.X) // Handle instantiation within the *Ident or *SelectorExpr cases. + } + // not a generic instantiation. + xt := fn.typeOf(e.X) + switch et, mode := indexType(xt); mode { + case ixVar: + // Addressable slice/array; use IndexAddr and Load. + return b.addr(fn, e, false).load(fn) + + case ixArrVar, ixValue: + // An array in a register, a string or a combined type that contains + // either an [_]array (ixArrVar) or string (ixValue). + + // Note: for ixArrVar and CoreType(xt)==nil can be IndexAddr and Load. + index := b.expr(fn, e.Index) + if isUntyped(index.Type()) { + index = emitConv(fn, index, tInt) + } + v := &Index{ + X: b.expr(fn, e.X), + Index: index, + } + v.setPos(e.Lbrack) + v.setType(et) + return fn.emit(v) + + case ixMap: + ct := typeparams.CoreType(xt).(*types.Map) + v := &Lookup{ + X: b.expr(fn, e.X), + Index: emitConv(fn, b.expr(fn, e.Index), ct.Key()), + } + v.setPos(e.Lbrack) + v.setType(ct.Elem()) + return fn.emit(v) + default: + panic("unexpected container type in IndexExpr: " + xt.String()) + } + + case *ast.CompositeLit, *ast.StarExpr: + // Addressable types (lvalues) + return b.addr(fn, e, false).load(fn) + } + + panic(fmt.Sprintf("unexpected expr: %T", e)) +} + +// stmtList emits to fn code for all statements in list. 
+func (b *builder) stmtList(fn *Function, list []ast.Stmt) { + for _, s := range list { + b.stmt(fn, s) + } +} + +// receiver emits to fn code for expression e in the "receiver" +// position of selection e.f (where f may be a field or a method) and +// returns the effective receiver after applying the implicit field +// selections of sel. +// +// wantAddr requests that the result is an address. If +// !sel.indirect, this may require that e be built in addr() mode; it +// must thus be addressable. +// +// escaping is defined as per builder.addr(). +func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *selection) Value { + var v Value + if wantAddr && !sel.indirect && !isPointerCore(fn.typeOf(e)) { + v = b.addr(fn, e, escaping).address(fn) + } else { + v = b.expr(fn, e) + } + + last := len(sel.index) - 1 + // The position of implicit selection is the position of the inducing receiver expression. + v = emitImplicitSelections(fn, v, sel.index[:last], e.Pos()) + if types.IsInterface(v.Type()) { + // When v is an interface, sel.Kind()==MethodValue and v.f is invoked. + // So v is not loaded, even if v has a pointer core type. + } else if !wantAddr && isPointerCore(v.Type()) { + v = emitLoad(fn, v) + } + return v +} + +// setCallFunc populates the function parts of a CallCommon structure +// (Func, Method, Recv, Args[0]) based on the kind of invocation +// occurring in e. +func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { + c.pos = e.Lparen + + // Is this a method call? + if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { + sel := fn.selection(selector) + if sel != nil && sel.kind == types.MethodVal { + obj := sel.obj.(*types.Func) + recv := recvType(obj) + + wantAddr := isPointer(recv) + escaping := true + v := b.receiver(fn, selector.X, wantAddr, escaping, sel) + if types.IsInterface(recv) { + // Invoke-mode call. + c.Value = v // possibly type param + c.Method = obj + } else { + // "Call"-mode call. + c.Value = fn.Prog.objectMethod(obj, b) + c.Args = append(c.Args, v) + } + return + } + + // sel.kind==MethodExpr indicates T.f() or (*T).f(): + // a statically dispatched call to the method f in the + // method-set of T or *T. T may be an interface. + // + // e.Fun would evaluate to a concrete method, interface + // wrapper function, or promotion wrapper. + // + // For now, we evaluate it in the usual way. + // + // TODO(adonovan): opt: inline expr() here, to make the + // call static and to avoid generation of wrappers. + // It's somewhat tricky as it may consume the first + // actual parameter if the call is "invoke" mode. + // + // Examples: + // type T struct{}; func (T) f() {} // "call" mode + // type T interface { f() } // "invoke" mode + // + // type S struct{ T } + // + // var s S + // S.f(s) + // (*S).f(&s) + // + // Suggested approach: + // - consume the first actual parameter expression + // and build it with b.expr(). + // - apply implicit field selections. + // - use MethodVal logic to populate fields of c. + } + + // Evaluate the function operand in the usual way. + c.Value = b.expr(fn, e.Fun) +} + +// emitCallArgs emits to f code for the actual parameters of call e to +// a (possibly built-in) function of effective type sig. +// The argument values are appended to args, which is then returned. +func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { + // f(x, y, z...): pass slice z straight through. 
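+	// e.g. (hypothetically)
+	//
+	//	func f(x int, rest ...string)
+	//	f(1, xs...)
+	//
+	// passes xs ([]string) through as the variadic argument unchanged;
+	// no new backing array is allocated in that case.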
+ if e.Ellipsis != 0 { + for i, arg := range e.Args { + v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type()) + args = append(args, v) + } + return args + } + + offset := len(args) // 1 if call has receiver, 0 otherwise + + // Evaluate actual parameter expressions. + // + // If this is a chained call of the form f(g()) where g has + // multiple return values (MRV), they are flattened out into + // args; a suffix of them may end up in a varargs slice. + for _, arg := range e.Args { + v := b.expr(fn, arg) + if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain + for i, n := 0, ttuple.Len(); i < n; i++ { + args = append(args, emitExtract(fn, v, i)) + } + } else { + args = append(args, v) + } + } + + // Actual->formal assignability conversions for normal parameters. + np := sig.Params().Len() // number of normal parameters + if sig.Variadic() { + np-- + } + for i := 0; i < np; i++ { + args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type()) + } + + // Actual->formal assignability conversions for variadic parameter, + // and construction of slice. + if sig.Variadic() { + varargs := args[offset+np:] + st := sig.Params().At(np).Type().(*types.Slice) + vt := st.Elem() + if len(varargs) == 0 { + args = append(args, zeroConst(st)) + } else { + // Replace a suffix of args with a slice containing it. + at := types.NewArray(vt, int64(len(varargs))) + a := emitNew(fn, at, token.NoPos, "varargs") + a.setPos(e.Rparen) + for i, arg := range varargs { + iaddr := &IndexAddr{ + X: a, + Index: intConst(int64(i)), + } + iaddr.setType(types.NewPointer(vt)) + fn.emit(iaddr) + emitStore(fn, iaddr, arg, arg.Pos()) + } + s := &Slice{X: a} + s.setType(st) + args[offset+np] = fn.emit(s) + args = args[:offset+np+1] + } + } + return args +} + +// setCall emits to fn code to evaluate all the parameters of a function +// call e, and populates *c with those values. +func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { + // First deal with the f(...) part and optional receiver. + b.setCallFunc(fn, e, c) + + // Then append the other actual parameters. + sig, _ := typeparams.CoreType(fn.typeOf(e.Fun)).(*types.Signature) + if sig == nil { + panic(fmt.Sprintf("no signature for call of %s", e.Fun)) + } + c.Args = b.emitCallArgs(fn, sig, e, c.Args) +} + +// assignOp emits to fn code to perform loc = val. +func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, pos token.Pos) { + loc.store(fn, emitArith(fn, op, loc.load(fn), val, loc.typ(), pos)) +} + +// localValueSpec emits to fn code to define all of the vars in the +// function-local ValueSpec, spec. +func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { + switch { + case len(spec.Values) == len(spec.Names): + // e.g. var x, y = 0, 1 + // 1:1 assignment + for i, id := range spec.Names { + if !isBlankIdent(id) { + emitLocalVar(fn, identVar(fn, id)) + } + lval := b.addr(fn, id, false) // non-escaping + b.assign(fn, lval, spec.Values[i], true, nil) + } + + case len(spec.Values) == 0: + // e.g. var x, y int + // Locals are implicitly zero-initialized. + for _, id := range spec.Names { + if !isBlankIdent(id) { + lhs := emitLocalVar(fn, identVar(fn, id)) + if fn.debugInfo() { + emitDebugRef(fn, id, lhs, true) + } + } + } + + default: + // e.g. 
var x, y = pos() + tuple := b.exprN(fn, spec.Values[0]) + for i, id := range spec.Names { + if !isBlankIdent(id) { + emitLocalVar(fn, identVar(fn, id)) + lhs := b.addr(fn, id, false) // non-escaping + lhs.store(fn, emitExtract(fn, tuple, i)) + } + } + } +} + +// assignStmt emits code to fn for a parallel assignment of rhss to lhss. +// isDef is true if this is a short variable declaration (:=). +// +// Note the similarity with localValueSpec. +func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { + // Side effects of all LHSs and RHSs must occur in left-to-right order. + lvals := make([]lvalue, len(lhss)) + isZero := make([]bool, len(lhss)) + for i, lhs := range lhss { + var lval lvalue = blank{} + if !isBlankIdent(lhs) { + if isDef { + if obj, ok := fn.info.Defs[lhs.(*ast.Ident)].(*types.Var); ok { + emitLocalVar(fn, obj) + isZero[i] = true + } + } + lval = b.addr(fn, lhs, false) // non-escaping + } + lvals[i] = lval + } + if len(lhss) == len(rhss) { + // Simple assignment: x = f() (!isDef) + // Parallel assignment: x, y = f(), g() (!isDef) + // or short var decl: x, y := f(), g() (isDef) + // + // In all cases, the RHSs may refer to the LHSs, + // so we need a storebuf. + var sb storebuf + for i := range rhss { + b.assign(fn, lvals[i], rhss[i], isZero[i], &sb) + } + sb.emit(fn) + } else { + // e.g. x, y = pos() + tuple := b.exprN(fn, rhss[0]) + emitDebugRef(fn, rhss[0], tuple, false) + for i, lval := range lvals { + lval.store(fn, emitExtract(fn, tuple, i)) + } + } +} + +// arrayLen returns the length of the array whose composite literal elements are elts. +func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { + var max int64 = -1 + var i int64 = -1 + for _, e := range elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + i = b.expr(fn, kv.Key).(*Const).Int64() + } else { + i++ + } + if i > max { + max = i + } + } + return max + 1 +} + +// compLit emits to fn code to initialize a composite literal e at +// address addr with type typ. +// +// Nested composite literals are recursively initialized in place +// where possible. If isZero is true, compLit assumes that addr +// holds the zero value for typ. +// +// Because the elements of a composite literal may refer to the +// variables being updated, as in the second line below, +// +// x := T{a: 1} +// x = T{a: x.a} +// +// all the reads must occur before all the writes. Thus all stores to +// loc are emitted to the storebuf sb for later execution. +// +// A CompositeLit may have pointer type only in the recursive (nested) +// case when the type name is implicit. e.g. in []*T{{}}, the inner +// literal has type *T behaves like &T{}. +// In that case, addr must hold a T, not a *T. 
+func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { + typ := typeparams.Deref(fn.typeOf(e)) // retain the named/alias/param type, if any + switch t := typeparams.CoreType(typ).(type) { + case *types.Struct: + if !isZero && len(e.Elts) != t.NumFields() { + // memclear + zt := typeparams.MustDeref(addr.Type()) + sb.store(&address{addr, e.Lbrace, nil}, zeroConst(zt)) + isZero = true + } + for i, e := range e.Elts { + fieldIndex := i + pos := e.Pos() + if kv, ok := e.(*ast.KeyValueExpr); ok { + fname := kv.Key.(*ast.Ident).Name + for i, n := 0, t.NumFields(); i < n; i++ { + sf := t.Field(i) + if sf.Name() == fname { + fieldIndex = i + pos = kv.Colon + e = kv.Value + break + } + } + } + sf := t.Field(fieldIndex) + faddr := &FieldAddr{ + X: addr, + Field: fieldIndex, + } + faddr.setPos(pos) + faddr.setType(types.NewPointer(sf.Type())) + fn.emit(faddr) + b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb) + } + + case *types.Array, *types.Slice: + var at *types.Array + var array Value + switch t := t.(type) { + case *types.Slice: + at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) + array = emitNew(fn, at, e.Lbrace, "slicelit") + case *types.Array: + at = t + array = addr + + if !isZero && int64(len(e.Elts)) != at.Len() { + // memclear + zt := typeparams.MustDeref(array.Type()) + sb.store(&address{array, e.Lbrace, nil}, zeroConst(zt)) + } + } + + var idx *Const + for _, e := range e.Elts { + pos := e.Pos() + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + pos = kv.Colon + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = intConst(idxval) + } + iaddr := &IndexAddr{ + X: array, + Index: idx, + } + iaddr.setType(types.NewPointer(at.Elem())) + fn.emit(iaddr) + if t != at { // slice + // backing array is unaliased => storebuf not needed. + b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, nil) + } else { + b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, sb) + } + } + + if t != at { // slice + s := &Slice{X: array} + s.setPos(e.Lbrace) + s.setType(typ) + sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, fn.emit(s)) + } + + case *types.Map: + m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))} + m.setPos(e.Lbrace) + m.setType(typ) + fn.emit(m) + for _, e := range e.Elts { + e := e.(*ast.KeyValueExpr) + + // If a key expression in a map literal is itself a + // composite literal, the type may be omitted. + // For example: + // map[*struct{}]bool{{}: true} + // An &-operation may be implied: + // map[*struct{}]bool{&struct{}{}: true} + wantAddr := false + if _, ok := unparen(e.Key).(*ast.CompositeLit); ok { + wantAddr = isPointerCore(t.Key()) + } + + var key Value + if wantAddr { + // A CompositeLit never evaluates to a pointer, + // so if the type of the location is a pointer, + // an &-operation is implied. + key = b.addr(fn, e.Key, true).address(fn) + } else { + key = b.expr(fn, e.Key) + } + + loc := element{ + m: m, + k: emitConv(fn, key, t.Key()), + t: t.Elem(), + pos: e.Colon, + } + + // We call assign() only because it takes care + // of any &-operation required in the recursive + // case, e.g., + // map[int]*struct{}{0: {}} implies &struct{}{}. + // In-place update is of course impossible, + // and no storebuf is needed. 
+ b.assign(fn, &loc, e.Value, true, nil) + } + sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) + + default: + panic("unexpected CompositeLit type: " + typ.String()) + } +} + +// switchStmt emits to fn code for the switch statement s, optionally +// labelled by label. +func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { + // We treat SwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by ssautil.Switches() + // to those cases that are free of side effects. + if s.Init != nil { + b.stmt(fn, s.Init) + } + var tag Value = vTrue + if s.Tag != nil { + tag = b.expr(fn, s.Tag) + } + done := fn.newBasicBlock("switch.done") + if label != nil { + label._break = done + } + // We pull the default case (if present) down to the end. + // But each fallthrough label must point to the next + // body block in source order, so we preallocate a + // body block (fallthru) for the next case. + // Unfortunately this makes for a confusing block order. + var dfltBody *[]ast.Stmt + var dfltFallthrough *BasicBlock + var fallthru, dfltBlock *BasicBlock + ncases := len(s.Body.List) + for i, clause := range s.Body.List { + body := fallthru + if body == nil { + body = fn.newBasicBlock("switch.body") // first case only + } + + // Preallocate body block for the next case. + fallthru = done + if i+1 < ncases { + fallthru = fn.newBasicBlock("switch.body") + } + + cc := clause.(*ast.CaseClause) + if cc.List == nil { + // Default case. + dfltBody = &cc.Body + dfltFallthrough = fallthru + dfltBlock = body + continue + } + + var nextCond *BasicBlock + for _, cond := range cc.List { + nextCond = fn.newBasicBlock("switch.next") + // TODO(adonovan): opt: when tag==vTrue, we'd + // get better code if we use b.cond(cond) + // instead of BinOp(EQL, tag, b.expr(cond)) + // followed by If. Don't forget conversions + // though. + cond := emitCompare(fn, token.EQL, tag, b.expr(fn, cond), cond.Pos()) + emitIf(fn, cond, body, nextCond) + fn.currentBlock = nextCond + } + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: fallthru, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = nextCond + } + if dfltBlock != nil { + emitJump(fn, dfltBlock) + fn.currentBlock = dfltBlock + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: dfltFallthrough, + } + b.stmtList(fn, *dfltBody) + fn.targets = fn.targets.tail + } + emitJump(fn, done) + fn.currentBlock = done +} + +// typeSwitchStmt emits to fn code for the type switch statement s, optionally +// labelled by label. +func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) { + // We treat TypeSwitchStmt like a sequential if-else chain. + // Multiway dispatch can be recovered later by ssautil.Switches(). + + // Typeswitch lowering: + // + // var x X + // switch y := x.(type) { + // case T1, T2: S1 // >1 (y := x) + // case nil: SN // nil (y := x) + // default: SD // 0 types (y := x) + // case T3: S3 // 1 type (y := x.(T3)) + // } + // + // ...s.Init... + // x := eval x + // .caseT1: + // t1, ok1 := typeswitch,ok x + // if ok1 then goto S1 else goto .caseT2 + // .caseT2: + // t2, ok2 := typeswitch,ok x + // if ok2 then goto S1 else goto .caseNil + // .S1: + // y := x + // ...S1... + // goto done + // .caseNil: + // if t2, ok2 := typeswitch,ok x + // if x == nil then goto SN else goto .caseT3 + // .SN: + // y := x + // ...SN... 
+ // goto done + // .caseT3: + // t3, ok3 := typeswitch,ok x + // if ok3 then goto S3 else goto default + // .S3: + // y := t3 + // ...S3... + // goto done + // .default: + // y := x + // ...SD... + // goto done + // .done: + if s.Init != nil { + b.stmt(fn, s.Init) + } + + var x Value + switch ass := s.Assign.(type) { + case *ast.ExprStmt: // x.(type) + x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X) + case *ast.AssignStmt: // y := x.(type) + x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) + } + + done := fn.newBasicBlock("typeswitch.done") + if label != nil { + label._break = done + } + var default_ *ast.CaseClause + for _, clause := range s.Body.List { + cc := clause.(*ast.CaseClause) + if cc.List == nil { + default_ = cc + continue + } + body := fn.newBasicBlock("typeswitch.body") + var next *BasicBlock + var casetype types.Type + var ti Value // ti, ok := typeassert,ok x + for _, cond := range cc.List { + next = fn.newBasicBlock("typeswitch.next") + casetype = fn.typeOf(cond) + var condv Value + if casetype == tUntypedNil { + condv = emitCompare(fn, token.EQL, x, zeroConst(x.Type()), cond.Pos()) + ti = x + } else { + yok := emitTypeTest(fn, x, casetype, cc.Case) + ti = emitExtract(fn, yok, 0) + condv = emitExtract(fn, yok, 1) + } + emitIf(fn, condv, body, next) + fn.currentBlock = next + } + if len(cc.List) != 1 { + ti = x + } + fn.currentBlock = body + b.typeCaseBody(fn, cc, ti, done) + fn.currentBlock = next + } + if default_ != nil { + b.typeCaseBody(fn, default_, x, done) + } else { + emitJump(fn, done) + } + fn.currentBlock = done +} + +func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) { + if obj, ok := fn.info.Implicits[cc].(*types.Var); ok { + // In a switch y := x.(type), each case clause + // implicitly declares a distinct object y. + // In a single-type case, y has that type. + // In multi-type cases, 'case nil' and default, + // y has the same type as the interface operand. + emitStore(fn, emitLocalVar(fn, obj), x, obj.Pos()) + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) +} + +// selectStmt emits to fn code for the select statement s, optionally +// labelled by label. +func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { + // A blocking select of a single case degenerates to a + // simple send or receive. + // TODO(adonovan): opt: is this optimization worth its weight? + if len(s.Body.List) == 1 { + clause := s.Body.List[0].(*ast.CommClause) + if clause.Comm != nil { + b.stmt(fn, clause.Comm) + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = done + return + } + } + + // First evaluate all channels in all cases, and find + // the directions of each state. 
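+	// e.g. (hypothetical source)
+	//
+	//	select {
+	//	case ch <- v:        // SendOnly state
+	//	case x, ok := <-ch2: // RecvOnly state
+	//	default:             // blocking = false
+	//	}
+	//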
+ var states []*SelectState + blocking := true + debugInfo := fn.debugInfo() + for _, clause := range s.Body.List { + var st *SelectState + switch comm := clause.(*ast.CommClause).Comm.(type) { + case nil: // default case + blocking = false + continue + + case *ast.SendStmt: // ch<- i + ch := b.expr(fn, comm.Chan) + chtyp := typeparams.CoreType(fn.typ(ch.Type())).(*types.Chan) + st = &SelectState{ + Dir: types.SendOnly, + Chan: ch, + Send: emitConv(fn, b.expr(fn, comm.Value), chtyp.Elem()), + Pos: comm.Arrow, + } + if debugInfo { + st.DebugNode = comm + } + + case *ast.AssignStmt: // x := <-ch + recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + + case *ast.ExprStmt: // <-ch + recv := unparen(comm.X).(*ast.UnaryExpr) + st = &SelectState{ + Dir: types.RecvOnly, + Chan: b.expr(fn, recv.X), + Pos: recv.OpPos, + } + if debugInfo { + st.DebugNode = recv + } + } + states = append(states, st) + } + + // We dispatch on the (fair) result of Select using a + // sequential if-else chain, in effect: + // + // idx, recvOk, r0...r_n-1 := select(...) + // if idx == 0 { // receive on channel 0 (first receive => r0) + // x, ok := r0, recvOk + // ...state0... + // } else if v == 1 { // send on channel 1 + // ...state1... + // } else { + // ...default... + // } + sel := &Select{ + States: states, + Blocking: blocking, + } + sel.setPos(s.Select) + var vars []*types.Var + vars = append(vars, varIndex, varOk) + for _, st := range states { + if st.Dir == types.RecvOnly { + chtyp := typeparams.CoreType(fn.typ(st.Chan.Type())).(*types.Chan) + vars = append(vars, anonVar(chtyp.Elem())) + } + } + sel.setType(types.NewTuple(vars...)) + + fn.emit(sel) + idx := emitExtract(fn, sel, 0) + + done := fn.newBasicBlock("select.done") + if label != nil { + label._break = done + } + + var defaultBody *[]ast.Stmt + state := 0 + r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV + for _, cc := range s.Body.List { + clause := cc.(*ast.CommClause) + if clause.Comm == nil { + defaultBody = &clause.Body + continue + } + body := fn.newBasicBlock("select.body") + next := fn.newBasicBlock("select.next") + emitIf(fn, emitCompare(fn, token.EQL, idx, intConst(int64(state)), token.NoPos), body, next) + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + switch comm := clause.Comm.(type) { + case *ast.ExprStmt: // <-ch + if debugInfo { + v := emitExtract(fn, sel, r) + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + r++ + + case *ast.AssignStmt: // x := <-states[state].Chan + if comm.Tok == token.DEFINE { + emitLocalVar(fn, identVar(fn, comm.Lhs[0].(*ast.Ident))) + } + x := b.addr(fn, comm.Lhs[0], false) // non-escaping + v := emitExtract(fn, sel, r) + if debugInfo { + emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) + } + x.store(fn, v) + + if len(comm.Lhs) == 2 { // x, ok := ... + if comm.Tok == token.DEFINE { + emitLocalVar(fn, identVar(fn, comm.Lhs[1].(*ast.Ident))) + } + ok := b.addr(fn, comm.Lhs[1], false) // non-escaping + ok.store(fn, emitExtract(fn, sel, 1)) + } + r++ + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done) + fn.currentBlock = next + state++ + } + if defaultBody != nil { + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, *defaultBody) + fn.targets = fn.targets.tail + } else { + // A blocking select must match some case. 
+ // (This should really be a runtime.errorString, not a string.) + fn.emit(&Panic{ + X: emitConv(fn, stringConst("blocking select matched no case"), tEface), + }) + fn.currentBlock = fn.newBasicBlock("unreachable") + } + emitJump(fn, done) + fn.currentBlock = done +} + +// forStmt emits to fn code for the for statement s, optionally +// labelled by label. +func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { + // Use forStmtGo122 instead if it applies. + if s.Init != nil { + if assign, ok := s.Init.(*ast.AssignStmt); ok && assign.Tok == token.DEFINE { + if versions.AtLeast(fn.goversion, versions.Go1_22) { + b.forStmtGo122(fn, s, label) + return + } + } + } + + // ...init... + // jump loop + // loop: + // if cond goto body else done + // body: + // ...body... + // jump post + // post: (target of continue) + // ...post... + // jump loop + // done: (target of break) + if s.Init != nil { + b.stmt(fn, s.Init) + } + + body := fn.newBasicBlock("for.body") + done := fn.newBasicBlock("for.done") // target of 'break' + loop := body // target of back-edge + if s.Cond != nil { + loop = fn.newBasicBlock("for.loop") + } + cont := loop // target of 'continue' + if s.Post != nil { + cont = fn.newBasicBlock("for.post") + } + if label != nil { + label._break = done + label._continue = cont + } + emitJump(fn, loop) + fn.currentBlock = loop + if loop != body { + b.cond(fn, s.Cond, body, done) + fn.currentBlock = body + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: cont, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, cont) + + if s.Post != nil { + fn.currentBlock = cont + b.stmt(fn, s.Post) + emitJump(fn, loop) // back-edge + } + fn.currentBlock = done +} + +// forStmtGo122 emits to fn code for the for statement s, optionally +// labelled by label. s must define its variables. +// +// This allocates once per loop iteration. This is only correct in +// GoVersions >= go1.22. +func (b *builder) forStmtGo122(fn *Function, s *ast.ForStmt, label *lblock) { + // i_outer = alloc[T] + // *i_outer = ...init... // under objects[i] = i_outer + // jump loop + // loop: + // i = phi [head: i_outer, loop: i_next] + // ...cond... // under objects[i] = i + // if cond goto body else done + // body: + // ...body... // under objects[i] = i (same as loop) + // jump post + // post: + // tmp = *i + // i_next = alloc[T] + // *i_next = tmp + // ...post... // under objects[i] = i_next + // goto loop + // done: + + init := s.Init.(*ast.AssignStmt) + startingBlocks := len(fn.Blocks) + + pre := fn.currentBlock // current block before starting + loop := fn.newBasicBlock("for.loop") // target of back-edge + body := fn.newBasicBlock("for.body") + post := fn.newBasicBlock("for.post") // target of 'continue' + done := fn.newBasicBlock("for.done") // target of 'break' + + // For each of the n loop variables, we create five SSA values, + // outer, phi, next, load, and store in pre, loop, and post. + // There is no limit on n. 
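+	// For example (hypothetically), under go1.22 semantics each iteration
+	// gets a fresh i, so closures created in the loop body observe 0, 1 and 2
+	// rather than 3:
+	//
+	//	for i := 0; i < 3; i++ {
+	//		fns = append(fns, func() int { return i })
+	//	}
+	//
+	// which is why a fresh cell (i_next) is allocated in the post block.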
+ type loopVar struct { + obj *types.Var + outer *Alloc + phi *Phi + load *UnOp + next *Alloc + store *Store + } + vars := make([]loopVar, len(init.Lhs)) + for i, lhs := range init.Lhs { + v := identVar(fn, lhs.(*ast.Ident)) + typ := fn.typ(v.Type()) + + fn.currentBlock = pre + outer := emitLocal(fn, typ, v.Pos(), v.Name()) + + fn.currentBlock = loop + phi := &Phi{Comment: v.Name()} + phi.pos = v.Pos() + phi.typ = outer.Type() + fn.emit(phi) + + fn.currentBlock = post + // If next is local, it reuses the address and zeroes the old value so + // load before allocating next. + load := emitLoad(fn, phi) + next := emitLocal(fn, typ, v.Pos(), v.Name()) + store := emitStore(fn, next, load, token.NoPos) + + phi.Edges = []Value{outer, next} // pre edge is emitted before post edge. + + vars[i] = loopVar{v, outer, phi, load, next, store} + } + + // ...init... under fn.objects[v] = i_outer + fn.currentBlock = pre + for _, v := range vars { + fn.vars[v.obj] = v.outer + } + const isDef = false // assign to already-allocated outers + b.assignStmt(fn, init.Lhs, init.Rhs, isDef) + if label != nil { + label._break = done + label._continue = post + } + emitJump(fn, loop) + + // ...cond... under fn.objects[v] = i + fn.currentBlock = loop + for _, v := range vars { + fn.vars[v.obj] = v.phi + } + if s.Cond != nil { + b.cond(fn, s.Cond, body, done) + } else { + emitJump(fn, body) + } + + // ...body... under fn.objects[v] = i + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: post, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, post) + + // ...post... under fn.objects[v] = i_next + for _, v := range vars { + fn.vars[v.obj] = v.next + } + fn.currentBlock = post + if s.Post != nil { + b.stmt(fn, s.Post) + } + emitJump(fn, loop) // back-edge + fn.currentBlock = done + + // For each loop variable that does not escape, + // (the common case), fuse its next cells into its + // (local) outer cell as they have disjoint live ranges. + // + // It is sufficient to test whether i_next escapes, + // because its Heap flag will be marked true if either + // the cond or post expression causes i to escape + // (because escape distributes over phi). + var nlocals int + for _, v := range vars { + if !v.next.Heap { + nlocals++ + } + } + if nlocals > 0 { + replace := make(map[Value]Value, 2*nlocals) + dead := make(map[Instruction]bool, 4*nlocals) + for _, v := range vars { + if !v.next.Heap { + replace[v.next] = v.outer + replace[v.phi] = v.outer + dead[v.phi], dead[v.next], dead[v.load], dead[v.store] = true, true, true, true + } + } + + // Replace all uses of i_next and phi with i_outer. + // Referrers have not been built for fn yet so only update Instruction operands. + // We need only look within the blocks added by the loop. + var operands []*Value // recycle storage + for _, b := range fn.Blocks[startingBlocks:] { + for _, instr := range b.Instrs { + operands = instr.Operands(operands[:0]) + for _, ptr := range operands { + k := *ptr + if v := replace[k]; v != nil { + *ptr = v + } + } + } + } + + // Remove instructions for phi, load, and store. + // lift() will remove the unused i_next *Alloc. + isDead := func(i Instruction) bool { return dead[i] } + loop.Instrs = removeInstrsIf(loop.Instrs, isDead) + post.Instrs = removeInstrsIf(post.Instrs, isDead) + } +} + +// rangeIndexed emits to fn the header for an integer-indexed loop +// over array, *array or slice value x. +// The v result is defined only if tv is non-nil. 
+// forPos is the position of the "for" token. +func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { + // + // length = len(x) + // index = -1 + // loop: (target of continue) + // index++ + // if index < length goto body else done + // body: + // k = index + // v = x[index] + // ...body... + // jump loop + // done: (target of break) + + // Determine number of iterations. + var length Value + dt := typeparams.Deref(x.Type()) + if arr, ok := typeparams.CoreType(dt).(*types.Array); ok { + // For array or *array, the number of iterations is + // known statically thanks to the type. We avoid a + // data dependence upon x, permitting later dead-code + // elimination if x is pure, static unrolling, etc. + // Ranging over a nil *array may have >0 iterations. + // We still generate code for x, in case it has effects. + length = intConst(arr.Len()) + } else { + // length = len(x). + var c Call + c.Call.Value = makeLen(x.Type()) + c.Call.Args = []Value{x} + c.setType(tInt) + length = fn.emit(&c) + } + + index := emitLocal(fn, tInt, token.NoPos, "rangeindex") + emitStore(fn, index, intConst(-1), pos) + + loop = fn.newBasicBlock("rangeindex.loop") + emitJump(fn, loop) + fn.currentBlock = loop + + incr := &BinOp{ + Op: token.ADD, + X: emitLoad(fn, index), + Y: vOne, + } + incr.setType(tInt) + emitStore(fn, index, fn.emit(incr), pos) + + body := fn.newBasicBlock("rangeindex.body") + done = fn.newBasicBlock("rangeindex.done") + emitIf(fn, emitCompare(fn, token.LSS, incr, length, token.NoPos), body, done) + fn.currentBlock = body + + k = emitLoad(fn, index) + if tv != nil { + switch t := typeparams.CoreType(x.Type()).(type) { + case *types.Array: + instr := &Index{ + X: x, + Index: k, + } + instr.setType(t.Elem()) + instr.setPos(x.Pos()) + v = fn.emit(instr) + + case *types.Pointer: // *array + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())) + instr.setPos(x.Pos()) + v = emitLoad(fn, fn.emit(instr)) + + case *types.Slice: + instr := &IndexAddr{ + X: x, + Index: k, + } + instr.setType(types.NewPointer(t.Elem())) + instr.setPos(x.Pos()) + v = emitLoad(fn, fn.emit(instr)) + + default: + panic("rangeIndexed x:" + t.String()) + } + } + return +} + +// rangeIter emits to fn the header for a loop using +// Range/Next/Extract to iterate over map or string value x. +// tk and tv are the types of the key/value results k and v, or nil +// if the respective component is not wanted. +func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { + // + // it = range x + // loop: (target of continue) + // okv = next it (ok, key, value) + // ok = extract okv #0 + // if ok goto body else done + // body: + // k = extract okv #1 + // v = extract okv #2 + // ...body... 
+ // jump loop + // done: (target of break) + // + + if tk == nil { + tk = tInvalid + } + if tv == nil { + tv = tInvalid + } + + rng := &Range{X: x} + rng.setPos(pos) + rng.setType(tRangeIter) + it := fn.emit(rng) + + loop = fn.newBasicBlock("rangeiter.loop") + emitJump(fn, loop) + fn.currentBlock = loop + + okv := &Next{ + Iter: it, + IsString: isBasic(typeparams.CoreType(x.Type())), + } + okv.setType(types.NewTuple( + varOk, + newVar("k", tk), + newVar("v", tv), + )) + fn.emit(okv) + + body := fn.newBasicBlock("rangeiter.body") + done = fn.newBasicBlock("rangeiter.done") + emitIf(fn, emitExtract(fn, okv, 0), body, done) + fn.currentBlock = body + + if tk != tInvalid { + k = emitExtract(fn, okv, 1) + } + if tv != tInvalid { + v = emitExtract(fn, okv, 2) + } + return +} + +// rangeChan emits to fn the header for a loop that receives from +// channel x until it fails. +// tk is the channel's element type, or nil if the k result is +// not wanted +// pos is the position of the '=' or ':=' token. +func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { + // + // loop: (target of continue) + // ko = <-x (key, ok) + // ok = extract ko #1 + // if ok goto body else done + // body: + // k = extract ko #0 + // ...body... + // goto loop + // done: (target of break) + + loop = fn.newBasicBlock("rangechan.loop") + emitJump(fn, loop) + fn.currentBlock = loop + recv := &UnOp{ + Op: token.ARROW, + X: x, + CommaOk: true, + } + recv.setPos(pos) + recv.setType(types.NewTuple( + newVar("k", typeparams.CoreType(x.Type()).(*types.Chan).Elem()), + varOk, + )) + ko := fn.emit(recv) + body := fn.newBasicBlock("rangechan.body") + done = fn.newBasicBlock("rangechan.done") + emitIf(fn, emitExtract(fn, ko, 1), body, done) + fn.currentBlock = body + if tk != nil { + k = emitExtract(fn, ko, 0) + } + return +} + +// rangeInt emits to fn the header for a range loop with an integer operand. +// tk is the key value's type, or nil if the k result is not wanted. +// pos is the position of the "for" token. +func (b *builder) rangeInt(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { + // + // iter = 0 + // if 0 < x goto body else done + // loop: (target of continue) + // iter++ + // if iter < x goto body else done + // body: + // k = x + // ...body... + // jump loop + // done: (target of break) + + if isUntyped(x.Type()) { + x = emitConv(fn, x, tInt) + } + + T := x.Type() + iter := emitLocal(fn, T, token.NoPos, "rangeint.iter") + // x may be unsigned. Avoid initializing x to -1. + + body := fn.newBasicBlock("rangeint.body") + done = fn.newBasicBlock("rangeint.done") + emitIf(fn, emitCompare(fn, token.LSS, zeroConst(T), x, token.NoPos), body, done) + + loop = fn.newBasicBlock("rangeint.loop") + fn.currentBlock = loop + + incr := &BinOp{ + Op: token.ADD, + X: emitLoad(fn, iter), + Y: emitConv(fn, vOne, T), + } + incr.setType(T) + emitStore(fn, iter, fn.emit(incr), pos) + emitIf(fn, emitCompare(fn, token.LSS, incr, x, token.NoPos), body, done) + fn.currentBlock = body + + if tk != nil { + // Integer types (int, uint8, etc.) are named and + // we know that k is assignable to x when tk != nil. + // This implies tk and T are identical so no conversion is needed. + k = emitLoad(fn, iter) + } + + return +} + +// rangeStmt emits to fn code for the range statement s, optionally +// labelled by label. 
+func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { + var tk, tv types.Type + if s.Key != nil && !isBlankIdent(s.Key) { + tk = fn.typeOf(s.Key) + } + if s.Value != nil && !isBlankIdent(s.Value) { + tv = fn.typeOf(s.Value) + } + + // create locals for s.Key and s.Value. + createVars := func() { + // Unlike a short variable declaration, a RangeStmt + // using := never redeclares an existing variable; it + // always creates a new one. + if tk != nil { + emitLocalVar(fn, identVar(fn, s.Key.(*ast.Ident))) + } + if tv != nil { + emitLocalVar(fn, identVar(fn, s.Value.(*ast.Ident))) + } + } + + afterGo122 := versions.AtLeast(fn.goversion, versions.Go1_22) + if s.Tok == token.DEFINE && !afterGo122 { + // pre-go1.22: If iteration variables are defined (:=), this + // occurs once outside the loop. + createVars() + } + + x := b.expr(fn, s.X) + + var k, v Value + var loop, done *BasicBlock + switch rt := typeparams.CoreType(x.Type()).(type) { + case *types.Slice, *types.Array, *types.Pointer: // *array + k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For) + + case *types.Chan: + k, loop, done = b.rangeChan(fn, x, tk, s.For) + + case *types.Map: + k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + + case *types.Basic: + switch { + case rt.Info()&types.IsString != 0: + k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + + case rt.Info()&types.IsInteger != 0: + k, loop, done = b.rangeInt(fn, x, tk, s.For) + + default: + panic("Cannot range over basic type: " + rt.String()) + } + + case *types.Signature: + // Special case rewrite (fn.goversion >= go1.23): + // for x := range f { ... } + // into + // f(func(x T) bool { ... }) + b.rangeFunc(fn, x, tk, tv, s, label) + return + + default: + panic("Cannot range over: " + rt.String()) + } + + if s.Tok == token.DEFINE && afterGo122 { + // go1.22: If iteration variables are defined (:=), this occurs inside the loop. + createVars() + } + + // Evaluate both LHS expressions before we update either. + var kl, vl lvalue + if tk != nil { + kl = b.addr(fn, s.Key, false) // non-escaping + } + if tv != nil { + vl = b.addr(fn, s.Value, false) // non-escaping + } + if tk != nil { + kl.store(fn, k) + } + if tv != nil { + vl.store(fn, v) + } + + if label != nil { + label._break = done + label._continue = loop + } + + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: loop, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, loop) // back-edge + fn.currentBlock = done +} + +// rangeFunc emits to fn code for the range-over-func rng.Body of the iterator +// function x, optionally labelled by label. It creates a new anonymous function +// yield for rng and builds the function. +func (b *builder) rangeFunc(fn *Function, x Value, tk, tv types.Type, rng *ast.RangeStmt, label *lblock) { + // Consider the SSA code for the outermost range-over-func in fn: + // + // func fn(...) (ret R) { + // ... + // for k, v = range x { + // ... + // } + // ... + // } + // + // The code emitted into fn will look something like this. + // + // loop: + // jump := READY + // y := make closure yield [ret, deferstack, jump, k, v] + // x(y) + // switch jump { + // [see resuming execution] + // } + // goto done + // done: + // ... 
+ // + // where yield is a new synthetic yield function: + // + // func yield(_k tk, _v tv) bool + // free variables: [ret, stack, jump, k, v] + // { + // entry: + // if jump != READY then goto invalid else valid + // invalid: + // panic("iterator called when it is not in a ready state") + // valid: + // jump = BUSY + // k = _k + // v = _v + // ... + // cont: + // jump = READY + // return true + // } + // + // Yield state: + // + // Each range loop has an associated jump variable that records + // the state of the iterator. A yield function is initially + // in a READY (0) and callable state. If the yield function is called + // and is not in READY state, it panics. When it is called in a callable + // state, it becomes BUSY. When execution reaches the end of the body + // of the loop (or a continue statement targeting the loop is executed), + // the yield function returns true and resumes being in a READY state. + // After the iterator function x(y) returns, then if the yield function + // is in a READY state, the yield enters the DONE state. + // + // Each lowered control statement (break X, continue X, goto Z, or return) + // that exits the loop sets the variable to a unique positive EXIT value, + // before returning false from the yield function. + // + // If the yield function returns abruptly due to a panic or GoExit, + // it remains in a BUSY state. The generated code asserts that, after + // the iterator call x(y) returns normally, the jump variable state + // is DONE. + // + // Resuming execution: + // + // The code generated for the range statement checks the jump + // variable to determine how to resume execution. + // + // switch jump { + // case BUSY: panic("...") + // case DONE: goto done + // case READY: state = DONE; goto done + // case 123: ... // action for exit 123. + // case 456: ... // action for exit 456. + // ... + // } + // + // Forward goto statements within a yield are jumps to labels that + // have not yet been traversed in fn. They may be in the Body of the + // function. What we emit for these is: + // + // goto target + // target: + // ... + // + // We leave an unresolved exit in yield.exits to check at the end + // of building yield if it encountered target in the body. If it + // encountered target, no additional work is required. Otherwise, + // the yield emits a new early exit in the basic block for target. + // We expect that blockopt will fuse the early exit into the case + // block later. The unresolved exit is then added to yield.parent.exits. + + loop := fn.newBasicBlock("rangefunc.loop") + done := fn.newBasicBlock("rangefunc.done") + + // These are targets within y. + fn.targets = &targets{ + tail: fn.targets, + _break: done, + // _continue is within y. 
+ } + if label != nil { + label._break = done + // _continue is within y + } + + emitJump(fn, loop) + fn.currentBlock = loop + + // loop: + // jump := READY + + anonIdx := len(fn.AnonFuncs) + + jump := newVar(fmt.Sprintf("jump$%d", anonIdx+1), tInt) + emitLocalVar(fn, jump) // zero value is READY + + xsig := typeparams.CoreType(x.Type()).(*types.Signature) + ysig := typeparams.CoreType(xsig.Params().At(0).Type()).(*types.Signature) + + /* synthetic yield function for body of range-over-func loop */ + y := &Function{ + name: fmt.Sprintf("%s$%d", fn.Name(), anonIdx+1), + Signature: ysig, + Synthetic: "range-over-func yield", + pos: rangePosition(rng), + parent: fn, + anonIdx: int32(len(fn.AnonFuncs)), + Pkg: fn.Pkg, + Prog: fn.Prog, + syntax: rng, + info: fn.info, + goversion: fn.goversion, + build: (*builder).buildYieldFunc, + topLevelOrigin: nil, + typeparams: fn.typeparams, + typeargs: fn.typeargs, + subst: fn.subst, + jump: jump, + deferstack: fn.deferstack, + returnVars: fn.returnVars, // use the parent's return variables + uniq: fn.uniq, // start from parent's unique values + } + + // If the RangeStmt has a label, this is how it is passed to buildYieldFunc. + if label != nil { + y.lblocks = map[*types.Label]*lblock{label.label: nil} + } + fn.AnonFuncs = append(fn.AnonFuncs, y) + + // Build y immediately. It may: + // * cause fn's locals to escape, and + // * create new exit nodes in exits. + // (y is not marked 'built' until the end of the enclosing FuncDecl.) + unresolved := len(fn.exits) + y.build(b, y) + fn.uniq = y.uniq // resume after y's unique values + + // Emit the call of y. + // c := MakeClosure y + // x(c) + c := &MakeClosure{Fn: y} + c.setType(ysig) + for _, fv := range y.FreeVars { + c.Bindings = append(c.Bindings, fv.outer) + fv.outer = nil + } + fn.emit(c) + call := Call{ + Call: CallCommon{ + Value: x, + Args: []Value{c}, + pos: token.NoPos, + }, + } + call.setType(xsig.Results()) + fn.emit(&call) + + exits := fn.exits[unresolved:] + b.buildYieldResume(fn, jump, exits, done) + + emitJump(fn, done) + fn.currentBlock = done +} + +// buildYieldResume emits to fn code for how to resume execution once a call to +// the iterator function over the yield function returns x(y). It does this by building +// a switch over the value of jump for when it is READY, BUSY, or EXIT(id). +func (b *builder) buildYieldResume(fn *Function, jump *types.Var, exits []*exit, done *BasicBlock) { + // v := *jump + // switch v { + // case BUSY: panic("...") + // case READY: jump = DONE; goto done + // case EXIT(a): ... + // case EXIT(b): ... + // ... 
+ // } + v := emitLoad(fn, fn.lookup(jump, false)) + + // case BUSY: panic("...") + isbusy := fn.newBasicBlock("rangefunc.resume.busy") + ifready := fn.newBasicBlock("rangefunc.resume.ready.check") + emitIf(fn, emitCompare(fn, token.EQL, v, jBusy, token.NoPos), isbusy, ifready) + fn.currentBlock = isbusy + fn.emit(&Panic{ + X: emitConv(fn, stringConst("iterator call did not preserve panic"), tEface), + }) + fn.currentBlock = ifready + + // case READY: jump = DONE; goto done + isready := fn.newBasicBlock("rangefunc.resume.ready") + ifexit := fn.newBasicBlock("rangefunc.resume.exits") + emitIf(fn, emitCompare(fn, token.EQL, v, jReady, token.NoPos), isready, ifexit) + fn.currentBlock = isready + storeVar(fn, jump, jDone, token.NoPos) + emitJump(fn, done) + fn.currentBlock = ifexit + + for _, e := range exits { + id := intConst(e.id) + + // case EXIT(id): { /* do e */ } + cond := emitCompare(fn, token.EQL, v, id, e.pos) + matchb := fn.newBasicBlock("rangefunc.resume.match") + cndb := fn.newBasicBlock("rangefunc.resume.cnd") + emitIf(fn, cond, matchb, cndb) + fn.currentBlock = matchb + + // Cases to fill in the { /* do e */ } bit. + switch { + case e.label != nil: // forward goto? + // case EXIT(id): goto lb // label + lb := fn.lblockOf(e.label) + // Do not mark lb as resolved. + // If fn does not contain label, lb remains unresolved and + // fn must itself be a range-over-func function. lb will be: + // lb: + // fn.jump = id + // return false + emitJump(fn, lb._goto) + + case e.to != fn: // e jumps to an ancestor of fn? + // case EXIT(id): { fn.jump = id; return false } + // fn is a range-over-func function. + storeVar(fn, fn.jump, id, token.NoPos) + fn.emit(&Return{Results: []Value{vFalse}, pos: e.pos}) + + case e.block == nil && e.label == nil: // return from fn? + // case EXIT(id): { return ... } + fn.emit(new(RunDefers)) + results := make([]Value, len(fn.results)) + for i, r := range fn.results { + results[i] = emitLoad(fn, r) + } + fn.emit(&Return{Results: results, pos: e.pos}) + + case e.block != nil: + // case EXIT(id): goto block + emitJump(fn, e.block) + + default: + panic("unreachable") + } + fn.currentBlock = cndb + } +} + +// stmt lowers statement s to SSA form, emitting code to fn. +func (b *builder) stmt(fn *Function, _s ast.Stmt) { + // The label of the current statement. If non-nil, its _goto + // target is always set; its _break and _continue are set only + // within the body of switch/typeswitch/select/for/range. + // It is effectively an additional default-nil parameter of stmt(). + var label *lblock +start: + switch s := _s.(type) { + case *ast.EmptyStmt: + // ignore. (Usually removed by gofmt.) + + case *ast.DeclStmt: // Con, Var or Typ + d := s.Decl.(*ast.GenDecl) + if d.Tok == token.VAR { + for _, spec := range d.Specs { + if vs, ok := spec.(*ast.ValueSpec); ok { + b.localValueSpec(fn, vs) + } + } + } + + case *ast.LabeledStmt: + if s.Label.Name == "_" { + // Blank labels can't be the target of a goto, break, + // or continue statement, so we don't need a new block. 
+ _s = s.Stmt + goto start + } + label = fn.lblockOf(fn.label(s.Label)) + label.resolved = true + emitJump(fn, label._goto) + fn.currentBlock = label._goto + _s = s.Stmt + goto start // effectively: tailcall stmt(fn, s.Stmt, label) + + case *ast.ExprStmt: + b.expr(fn, s.X) + + case *ast.SendStmt: + chtyp := typeparams.CoreType(fn.typeOf(s.Chan)).(*types.Chan) + fn.emit(&Send{ + Chan: b.expr(fn, s.Chan), + X: emitConv(fn, b.expr(fn, s.Value), chtyp.Elem()), + pos: s.Arrow, + }) + + case *ast.IncDecStmt: + op := token.ADD + if s.Tok == token.DEC { + op = token.SUB + } + loc := b.addr(fn, s.X, false) + b.assignOp(fn, loc, NewConst(constant.MakeInt64(1), loc.typ()), op, s.Pos()) + + case *ast.AssignStmt: + switch s.Tok { + case token.ASSIGN, token.DEFINE: + b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE) + + default: // +=, etc. + op := s.Tok + token.ADD - token.ADD_ASSIGN + b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s.Pos()) + } + + case *ast.GoStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + v := Go{pos: s.Go} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v) + + case *ast.DeferStmt: + // The "intrinsics" new/make/len/cap are forbidden here. + // panic is treated like an ordinary function call. + deferstack := emitLoad(fn, fn.lookup(fn.deferstack, false)) + v := Defer{pos: s.Defer, DeferStack: deferstack} + b.setCall(fn, s.Call, &v.Call) + fn.emit(&v) + + // A deferred call can cause recovery from panic, + // and control resumes at the Recover block. + createRecoverBlock(fn.source) + + case *ast.ReturnStmt: + b.returnStmt(fn, s) + + case *ast.BranchStmt: + b.branchStmt(fn, s) + + case *ast.BlockStmt: + b.stmtList(fn, s.List) + + case *ast.IfStmt: + if s.Init != nil { + b.stmt(fn, s.Init) + } + then := fn.newBasicBlock("if.then") + done := fn.newBasicBlock("if.done") + els := done + if s.Else != nil { + els = fn.newBasicBlock("if.else") + } + b.cond(fn, s.Cond, then, els) + fn.currentBlock = then + b.stmt(fn, s.Body) + emitJump(fn, done) + + if s.Else != nil { + fn.currentBlock = els + b.stmt(fn, s.Else) + emitJump(fn, done) + } + + fn.currentBlock = done + + case *ast.SwitchStmt: + b.switchStmt(fn, s, label) + + case *ast.TypeSwitchStmt: + b.typeSwitchStmt(fn, s, label) + + case *ast.SelectStmt: + b.selectStmt(fn, s, label) + + case *ast.ForStmt: + b.forStmt(fn, s, label) + + case *ast.RangeStmt: + b.rangeStmt(fn, s, label) + + default: + panic(fmt.Sprintf("unexpected statement kind: %T", s)) + } +} + +func (b *builder) branchStmt(fn *Function, s *ast.BranchStmt) { + var block *BasicBlock + if s.Label == nil { + block = targetedBlock(fn, s.Tok) + } else { + target := fn.label(s.Label) + block = labelledBlock(fn, target, s.Tok) + if block == nil { // forward goto + lb := fn.lblockOf(target) + block = lb._goto // jump to lb._goto + if fn.jump != nil { + // fn is a range-over-func and the goto may exit fn. + // Create an exit and resolve it at the end of + // builder.buildYieldFunc. + labelExit(fn, target, s.Pos()) + } + } + } + to := block.parent + + if to == fn { + emitJump(fn, block) + } else { // break outside of fn. 
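+		// (Sketch: a `break L` inside a range-over-func body, where L labels a
+		// statement of an enclosing function, cannot branch out of the yield
+		// function directly; the exit is instead recorded and signalled by
+		// storing its id into fn.jump and returning false, as below.)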
+ // fn must be a range-over-func + e := blockExit(fn, block, s.Pos()) + storeVar(fn, fn.jump, intConst(e.id), e.pos) + fn.emit(&Return{Results: []Value{vFalse}, pos: e.pos}) + } + fn.currentBlock = fn.newBasicBlock("unreachable") +} + +func (b *builder) returnStmt(fn *Function, s *ast.ReturnStmt) { + var results []Value + + sig := fn.source.Signature // signature of the enclosing source function + + // Convert return operands to result type. + if len(s.Results) == 1 && sig.Results().Len() > 1 { + // Return of one expression in a multi-valued function. + tuple := b.exprN(fn, s.Results[0]) + ttuple := tuple.Type().(*types.Tuple) + for i, n := 0, ttuple.Len(); i < n; i++ { + results = append(results, + emitConv(fn, emitExtract(fn, tuple, i), + sig.Results().At(i).Type())) + } + } else { + // 1:1 return, or no-arg return in non-void function. + for i, r := range s.Results { + v := emitConv(fn, b.expr(fn, r), sig.Results().At(i).Type()) + results = append(results, v) + } + } + + // Store the results. + for i, r := range results { + var result Value // fn.source.result[i] conceptually + if fn == fn.source { + result = fn.results[i] + } else { // lookup needed? + result = fn.lookup(fn.returnVars[i], false) + } + emitStore(fn, result, r, s.Return) + } + + if fn.jump != nil { + // Return from body of a range-over-func. + // The return statement is syntactically within the loop, + // but the generated code is in the 'switch jump {...}' after it. + e := returnExit(fn, s.Pos()) + storeVar(fn, fn.jump, intConst(e.id), e.pos) + fn.emit(&Return{Results: []Value{vFalse}, pos: e.pos}) + fn.currentBlock = fn.newBasicBlock("unreachable") + return + } + + // Run function calls deferred in this + // function when explicitly returning from it. + fn.emit(new(RunDefers)) + // Reload (potentially) named result variables to form the result tuple. + results = results[:0] + for _, nr := range fn.results { + results = append(results, emitLoad(fn, nr)) + } + fn.emit(&Return{Results: results, pos: s.Return}) + fn.currentBlock = fn.newBasicBlock("unreachable") +} + +// A buildFunc is a strategy for building the SSA body for a function. +type buildFunc = func(*builder, *Function) + +// iterate causes all created but unbuilt functions to be built. As +// this may create new methods, the process is iterated until it +// converges. +// +// Waits for any dependencies to finish building. +func (b *builder) iterate() { + for ; b.finished < len(b.fns); b.finished++ { + fn := b.fns[b.finished] + b.buildFunction(fn) + } + + b.buildshared.markDone() + b.buildshared.wait() +} + +// buildFunction builds SSA code for the body of function fn. Idempotent. +func (b *builder) buildFunction(fn *Function) { + if fn.build != nil { + assert(fn.parent == nil, "anonymous functions should not be built by buildFunction()") + + if fn.Prog.mode&LogSource != 0 { + defer logStack("build %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() + } + fn.build(b, fn) + fn.done() + } +} + +// buildParamsOnly builds fn.Params from fn.Signature, but does not build fn.Body. +func (b *builder) buildParamsOnly(fn *Function) { + // For external (C, asm) functions or functions loaded from + // export data, we must set fn.Params even though there is no + // body code to reference them. + if recv := fn.Signature.Recv(); recv != nil { + fn.addParamVar(recv) + } + params := fn.Signature.Params() + for i, n := 0, params.Len(); i < n; i++ { + fn.addParamVar(params.At(i)) + } +} + +// buildFromSyntax builds fn.Body from fn.syntax, which must be non-nil. 
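+//
+// (Illustrative: fn.build selects one of the build strategies when the
+// Function is created, e.g.
+//
+//	(*builder).buildFromSyntax  // declared functions and function literals
+//	(*builder).buildParamsOnly  // bodiless functions (external, export data)
+//	(*builder).buildYieldFunc   // synthetic range-over-func yield functions
+//	(*builder).buildPackageInit // the synthetic package initializer
+//
+// and buildFunction simply invokes fn.build.)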
+func (b *builder) buildFromSyntax(fn *Function) { + var ( + recvField *ast.FieldList + body *ast.BlockStmt + functype *ast.FuncType + ) + switch syntax := fn.syntax.(type) { + case *ast.FuncDecl: + functype = syntax.Type + recvField = syntax.Recv + body = syntax.Body + if body == nil { + b.buildParamsOnly(fn) // no body (non-Go function) + return + } + case *ast.FuncLit: + functype = syntax.Type + body = syntax.Body + case nil: + panic("no syntax") + default: + panic(syntax) // unexpected syntax + } + fn.source = fn + fn.startBody() + fn.createSyntacticParams(recvField, functype) + fn.createDeferStack() + b.stmt(fn, body) + if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) { + // Control fell off the end of the function's body block. + // + // Block optimizations eliminate the current block, if + // unreachable. It is a builder invariant that + // if this no-arg return is ill-typed for + // fn.Signature.Results, this block must be + // unreachable. The sanity checker checks this. + fn.emit(new(RunDefers)) + fn.emit(new(Return)) + } + fn.finishBody() +} + +// buildYieldFunc builds the body of the yield function created +// from a range-over-func *ast.RangeStmt. +func (b *builder) buildYieldFunc(fn *Function) { + // See builder.rangeFunc for detailed documentation on how fn is set up. + // + // In psuedo-Go this roughly builds: + // func yield(_k tk, _v tv) bool { + // if jump != READY { panic("yield function called after range loop exit") } + // jump = BUSY + // k, v = _k, _v // assign the iterator variable (if needed) + // ... // rng.Body + // continue: + // jump = READY + // return true + // } + s := fn.syntax.(*ast.RangeStmt) + fn.source = fn.parent.source + fn.startBody() + params := fn.Signature.Params() + for i := 0; i < params.Len(); i++ { + fn.addParamVar(params.At(i)) + } + + // Initial targets + ycont := fn.newBasicBlock("yield-continue") + // lblocks is either {} or is {label: nil} where label is the label of syntax. + for label := range fn.lblocks { + fn.lblocks[label] = &lblock{ + label: label, + resolved: true, + _goto: ycont, + _continue: ycont, + // `break label` statement targets fn.parent.targets._break + } + } + fn.targets = &targets{ + _continue: ycont, + // `break` statement targets fn.parent.targets._break. + } + + // continue: + // jump = READY + // return true + saved := fn.currentBlock + fn.currentBlock = ycont + storeVar(fn, fn.jump, jReady, s.Body.Rbrace) + // A yield function's own deferstack is always empty, so rundefers is not needed. + fn.emit(&Return{Results: []Value{vTrue}, pos: token.NoPos}) + + // Emit header: + // + // if jump != READY { panic("yield iterator accessed after exit") } + // jump = BUSY + // k, v = _k, _v + fn.currentBlock = saved + yloop := fn.newBasicBlock("yield-loop") + invalid := fn.newBasicBlock("yield-invalid") + + jumpVal := emitLoad(fn, fn.lookup(fn.jump, true)) + emitIf(fn, emitCompare(fn, token.EQL, jumpVal, jReady, token.NoPos), yloop, invalid) + fn.currentBlock = invalid + fn.emit(&Panic{ + X: emitConv(fn, stringConst("yield function called after range loop exit"), tEface), + }) + + fn.currentBlock = yloop + storeVar(fn, fn.jump, jBusy, s.Body.Rbrace) + + // Initialize k and v from params. 
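+	// For example, for `for k, v := range seq { ... }` the yield function
+	// receives the iteration values as its parameters (_k, _v); they are
+	// stored below into the loop variables k and v, which are first declared
+	// as locals of the yield function when the range clause uses ':='.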
+ var tk, tv types.Type + if s.Key != nil && !isBlankIdent(s.Key) { + tk = fn.typeOf(s.Key) // fn.parent.typeOf is identical + } + if s.Value != nil && !isBlankIdent(s.Value) { + tv = fn.typeOf(s.Value) + } + if s.Tok == token.DEFINE { + if tk != nil { + emitLocalVar(fn, identVar(fn, s.Key.(*ast.Ident))) + } + if tv != nil { + emitLocalVar(fn, identVar(fn, s.Value.(*ast.Ident))) + } + } + var k, v Value + if len(fn.Params) > 0 { + k = fn.Params[0] + } + if len(fn.Params) > 1 { + v = fn.Params[1] + } + var kl, vl lvalue + if tk != nil { + kl = b.addr(fn, s.Key, false) // non-escaping + } + if tv != nil { + vl = b.addr(fn, s.Value, false) // non-escaping + } + if tk != nil { + kl.store(fn, k) + } + if tv != nil { + vl.store(fn, v) + } + + // Build the body of the range loop. + b.stmt(fn, s.Body) + if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) { + // Control fell off the end of the function's body block. + // Block optimizations eliminate the current block, if + // unreachable. + emitJump(fn, ycont) + } + + // Clean up exits and promote any unresolved exits to fn.parent. + for _, e := range fn.exits { + if e.label != nil { + lb := fn.lblocks[e.label] + if lb.resolved { + // label was resolved. Do not turn lb into an exit. + // e does not need to be handled by the parent. + continue + } + + // _goto becomes an exit. + // _goto: + // jump = id + // return false + fn.currentBlock = lb._goto + id := intConst(e.id) + storeVar(fn, fn.jump, id, e.pos) + fn.emit(&Return{Results: []Value{vFalse}, pos: e.pos}) + } + + if e.to != fn { // e needs to be handled by the parent too. + fn.parent.exits = append(fn.parent.exits, e) + } + } + + fn.finishBody() +} + +// addRuntimeType records t as a runtime type, +// along with all types derivable from it using reflection. +// +// Acquires prog.runtimeTypesMu. +func addRuntimeType(prog *Program, t types.Type) { + prog.runtimeTypesMu.Lock() + defer prog.runtimeTypesMu.Unlock() + forEachReachable(&prog.MethodSets, t, func(t types.Type) bool { + prev, _ := prog.runtimeTypes.Set(t, true).(bool) + return !prev // already seen? + }) +} + +// Build calls Package.Build for each package in prog. +// Building occurs in parallel unless the BuildSerially mode flag was set. +// +// Build is intended for whole-program analysis; a typical compiler +// need only build a single package. +// +// Build is idempotent and thread-safe. +func (prog *Program) Build() { + var wg sync.WaitGroup + for _, p := range prog.packages { + if prog.mode&BuildSerially != 0 { + p.Build() + } else { + wg.Add(1) + cpuLimit <- unit{} // acquire a token + go func(p *Package) { + p.Build() + wg.Done() + <-cpuLimit // release a token + }(p) + } + } + wg.Wait() +} + +// cpuLimit is a counting semaphore to limit CPU parallelism. +var cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) + +// Build builds SSA code for all functions and vars in package p. +// +// CreatePackage must have been called for all of p's direct imports +// (and hence its direct imports must have been error-free). It is not +// necessary to call CreatePackage for indirect dependencies. +// Functions will be created for all necessary methods in those +// packages on demand. +// +// Build is idempotent and thread-safe. +func (p *Package) Build() { p.buildOnce.Do(p.build) } + +func (p *Package) build() { + if p.info == nil { + return // synthetic package, e.g. 
"testmain" + } + if p.Prog.mode&LogSource != 0 { + defer logStack("build %s", p)() + } + + b := builder{fns: p.created} + b.iterate() + + // We no longer need transient information: ASTs or go/types deductions. + p.info = nil + p.created = nil + p.files = nil + p.initVersion = nil + + if p.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckPackage(p) + } +} + +// buildPackageInit builds fn.Body for the synthetic package initializer. +func (b *builder) buildPackageInit(fn *Function) { + p := fn.Pkg + fn.startBody() + + var done *BasicBlock + + if p.Prog.mode&BareInits == 0 { + // Make init() skip if package is already initialized. + initguard := p.Var("init$guard") + doinit := fn.newBasicBlock("init.start") + done = fn.newBasicBlock("init.done") + emitIf(fn, emitLoad(fn, initguard), done, doinit) + fn.currentBlock = doinit + emitStore(fn, initguard, vTrue, token.NoPos) + + // Call the init() function of each package we import. + for _, pkg := range p.Pkg.Imports() { + prereq := p.Prog.packages[pkg] + if prereq == nil { + panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) + } + var v Call + v.Call.Value = prereq.init + v.Call.pos = fn.pos + v.setType(types.NewTuple()) + fn.emit(&v) + } + } + + // Initialize package-level vars in correct order. + if len(p.info.InitOrder) > 0 && len(p.files) == 0 { + panic("no source files provided for package. cannot initialize globals") + } + + for _, varinit := range p.info.InitOrder { + if fn.Prog.mode&LogSource != 0 { + fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", + varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) + } + // Initializers for global vars are evaluated in dependency + // order, but may come from arbitrary files of the package + // with different versions, so we transiently update + // fn.goversion for each one. (Since init is a synthetic + // function it has no syntax of its own that needs a version.) + fn.goversion = p.initVersion[varinit.Rhs] + if len(varinit.Lhs) == 1 { + // 1:1 initialization: var x, y = a(), b() + var lval lvalue + if v := varinit.Lhs[0]; v.Name() != "_" { + lval = &address{addr: p.objects[v].(*Global), pos: v.Pos()} + } else { + lval = blank{} + } + b.assign(fn, lval, varinit.Rhs, true, nil) + } else { + // n:1 initialization: var x, y := f() + tuple := b.exprN(fn, varinit.Rhs) + for i, v := range varinit.Lhs { + if v.Name() == "_" { + continue + } + emitStore(fn, p.objects[v].(*Global), emitExtract(fn, tuple, i), v.Pos()) + } + } + } + + // The rest of the init function is synthetic: + // no syntax, info, goversion. + fn.info = nil + fn.goversion = "" + + // Call all of the declared init() functions in source order. + for _, file := range p.files { + for _, decl := range file.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok { + id := decl.Name + if !isBlankIdent(id) && id.Name == "init" && decl.Recv == nil { + declaredInit := p.objects[p.info.Defs[id]].(*Function) + var v Call + v.Call.Value = declaredInit + v.setType(types.NewTuple()) + p.init.emit(&v) + } + } + } + } + + // Finish up init(). + if p.Prog.mode&BareInits == 0 { + emitJump(fn, done) + fn.currentBlock = done + } + fn.emit(new(Return)) + fn.finishBody() +} diff --git a/vendor/golang.org/x/tools/go/ssa/const.go b/vendor/golang.org/x/tools/go/ssa/const.go new file mode 100644 index 0000000..2a4e0dd --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/const.go @@ -0,0 +1,232 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines the Const SSA value type. + +import ( + "fmt" + "go/constant" + "go/token" + "go/types" + "strconv" + "strings" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typeparams" +) + +// NewConst returns a new constant of the specified value and type. +// val must be valid according to the specification of Const.Value. +func NewConst(val constant.Value, typ types.Type) *Const { + if val == nil { + switch soleTypeKind(typ) { + case types.IsBoolean: + val = constant.MakeBool(false) + case types.IsInteger: + val = constant.MakeInt64(0) + case types.IsString: + val = constant.MakeString("") + } + } + return &Const{typ, val} +} + +// soleTypeKind returns a BasicInfo for which constant.Value can +// represent all zero values for the types in the type set. +// +// types.IsBoolean for false is a representative. +// types.IsInteger for 0 +// types.IsString for "" +// 0 otherwise. +func soleTypeKind(typ types.Type) types.BasicInfo { + // State records the set of possible zero values (false, 0, ""). + // Candidates (perhaps all) are eliminated during the type-set + // iteration, which executes at least once. + state := types.IsBoolean | types.IsInteger | types.IsString + underIs(typeSetOf(typ), func(ut types.Type) bool { + var c types.BasicInfo + if t, ok := ut.(*types.Basic); ok { + c = t.Info() + } + if c&types.IsNumeric != 0 { // int/float/complex + c = types.IsInteger + } + state = state & c + return state != 0 + }) + return state +} + +// intConst returns an 'int' constant that evaluates to i. +// (i is an int64 in case the host is narrower than the target.) +func intConst(i int64) *Const { + return NewConst(constant.MakeInt64(i), tInt) +} + +// stringConst returns a 'string' constant that evaluates to s. +func stringConst(s string) *Const { + return NewConst(constant.MakeString(s), tString) +} + +// zeroConst returns a new "zero" constant of the specified type. +func zeroConst(t types.Type) *Const { + return NewConst(nil, t) +} + +func (c *Const) RelString(from *types.Package) string { + var s string + if c.Value == nil { + s = zeroString(c.typ, from) + } else if c.Value.Kind() == constant.String { + s = constant.StringVal(c.Value) + const max = 20 + // TODO(adonovan): don't cut a rune in half. + if len(s) > max { + s = s[:max-3] + "..." // abbreviate + } + s = strconv.Quote(s) + } else { + s = c.Value.String() + } + return s + ":" + relType(c.Type(), from) +} + +// zeroString returns the string representation of the "zero" value of the type t. +func zeroString(t types.Type, from *types.Package) string { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return "false" + case t.Info()&types.IsNumeric != 0: + return "0" + case t.Info()&types.IsString != 0: + return `""` + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return "nil" + default: + panic(fmt.Sprint("zeroString for unexpected type:", t)) + } + case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: + return "nil" + case *types.Named, *aliases.Alias: + return zeroString(t.Underlying(), from) + case *types.Array, *types.Struct: + return relType(t, from) + "{}" + case *types.Tuple: + // Tuples are not normal values. + // We are currently format as "(t[0], ..., t[n])". Could be something else. 
+ components := make([]string, t.Len()) + for i := 0; i < t.Len(); i++ { + components[i] = zeroString(t.At(i).Type(), from) + } + return "(" + strings.Join(components, ", ") + ")" + case *types.TypeParam: + return "*new(" + relType(t, from) + ")" + } + panic(fmt.Sprint("zeroString: unexpected ", t)) +} + +func (c *Const) Name() string { + return c.RelString(nil) +} + +func (c *Const) String() string { + return c.Name() +} + +func (c *Const) Type() types.Type { + return c.typ +} + +func (c *Const) Referrers() *[]Instruction { + return nil +} + +func (c *Const) Parent() *Function { return nil } + +func (c *Const) Pos() token.Pos { + return token.NoPos +} + +// IsNil returns true if this constant is a nil value of +// a nillable reference type (pointer, slice, channel, map, or function), +// a basic interface type, or +// a type parameter all of whose possible instantiations are themselves nillable. +func (c *Const) IsNil() bool { + return c.Value == nil && nillable(c.typ) +} + +// nillable reports whether *new(T) == nil is legal for type T. +func nillable(t types.Type) bool { + if typeparams.IsTypeParam(t) { + return underIs(typeSetOf(t), func(u types.Type) bool { + // empty type set (u==nil) => any underlying types => not nillable + return u != nil && nillable(u) + }) + } + switch t.Underlying().(type) { + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return true + case *types.Interface: + return true // basic interface. + default: + return false + } +} + +// TODO(adonovan): move everything below into golang.org/x/tools/go/ssa/interp. + +// Int64 returns the numeric value of this constant truncated to fit +// a signed 64-bit integer. +func (c *Const) Int64() int64 { + switch x := constant.ToInt(c.Value); x.Kind() { + case constant.Int: + if i, ok := constant.Int64Val(x); ok { + return i + } + return 0 + case constant.Float: + f, _ := constant.Float64Val(x) + return int64(f) + } + panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) +} + +// Uint64 returns the numeric value of this constant truncated to fit +// an unsigned 64-bit integer. +func (c *Const) Uint64() uint64 { + switch x := constant.ToInt(c.Value); x.Kind() { + case constant.Int: + if u, ok := constant.Uint64Val(x); ok { + return u + } + return 0 + case constant.Float: + f, _ := constant.Float64Val(x) + return uint64(f) + } + panic(fmt.Sprintf("unexpected constant value: %T", c.Value)) +} + +// Float64 returns the numeric value of this constant truncated to fit +// a float64. +func (c *Const) Float64() float64 { + x := constant.ToFloat(c.Value) // (c.Value == nil) => x.Kind() == Unknown + f, _ := constant.Float64Val(x) + return f +} + +// Complex128 returns the complex value of this constant truncated to +// fit a complex128. +func (c *Const) Complex128() complex128 { + x := constant.ToComplex(c.Value) // (c.Value == nil) => x.Kind() == Unknown + re, _ := constant.Float64Val(constant.Real(x)) + im, _ := constant.Float64Val(constant.Imag(x)) + return complex(re, im) +} diff --git a/vendor/golang.org/x/tools/go/ssa/coretype.go b/vendor/golang.org/x/tools/go/ssa/coretype.go new file mode 100644 index 0000000..8c218f9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/coretype.go @@ -0,0 +1,161 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typeparams" +) + +// Utilities for dealing with core types. + +// isBytestring returns true if T has the same terms as interface{[]byte | string}. +// These act like a core type for some operations: slice expressions, append and copy. +// +// See https://go.dev/ref/spec#Core_types for the details on bytestring. +func isBytestring(T types.Type) bool { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return false + } + + tset := typeSetOf(U) + if tset.Len() != 2 { + return false + } + hasBytes, hasString := false, false + underIs(tset, func(t types.Type) bool { + switch { + case isString(t): + hasString = true + case isByteSlice(t): + hasBytes = true + } + return hasBytes || hasString + }) + return hasBytes && hasString +} + +// termList is a list of types. +type termList []*types.Term // type terms of the type set +func (s termList) Len() int { return len(s) } +func (s termList) At(i int) types.Type { return s[i].Type() } + +// typeSetOf returns the type set of typ. Returns an empty typeset on an error. +func typeSetOf(typ types.Type) termList { + // This is a adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on. + var terms []*types.Term + var err error + // typeSetOf(t) == typeSetOf(Unalias(t)) + switch typ := aliases.Unalias(typ).(type) { + case *types.TypeParam: + terms, err = typeparams.StructuralTerms(typ) + case *types.Union: + terms, err = typeparams.UnionTermSet(typ) + case *types.Interface: + terms, err = typeparams.InterfaceTermSet(typ) + default: + // Common case. + // Specializing the len=1 case to avoid a slice + // had no measurable space/time benefit. + terms = []*types.Term{types.NewTerm(false, typ)} + } + + if err != nil { + return termList(nil) + } + return termList(terms) +} + +// underIs calls f with the underlying types of the specific type terms +// of s and reports whether all calls to f returned true. If there are +// no specific terms, underIs returns the result of f(nil). +func underIs(s termList, f func(types.Type) bool) bool { + if s.Len() == 0 { + return f(nil) + } + for i := 0; i < s.Len(); i++ { + u := s.At(i).Underlying() + if !f(u) { + return false + } + } + return true +} + +// indexType returns the element type and index mode of a IndexExpr over a type. +// It returns (nil, invalid) if the type is not indexable; this should never occur in a well-typed program. +func indexType(typ types.Type) (types.Type, indexMode) { + switch U := typ.Underlying().(type) { + case *types.Array: + return U.Elem(), ixArrVar + case *types.Pointer: + if arr, ok := U.Elem().Underlying().(*types.Array); ok { + return arr.Elem(), ixVar + } + case *types.Slice: + return U.Elem(), ixVar + case *types.Map: + return U.Elem(), ixMap + case *types.Basic: + return tByte, ixValue // must be a string + case *types.Interface: + tset := typeSetOf(U) + if tset.Len() == 0 { + return nil, ixInvalid // no underlying terms or error is empty. + } + + elem, mode := indexType(tset.At(0)) + for i := 1; i < tset.Len() && mode != ixInvalid; i++ { + e, m := indexType(tset.At(i)) + if !types.Identical(elem, e) { // if type checked, just a sanity check + return nil, ixInvalid + } + // Update the mode to the most constrained address type. + mode = mode.meet(m) + } + if mode != ixInvalid { + return elem, mode + } + } + return nil, ixInvalid +} + +// An indexMode specifies the (addressing) mode of an index operand. 
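+// For example, indexing a slice or a pointer-to-array yields ixVar (an
+// addressable element), indexing an array value yields ixArrVar, indexing a
+// string yields ixValue (not addressable), and indexing a map yields ixMap.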
+// +// Addressing mode of an index operation is based on the set of +// underlying types. +// Hasse diagram of the indexMode meet semi-lattice: +// +// ixVar ixMap +// | | +// ixArrVar | +// | | +// ixValue | +// \ / +// ixInvalid +type indexMode byte + +const ( + ixInvalid indexMode = iota // index is invalid + ixValue // index is a computed value (not addressable) + ixArrVar // like ixVar, but index operand contains an array + ixVar // index is an addressable variable + ixMap // index is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment) +) + +// meet is the address type that is constrained by both x and y. +func (x indexMode) meet(y indexMode) indexMode { + if (x == ixMap || y == ixMap) && x != y { + return ixInvalid + } + // Use int representation and return min. + if x < y { + return y + } + return x +} diff --git a/vendor/golang.org/x/tools/go/ssa/create.go b/vendor/golang.org/x/tools/go/ssa/create.go new file mode 100644 index 0000000..423bce8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/create.go @@ -0,0 +1,318 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the CREATE phase of SSA construction. +// See builder.go for explanation. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "sync" + + "golang.org/x/tools/internal/versions" +) + +// NewProgram returns a new SSA Program. +// +// mode controls diagnostics and checking during SSA construction. +// +// To construct an SSA program: +// +// - Call NewProgram to create an empty Program. +// - Call CreatePackage providing typed syntax for each package +// you want to build, and call it with types but not +// syntax for each of those package's direct dependencies. +// - Call [Package.Build] on each syntax package you wish to build, +// or [Program.Build] to build all of them. +// +// See the Example tests for simple examples. +func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { + return &Program{ + Fset: fset, + imported: make(map[string]*Package), + packages: make(map[*types.Package]*Package), + mode: mode, + canon: newCanonizer(), + ctxt: types.NewContext(), + } +} + +// memberFromObject populates package pkg with a member for the +// typechecker object obj. +// +// For objects from Go source code, syntax is the associated syntax +// tree (for funcs and vars only) and goversion defines the +// appropriate interpretation; they will be used during the build +// phase. 
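+//
+// For example, a source package containing
+//
+//	var x int
+//	func f() {}
+//	func init() {}
+//
+// gains a *Global member "x" (its type is *int, since a Global denotes the
+// variable's address), a *Function member "f", and a *Function "init#1"
+// (declared init functions are renamed so they do not collide with the
+// synthetic package initializer).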
+func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node, goversion string) { + name := obj.Name() + switch obj := obj.(type) { + case *types.Builtin: + if pkg.Pkg != types.Unsafe { + panic("unexpected builtin object: " + obj.String()) + } + + case *types.TypeName: + if name != "_" { + pkg.Members[name] = &Type{ + object: obj, + pkg: pkg, + } + } + + case *types.Const: + c := &NamedConst{ + object: obj, + Value: NewConst(obj.Val(), obj.Type()), + pkg: pkg, + } + pkg.objects[obj] = c + if name != "_" { + pkg.Members[name] = c + } + + case *types.Var: + g := &Global{ + Pkg: pkg, + name: name, + object: obj, + typ: types.NewPointer(obj.Type()), // address + pos: obj.Pos(), + } + pkg.objects[obj] = g + if name != "_" { + pkg.Members[name] = g + } + + case *types.Func: + sig := obj.Type().(*types.Signature) + if sig.Recv() == nil && name == "init" { + pkg.ninit++ + name = fmt.Sprintf("init#%d", pkg.ninit) + } + fn := createFunction(pkg.Prog, obj, name, syntax, pkg.info, goversion) + fn.Pkg = pkg + pkg.created = append(pkg.created, fn) + pkg.objects[obj] = fn + if name != "_" && sig.Recv() == nil { + pkg.Members[name] = fn // package-level function + } + + default: // (incl. *types.Package) + panic("unexpected Object type: " + obj.String()) + } +} + +// createFunction creates a function or method. It supports both +// CreatePackage (with or without syntax) and the on-demand creation +// of methods in non-created packages based on their types.Func. +func createFunction(prog *Program, obj *types.Func, name string, syntax ast.Node, info *types.Info, goversion string) *Function { + sig := obj.Type().(*types.Signature) + + // Collect type parameters. + var tparams *types.TypeParamList + if rtparams := sig.RecvTypeParams(); rtparams.Len() > 0 { + tparams = rtparams // method of generic type + } else if sigparams := sig.TypeParams(); sigparams.Len() > 0 { + tparams = sigparams // generic function + } + + /* declared function/method (from syntax or export data) */ + fn := &Function{ + name: name, + object: obj, + Signature: sig, + build: (*builder).buildFromSyntax, + syntax: syntax, + info: info, + goversion: goversion, + pos: obj.Pos(), + Pkg: nil, // may be set by caller + Prog: prog, + typeparams: tparams, + } + if fn.syntax == nil { + fn.Synthetic = "from type information" + fn.build = (*builder).buildParamsOnly + } + if tparams.Len() > 0 { + fn.generic = new(generic) + } + return fn +} + +// membersFromDecl populates package pkg with members for each +// typechecker object (var, func, const or type) associated with the +// specified decl. 
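+//
+// For example, `var a, b = f(), 1` creates one member per named variable and
+// records the declaring file's Go version for each right-hand side expression
+// in pkg.initVersion, so that the synthetic package initializer can later be
+// built with the language semantics of that file.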
+func membersFromDecl(pkg *Package, decl ast.Decl, goversion string) { + switch decl := decl.(type) { + case *ast.GenDecl: // import, const, type or var + switch decl.Tok { + case token.CONST: + for _, spec := range decl.Specs { + for _, id := range spec.(*ast.ValueSpec).Names { + memberFromObject(pkg, pkg.info.Defs[id], nil, "") + } + } + + case token.VAR: + for _, spec := range decl.Specs { + for _, rhs := range spec.(*ast.ValueSpec).Values { + pkg.initVersion[rhs] = goversion + } + for _, id := range spec.(*ast.ValueSpec).Names { + memberFromObject(pkg, pkg.info.Defs[id], spec, goversion) + } + } + + case token.TYPE: + for _, spec := range decl.Specs { + id := spec.(*ast.TypeSpec).Name + memberFromObject(pkg, pkg.info.Defs[id], nil, "") + } + } + + case *ast.FuncDecl: + id := decl.Name + memberFromObject(pkg, pkg.info.Defs[id], decl, goversion) + } +} + +// CreatePackage creates and returns an SSA Package from the +// specified type-checked, error-free file ASTs, and populates its +// Members mapping. +// +// importable determines whether this package should be returned by a +// subsequent call to ImportedPackage(pkg.Path()). +// +// The real work of building SSA form for each function is not done +// until a subsequent call to Package.Build. +// +// CreatePackage should not be called after building any package in +// the program. +func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { + // TODO(adonovan): assert that no package has yet been built. + if pkg == nil { + panic("nil pkg") // otherwise pkg.Scope below returns types.Universe! + } + p := &Package{ + Prog: prog, + Members: make(map[string]Member), + objects: make(map[types.Object]Member), + Pkg: pkg, + syntax: info != nil, + // transient values (cleared after Package.Build) + info: info, + files: files, + initVersion: make(map[ast.Expr]string), + } + + /* synthesized package initializer */ + p.init = &Function{ + name: "init", + Signature: new(types.Signature), + Synthetic: "package initializer", + Pkg: p, + Prog: prog, + build: (*builder).buildPackageInit, + info: p.info, + goversion: "", // See Package.build for details. + } + p.Members[p.init.name] = p.init + p.created = append(p.created, p.init) + + // Allocate all package members: vars, funcs, consts and types. + if len(files) > 0 { + // Go source package. + for _, file := range files { + goversion := versions.Lang(versions.FileVersion(p.info, file)) + for _, decl := range file.Decls { + membersFromDecl(p, decl, goversion) + } + } + } else { + // GC-compiled binary package (or "unsafe") + // No code. + // No position information. + scope := p.Pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + memberFromObject(p, obj, nil, "") + if obj, ok := obj.(*types.TypeName); ok { + // No Unalias: aliases should not duplicate methods. + if named, ok := obj.Type().(*types.Named); ok { + for i, n := 0, named.NumMethods(); i < n; i++ { + memberFromObject(p, named.Method(i), nil, "") + } + } + } + } + } + + if prog.mode&BareInits == 0 { + // Add initializer guard variable. 
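+		// (init$guard is a package-level boolean global; the synthetic init
+		// function, built by (*builder).buildPackageInit, loads it and skips
+		// re-initialization when it is already true.)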
+ initguard := &Global{ + Pkg: p, + name: "init$guard", + typ: types.NewPointer(tBool), + } + p.Members[initguard.Name()] = initguard + } + + if prog.mode&GlobalDebug != 0 { + p.SetDebugMode(true) + } + + if prog.mode&PrintPackages != 0 { + printMu.Lock() + p.WriteTo(os.Stdout) + printMu.Unlock() + } + + if importable { + prog.imported[p.Pkg.Path()] = p + } + prog.packages[p.Pkg] = p + + return p +} + +// printMu serializes printing of Packages/Functions to stdout. +var printMu sync.Mutex + +// AllPackages returns a new slice containing all packages created by +// prog.CreatePackage in unspecified order. +func (prog *Program) AllPackages() []*Package { + pkgs := make([]*Package, 0, len(prog.packages)) + for _, pkg := range prog.packages { + pkgs = append(pkgs, pkg) + } + return pkgs +} + +// ImportedPackage returns the importable Package whose PkgPath +// is path, or nil if no such Package has been created. +// +// A parameter to CreatePackage determines whether a package should be +// considered importable. For example, no import declaration can resolve +// to the ad-hoc main package created by 'go build foo.go'. +// +// TODO(adonovan): rethink this function and the "importable" concept; +// most packages are importable. This function assumes that all +// types.Package.Path values are unique within the ssa.Program, which is +// false---yet this function remains very convenient. +// Clients should use (*Program).Package instead where possible. +// SSA doesn't really need a string-keyed map of packages. +// +// Furthermore, the graph of packages may contain multiple variants +// (e.g. "p" vs "p as compiled for q.test"), and each has a different +// view of its dependencies. +func (prog *Program) ImportedPackage(path string) *Package { + return prog.imported[path] +} diff --git a/vendor/golang.org/x/tools/go/ssa/doc.go b/vendor/golang.org/x/tools/go/ssa/doc.go new file mode 100644 index 0000000..3310b55 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/doc.go @@ -0,0 +1,122 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ssa defines a representation of the elements of Go programs +// (packages, types, functions, variables and constants) using a +// static single-assignment (SSA) form intermediate representation +// (IR) for the bodies of functions. +// +// For an introduction to SSA form, see +// http://en.wikipedia.org/wiki/Static_single_assignment_form. +// This page provides a broader reading list: +// http://www.dcs.gla.ac.uk/~jsinger/ssa.html. +// +// The level of abstraction of the SSA form is intentionally close to +// the source language to facilitate construction of source analysis +// tools. It is not intended for machine code generation. +// +// All looping, branching and switching constructs are replaced with +// unstructured control flow. Higher-level control flow constructs +// such as multi-way branch can be reconstructed as needed; see +// [golang.org/x/tools/go/ssa/ssautil.Switches] for an example. +// +// The simplest way to create the SSA representation of a package is +// to load typed syntax trees using [golang.org/x/tools/go/packages], then +// invoke the [golang.org/x/tools/go/ssa/ssautil.Packages] helper function. +// (See the package-level Examples named LoadPackages and LoadWholeProgram.) 
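+// A minimal sketch of that flow (error handling omitted):
+//
+//	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
+//	initial, _ := packages.Load(cfg, "fmt")
+//	prog, _ := ssautil.Packages(initial, ssa.SanityCheckFunctions)
+//	prog.Build()
+//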
+// The resulting [ssa.Program] contains all the packages and their +// members, but SSA code is not created for function bodies until a +// subsequent call to [Package.Build] or [Program.Build]. +// +// The builder initially builds a naive SSA form in which all local +// variables are addresses of stack locations with explicit loads and +// stores. Registerisation of eligible locals and φ-node insertion +// using dominance and dataflow are then performed as a second pass +// called "lifting" to improve the accuracy and performance of +// subsequent analyses; this pass can be skipped by setting the +// NaiveForm builder flag. +// +// The primary interfaces of this package are: +// +// - [Member]: a named member of a Go package. +// - [Value]: an expression that yields a value. +// - [Instruction]: a statement that consumes values and performs computation. +// - [Node]: a [Value] or [Instruction] (emphasizing its membership in the SSA value graph) +// +// A computation that yields a result implements both the [Value] and +// [Instruction] interfaces. The following table shows for each +// concrete type which of these interfaces it implements. +// +// Value? Instruction? Member? +// *Alloc ✔ ✔ +// *BinOp ✔ ✔ +// *Builtin ✔ +// *Call ✔ ✔ +// *ChangeInterface ✔ ✔ +// *ChangeType ✔ ✔ +// *Const ✔ +// *Convert ✔ ✔ +// *DebugRef ✔ +// *Defer ✔ +// *Extract ✔ ✔ +// *Field ✔ ✔ +// *FieldAddr ✔ ✔ +// *FreeVar ✔ +// *Function ✔ ✔ (func) +// *Global ✔ ✔ (var) +// *Go ✔ +// *If ✔ +// *Index ✔ ✔ +// *IndexAddr ✔ ✔ +// *Jump ✔ +// *Lookup ✔ ✔ +// *MakeChan ✔ ✔ +// *MakeClosure ✔ ✔ +// *MakeInterface ✔ ✔ +// *MakeMap ✔ ✔ +// *MakeSlice ✔ ✔ +// *MapUpdate ✔ +// *MultiConvert ✔ ✔ +// *NamedConst ✔ (const) +// *Next ✔ ✔ +// *Panic ✔ +// *Parameter ✔ +// *Phi ✔ ✔ +// *Range ✔ ✔ +// *Return ✔ +// *RunDefers ✔ +// *Select ✔ ✔ +// *Send ✔ +// *Slice ✔ ✔ +// *SliceToArrayPointer ✔ ✔ +// *Store ✔ +// *Type ✔ (type) +// *TypeAssert ✔ ✔ +// *UnOp ✔ ✔ +// +// Other key types in this package include: [Program], [Package], [Function] +// and [BasicBlock]. +// +// The program representation constructed by this package is fully +// resolved internally, i.e. it does not rely on the names of Values, +// Packages, Functions, Types or BasicBlocks for the correct +// interpretation of the program. Only the identities of objects and +// the topology of the SSA and type graphs are semantically +// significant. (There is one exception: [types.Id] values, which identify field +// and method names, contain strings.) Avoidance of name-based +// operations simplifies the implementation of subsequent passes and +// can make them very efficient. Many objects are nonetheless named +// to aid in debugging, but it is not essential that the names be +// either accurate or unambiguous. The public API exposes a number of +// name-based maps for client convenience. +// +// The [golang.org/x/tools/go/ssa/ssautil] package provides various +// helper functions, for example to simplify loading a Go program into +// SSA form. +// +// TODO(adonovan): write a how-to document for all the various cases +// of trying to determine corresponding elements across the four +// domains of source locations, ast.Nodes, types.Objects, +// ssa.Values/Instructions. +package ssa // import "golang.org/x/tools/go/ssa" diff --git a/vendor/golang.org/x/tools/go/ssa/dom.go b/vendor/golang.org/x/tools/go/ssa/dom.go new file mode 100644 index 0000000..02c1ae8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/dom.go @@ -0,0 +1,340 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines algorithms related to dominance. + +// Dominator tree construction ---------------------------------------- +// +// We use the algorithm described in Lengauer & Tarjan. 1979. A fast +// algorithm for finding dominators in a flowgraph. +// http://doi.acm.org/10.1145/357062.357071 +// +// We also apply the optimizations to SLT described in Georgiadis et +// al, Finding Dominators in Practice, JGAA 2006, +// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf +// to avoid the need for buckets of size > 1. + +import ( + "bytes" + "fmt" + "math/big" + "os" + "sort" +) + +// Idom returns the block that immediately dominates b: +// its parent in the dominator tree, if any. +// Neither the entry node (b.Index==0) nor recover node +// (b==b.Parent().Recover()) have a parent. +func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } + +// Dominees returns the list of blocks that b immediately dominates: +// its children in the dominator tree. +func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children } + +// Dominates reports whether b dominates c. +func (b *BasicBlock) Dominates(c *BasicBlock) bool { + return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post +} + +// DomPreorder returns a new slice containing the blocks of f +// in a preorder traversal of the dominator tree. +func (f *Function) DomPreorder() []*BasicBlock { + slice := append([]*BasicBlock(nil), f.Blocks...) + sort.Slice(slice, func(i, j int) bool { + return slice[i].dom.pre < slice[j].dom.pre + }) + return slice +} + +// DomPostorder returns a new slice containing the blocks of f +// in a postorder traversal of the dominator tree. +// (This is not the same as a postdominance order.) +func (f *Function) DomPostorder() []*BasicBlock { + slice := append([]*BasicBlock(nil), f.Blocks...) + sort.Slice(slice, func(i, j int) bool { + return slice[i].dom.post < slice[j].dom.post + }) + return slice +} + +// domInfo contains a BasicBlock's dominance information. +type domInfo struct { + idom *BasicBlock // immediate dominator (parent in domtree) + children []*BasicBlock // nodes immediately dominated by this one + pre, post int32 // pre- and post-order numbering within domtree +} + +// ltState holds the working state for Lengauer-Tarjan algorithm +// (during which domInfo.pre is repurposed for CFG DFS preorder number). +type ltState struct { + // Each slice is indexed by b.Index. + sdom []*BasicBlock // b's semidominator + parent []*BasicBlock // b's parent in DFS traversal of CFG + ancestor []*BasicBlock // b's ancestor with least sdom +} + +// dfs implements the depth-first search part of the LT algorithm. +func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 { + preorder[i] = v + v.dom.pre = i // For now: DFS preorder of spanning tree of CFG + i++ + lt.sdom[v.Index] = v + lt.link(nil, v) + for _, w := range v.Succs { + if lt.sdom[w.Index] == nil { + lt.parent[w.Index] = v + i = lt.dfs(w, i, preorder) + } + } + return i +} + +// eval implements the EVAL part of the LT algorithm. +func (lt *ltState) eval(v *BasicBlock) *BasicBlock { + // TODO(adonovan): opt: do path compression per simple LT. + u := v + for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] { + if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre { + u = v + } + } + return u +} + +// link implements the LINK part of the LT algorithm. 
+func (lt *ltState) link(v, w *BasicBlock) { + lt.ancestor[w.Index] = v +} + +// buildDomTree computes the dominator tree of f using the LT algorithm. +// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). +func buildDomTree(f *Function) { + // The step numbers refer to the original LT paper; the + // reordering is due to Georgiadis. + + // Clear any previous domInfo. + for _, b := range f.Blocks { + b.dom = domInfo{} + } + + n := len(f.Blocks) + // Allocate space for 5 contiguous [n]*BasicBlock arrays: + // sdom, parent, ancestor, preorder, buckets. + space := make([]*BasicBlock, 5*n) + lt := ltState{ + sdom: space[0:n], + parent: space[n : 2*n], + ancestor: space[2*n : 3*n], + } + + // Step 1. Number vertices by depth-first preorder. + preorder := space[3*n : 4*n] + root := f.Blocks[0] + prenum := lt.dfs(root, 0, preorder) + recover := f.Recover + if recover != nil { + lt.dfs(recover, prenum, preorder) + } + + buckets := space[4*n : 5*n] + copy(buckets, preorder) + + // In reverse preorder... + for i := int32(n) - 1; i > 0; i-- { + w := preorder[i] + + // Step 3. Implicitly define the immediate dominator of each node. + for v := buckets[i]; v != w; v = buckets[v.dom.pre] { + u := lt.eval(v) + if lt.sdom[u.Index].dom.pre < i { + v.dom.idom = u + } else { + v.dom.idom = w + } + } + + // Step 2. Compute the semidominators of all nodes. + lt.sdom[w.Index] = lt.parent[w.Index] + for _, v := range w.Preds { + u := lt.eval(v) + if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre { + lt.sdom[w.Index] = lt.sdom[u.Index] + } + } + + lt.link(lt.parent[w.Index], w) + + if lt.parent[w.Index] == lt.sdom[w.Index] { + w.dom.idom = lt.parent[w.Index] + } else { + buckets[i] = buckets[lt.sdom[w.Index].dom.pre] + buckets[lt.sdom[w.Index].dom.pre] = w + } + } + + // The final 'Step 3' is now outside the loop. + for v := buckets[0]; v != root; v = buckets[v.dom.pre] { + v.dom.idom = root + } + + // Step 4. Explicitly define the immediate dominator of each + // node, in preorder. + for _, w := range preorder[1:] { + if w == root || w == recover { + w.dom.idom = nil + } else { + if w.dom.idom != lt.sdom[w.Index] { + w.dom.idom = w.dom.idom.dom.idom + } + // Calculate Children relation as inverse of Idom. + w.dom.idom.dom.children = append(w.dom.idom.dom.children, w) + } + } + + pre, post := numberDomTree(root, 0, 0) + if recover != nil { + numberDomTree(recover, pre, post) + } + + // printDomTreeDot(os.Stderr, f) // debugging + // printDomTreeText(os.Stderr, root, 0) // debugging + + if f.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckDomTree(f) + } +} + +// numberDomTree sets the pre- and post-order numbers of a depth-first +// traversal of the dominator tree rooted at v. These are used to +// answer dominance queries in constant time. +func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { + v.dom.pre = pre + pre++ + for _, child := range v.dom.children { + pre, post = numberDomTree(child, pre, post) + } + v.dom.post = post + post++ + return pre, post +} + +// Testing utilities ---------------------------------------- + +// sanityCheckDomTree checks the correctness of the dominator tree +// computed by the LT algorithm by comparing against the dominance +// relation computed by a naive Kildall-style forward dataflow +// analysis (Algorithm 10.16 from the "Dragon" book). +func sanityCheckDomTree(f *Function) { + n := len(f.Blocks) + + // D[i] is the set of blocks that dominate f.Blocks[i], + // represented as a bit-set of block indices. 
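+	// (For example, if block 3 is dominated by blocks 0, 2, and itself, then
+	// D[3] has bits 0, 2 and 3 set, i.e. D[3] = 0b1101.)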
+ D := make([]big.Int, n) + + one := big.NewInt(1) + + // all is the set of all blocks; constant. + var all big.Int + all.Set(one).Lsh(&all, uint(n)).Sub(&all, one) + + // Initialization. + for i, b := range f.Blocks { + if i == 0 || b == f.Recover { + // A root is dominated only by itself. + D[i].SetBit(&D[0], 0, 1) + } else { + // All other blocks are (initially) dominated + // by every block. + D[i].Set(&all) + } + } + + // Iteration until fixed point. + for changed := true; changed; { + changed = false + for i, b := range f.Blocks { + if i == 0 || b == f.Recover { + continue + } + // Compute intersection across predecessors. + var x big.Int + x.Set(&all) + for _, pred := range b.Preds { + x.And(&x, &D[pred.Index]) + } + x.SetBit(&x, i, 1) // a block always dominates itself. + if D[i].Cmp(&x) != 0 { + D[i].Set(&x) + changed = true + } + } + } + + // Check the entire relation. O(n^2). + // The Recover block (if any) must be treated specially so we skip it. + ok := true + for i := 0; i < n; i++ { + for j := 0; j < n; j++ { + b, c := f.Blocks[i], f.Blocks[j] + if c == f.Recover { + continue + } + actual := b.Dominates(c) + expected := D[j].Bit(i) == 1 + if actual != expected { + fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected) + ok = false + } + } + } + + preorder := f.DomPreorder() + for _, b := range f.Blocks { + if got := preorder[b.dom.pre]; got != b { + fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b) + ok = false + } + } + + if !ok { + panic("sanityCheckDomTree failed for " + f.String()) + } + +} + +// Printing functions ---------------------------------------- + +// printDomTreeText prints the dominator tree as text, using indentation. +func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { + fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) + for _, child := range v.dom.children { + printDomTreeText(buf, child, indent+1) + } +} + +// printDomTreeDot prints the dominator tree of f in AT&T GraphViz +// (.dot) format. +func printDomTreeDot(buf *bytes.Buffer, f *Function) { + fmt.Fprintln(buf, "//", f) + fmt.Fprintln(buf, "digraph domtree {") + for i, b := range f.Blocks { + v := b.dom + fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) + // TODO(adonovan): improve appearance of edges + // belonging to both dominator tree and CFG. + + // Dominator tree edge. + if i != 0 { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre) + } + // CFG edges. + for _, pred := range b.Preds { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre) + } + } + fmt.Fprintln(buf, "}") +} diff --git a/vendor/golang.org/x/tools/go/ssa/emit.go b/vendor/golang.org/x/tools/go/ssa/emit.go new file mode 100644 index 0000000..c664ff8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/emit.go @@ -0,0 +1,614 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// Helpers for emitting SSA instructions. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// emitAlloc emits to f a new Alloc instruction allocating a variable +// of type typ. +// +// The caller must set Alloc.Heap=true (for an heap-allocated variable) +// or add the Alloc to f.Locals (for a frame-allocated variable). 
+// +// During building, a variable in f.Locals may have its Heap flag +// set when it is discovered that its address is taken. +// These Allocs are removed from f.Locals at the end. +// +// The builder should generally call one of the emit{New,Local,LocalVar} wrappers instead. +func emitAlloc(f *Function, typ types.Type, pos token.Pos, comment string) *Alloc { + v := &Alloc{Comment: comment} + v.setType(types.NewPointer(typ)) + v.setPos(pos) + f.emit(v) + return v +} + +// emitNew emits to f a new Alloc instruction heap-allocating a +// variable of type typ. pos is the optional source location. +func emitNew(f *Function, typ types.Type, pos token.Pos, comment string) *Alloc { + alloc := emitAlloc(f, typ, pos, comment) + alloc.Heap = true + return alloc +} + +// emitLocal creates a local var for (t, pos, comment) and +// emits an Alloc instruction for it. +// +// (Use this function or emitNew for synthetic variables; +// for source-level variables in the same function, use emitLocalVar.) +func emitLocal(f *Function, t types.Type, pos token.Pos, comment string) *Alloc { + local := emitAlloc(f, t, pos, comment) + f.Locals = append(f.Locals, local) + return local +} + +// emitLocalVar creates a local var for v and emits an Alloc instruction for it. +// Subsequent calls to f.lookup(v) return it. +// It applies the appropriate generic instantiation to the type. +func emitLocalVar(f *Function, v *types.Var) *Alloc { + alloc := emitLocal(f, f.typ(v.Type()), v.Pos(), v.Name()) + f.vars[v] = alloc + return alloc +} + +// emitLoad emits to f an instruction to load the address addr into a +// new temporary, and returns the value so defined. +func emitLoad(f *Function, addr Value) *UnOp { + v := &UnOp{Op: token.MUL, X: addr} + v.setType(typeparams.MustDeref(addr.Type())) + f.emit(v) + return v +} + +// emitDebugRef emits to f a DebugRef pseudo-instruction associating +// expression e with value v. +func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { + if !f.debugInfo() { + return // debugging not enabled + } + if v == nil || e == nil { + panic("nil") + } + var obj types.Object + e = unparen(e) + if id, ok := e.(*ast.Ident); ok { + if isBlankIdent(id) { + return + } + obj = f.objectOf(id) + switch obj.(type) { + case *types.Nil, *types.Const, *types.Builtin: + return + } + } + f.emit(&DebugRef{ + X: v, + Expr: e, + IsAddr: isAddr, + object: obj, + }) +} + +// emitArith emits to f code to compute the binary operation op(x, y) +// where op is an eager shift, logical or arithmetic operation. +// (Use emitCompare() for comparisons and Builder.logicalBinop() for +// non-eager operations.) +func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value { + switch op { + case token.SHL, token.SHR: + x = emitConv(f, x, t) + // y may be signed or an 'untyped' constant. + + // There is a runtime panic if y is signed and <0. Instead of inserting a check for y<0 + // and converting to an unsigned value (like the compiler) leave y as is. + + if isUntyped(y.Type().Underlying()) { + // Untyped conversion: + // Spec https://go.dev/ref/spec#Operators: + // The right operand in a shift expression must have integer type or be an untyped constant + // representable by a value of type uint. 
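+			// (For example, an untyped constant shift count, such as the 2 in
+			// `x << 2`, may reach this point with an untyped type and is
+			// converted to uint before the BinOp is emitted.)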
+ y = emitConv(f, y, types.Typ[types.Uint]) + } + + case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: + x = emitConv(f, x, t) + y = emitConv(f, y, t) + + default: + panic("illegal op in emitArith: " + op.String()) + + } + v := &BinOp{ + Op: op, + X: x, + Y: y, + } + v.setPos(pos) + v.setType(t) + return f.emit(v) +} + +// emitCompare emits to f code compute the boolean result of +// comparison 'x op y'. +func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { + xt := x.Type().Underlying() + yt := y.Type().Underlying() + + // Special case to optimise a tagless SwitchStmt so that + // these are equivalent + // switch { case e: ...} + // switch true { case e: ... } + // if e==true { ... } + // even in the case when e's type is an interface. + // TODO(adonovan): opt: generalise to x==true, false!=y, etc. + if x == vTrue && op == token.EQL { + if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 { + return y + } + } + + if types.Identical(xt, yt) { + // no conversion necessary + } else if isNonTypeParamInterface(x.Type()) { + y = emitConv(f, y, x.Type()) + } else if isNonTypeParamInterface(y.Type()) { + x = emitConv(f, x, y.Type()) + } else if _, ok := x.(*Const); ok { + x = emitConv(f, x, y.Type()) + } else if _, ok := y.(*Const); ok { + y = emitConv(f, y, x.Type()) + } else { + // other cases, e.g. channels. No-op. + } + + v := &BinOp{ + Op: op, + X: x, + Y: y, + } + v.setPos(pos) + v.setType(tBool) + return f.emit(v) +} + +// isValuePreserving returns true if a conversion from ut_src to +// ut_dst is value-preserving, i.e. just a change of type. +// Precondition: neither argument is a named or alias type. +func isValuePreserving(ut_src, ut_dst types.Type) bool { + // Identical underlying types? + if types.IdenticalIgnoreTags(ut_dst, ut_src) { + return true + } + + switch ut_dst.(type) { + case *types.Chan: + // Conversion between channel types? + _, ok := ut_src.(*types.Chan) + return ok + + case *types.Pointer: + // Conversion between pointers with identical base types? + _, ok := ut_src.(*types.Pointer) + return ok + } + return false +} + +// emitConv emits to f code to convert Value val to exactly type typ, +// and returns the converted value. Implicit conversions are required +// by language assignability rules in assignments, parameter passing, +// etc. +func emitConv(f *Function, val Value, typ types.Type) Value { + t_src := val.Type() + + // Identical types? Conversion is a no-op. + if types.Identical(t_src, typ) { + return val + } + ut_dst := typ.Underlying() + ut_src := t_src.Underlying() + + // Conversion to, or construction of a value of, an interface type? + if isNonTypeParamInterface(typ) { + // Interface name change? + if isValuePreserving(ut_src, ut_dst) { + c := &ChangeType{X: val} + c.setType(typ) + return f.emit(c) + } + + // Assignment from one interface type to another? + if isNonTypeParamInterface(t_src) { + c := &ChangeInterface{X: val} + c.setType(typ) + return f.emit(c) + } + + // Untyped nil constant? Return interface-typed nil constant. + if ut_src == tUntypedNil { + return zeroConst(typ) + } + + // Convert (non-nil) "untyped" literals to their default type. + if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 { + val = emitConv(f, val, types.Default(ut_src)) + } + + // Record the types of operands to MakeInterface, if + // non-parameterized, as they are the set of runtime types. 
+ t := val.Type() + if f.typeparams.Len() == 0 || !f.Prog.isParameterized(t) { + addRuntimeType(f.Prog, t) + } + + mi := &MakeInterface{X: val} + mi.setType(typ) + return f.emit(mi) + } + + // In the common case, the typesets of src and dst are singletons + // and we emit an appropriate conversion. But if either contains + // a type parameter, the conversion may represent a cross product, + // in which case which we emit a MultiConvert. + dst_terms := typeSetOf(ut_dst) + src_terms := typeSetOf(ut_src) + + // conversionCase describes an instruction pattern that maybe emitted to + // model d <- s for d in dst_terms and s in src_terms. + // Multiple conversions can match the same pattern. + type conversionCase uint8 + const ( + changeType conversionCase = 1 << iota + sliceToArray + sliceToArrayPtr + sliceTo0Array + sliceTo0ArrayPtr + convert + ) + // classify the conversion case of a source type us to a destination type ud. + // us and ud are underlying types (not *Named or *Alias) + classify := func(us, ud types.Type) conversionCase { + // Just a change of type, but not value or representation? + if isValuePreserving(us, ud) { + return changeType + } + + // Conversion from slice to array or slice to array pointer? + if slice, ok := us.(*types.Slice); ok { + var arr *types.Array + var ptr bool + // Conversion from slice to array pointer? + switch d := ud.(type) { + case *types.Array: + arr = d + case *types.Pointer: + arr, _ = d.Elem().Underlying().(*types.Array) + ptr = true + } + if arr != nil && types.Identical(slice.Elem(), arr.Elem()) { + if arr.Len() == 0 { + if ptr { + return sliceTo0ArrayPtr + } else { + return sliceTo0Array + } + } + if ptr { + return sliceToArrayPtr + } else { + return sliceToArray + } + } + } + + // The only remaining case in well-typed code is a representation- + // changing conversion of basic types (possibly with []byte/[]rune). + if !isBasic(us) && !isBasic(ud) { + panic(fmt.Sprintf("in %s: cannot convert term %s (%s [within %s]) to type %s [within %s]", f, val, val.Type(), us, typ, ud)) + } + return convert + } + + var classifications conversionCase + for _, s := range src_terms { + us := s.Type().Underlying() + for _, d := range dst_terms { + ud := d.Type().Underlying() + classifications |= classify(us, ud) + } + } + if classifications == 0 { + panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) + } + + // Conversion of a compile-time constant value? + if c, ok := val.(*Const); ok { + // Conversion to a basic type? + if isBasic(ut_dst) { + // Conversion of a compile-time constant to + // another constant type results in a new + // constant of the destination type and + // (initially) the same abstract value. + // We don't truncate the value yet. + return NewConst(c.Value, typ) + } + // Can we always convert from zero value without panicking? + const mayPanic = sliceToArray | sliceToArrayPtr + if c.Value == nil && classifications&mayPanic == 0 { + return NewConst(nil, typ) + } + + // We're converting from constant to non-constant type, + // e.g. string -> []byte/[]rune. 
+ } + + switch classifications { + case changeType: // representation-preserving change + c := &ChangeType{X: val} + c.setType(typ) + return f.emit(c) + + case sliceToArrayPtr, sliceTo0ArrayPtr: // slice to array pointer + c := &SliceToArrayPointer{X: val} + c.setType(typ) + return f.emit(c) + + case sliceToArray: // slice to arrays (not zero-length) + ptype := types.NewPointer(typ) + p := &SliceToArrayPointer{X: val} + p.setType(ptype) + x := f.emit(p) + unOp := &UnOp{Op: token.MUL, X: x} + unOp.setType(typ) + return f.emit(unOp) + + case sliceTo0Array: // slice to zero-length arrays (constant) + return zeroConst(typ) + + case convert: // representation-changing conversion + c := &Convert{X: val} + c.setType(typ) + return f.emit(c) + + default: // multiple conversion + c := &MultiConvert{X: val, from: src_terms, to: dst_terms} + c.setType(typ) + return f.emit(c) + } +} + +// emitTypeCoercion emits to f code to coerce the type of a +// Value v to exactly type typ, and returns the coerced value. +// +// Requires that coercing v.Typ() to typ is a value preserving change. +// +// Currently used only when v.Type() is a type instance of typ or vice versa. +// A type v is a type instance of a type t if there exists a +// type parameter substitution σ s.t. σ(v) == t. Example: +// +// σ(func(T) T) == func(int) int for σ == [T ↦ int] +// +// This happens in instantiation wrappers for conversion +// from an instantiation to a parameterized type (and vice versa) +// with σ substituting f.typeparams by f.typeargs. +func emitTypeCoercion(f *Function, v Value, typ types.Type) Value { + if types.Identical(v.Type(), typ) { + return v // no coercion needed + } + // TODO(taking): for instances should we record which side is the instance? + c := &ChangeType{ + X: v, + } + c.setType(typ) + f.emit(c) + return c +} + +// emitStore emits to f an instruction to store value val at location +// addr, applying implicit conversions as required by assignability rules. +func emitStore(f *Function, addr, val Value, pos token.Pos) *Store { + typ := typeparams.MustDeref(addr.Type()) + s := &Store{ + Addr: addr, + Val: emitConv(f, val, typ), + pos: pos, + } + f.emit(s) + return s +} + +// emitJump emits to f a jump to target, and updates the control-flow graph. +// Postcondition: f.currentBlock is nil. +func emitJump(f *Function, target *BasicBlock) { + b := f.currentBlock + b.emit(new(Jump)) + addEdge(b, target) + f.currentBlock = nil +} + +// emitIf emits to f a conditional jump to tblock or fblock based on +// cond, and updates the control-flow graph. +// Postcondition: f.currentBlock is nil. +func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) { + b := f.currentBlock + b.emit(&If{Cond: cond}) + addEdge(b, tblock) + addEdge(b, fblock) + f.currentBlock = nil +} + +// emitExtract emits to f an instruction to extract the index'th +// component of tuple. It returns the extracted value. +func emitExtract(f *Function, tuple Value, index int) Value { + e := &Extract{Tuple: tuple, Index: index} + e.setType(tuple.Type().(*types.Tuple).At(index).Type()) + return f.emit(e) +} + +// emitTypeAssert emits to f a type assertion value := x.(t) and +// returns the value. x.Type() must be an interface. +func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value { + a := &TypeAssert{X: x, AssertedType: t} + a.setPos(pos) + a.setType(t) + return f.emit(a) +} + +// emitTypeTest emits to f a type test value,ok := x.(t) and returns +// a (value, ok) tuple. x.Type() must be an interface. 
+func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value { + a := &TypeAssert{ + X: x, + AssertedType: t, + CommaOk: true, + } + a.setPos(pos) + a.setType(types.NewTuple( + newVar("value", t), + varOk, + )) + return f.emit(a) +} + +// emitTailCall emits to f a function call in tail position. The +// caller is responsible for all fields of 'call' except its type. +// Intended for wrapper methods. +// Precondition: f does/will not use deferred procedure calls. +// Postcondition: f.currentBlock is nil. +func emitTailCall(f *Function, call *Call) { + tresults := f.Signature.Results() + nr := tresults.Len() + if nr == 1 { + call.typ = tresults.At(0).Type() + } else { + call.typ = tresults + } + tuple := f.emit(call) + var ret Return + switch nr { + case 0: + // no-op + case 1: + ret.Results = []Value{tuple} + default: + for i := 0; i < nr; i++ { + v := emitExtract(f, tuple, i) + // TODO(adonovan): in principle, this is required: + // v = emitConv(f, o.Type, f.Signature.Results[i].Type) + // but in practice emitTailCall is only used when + // the types exactly match. + ret.Results = append(ret.Results, v) + } + } + f.emit(&ret) + f.currentBlock = nil +} + +// emitImplicitSelections emits to f code to apply the sequence of +// implicit field selections specified by indices to base value v, and +// returns the selected value. +// +// If v is the address of a struct, the result will be the address of +// a field; if it is the value of a struct, the result will be the +// value of a field. +func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value { + for _, index := range indices { + if isPointerCore(v.Type()) { + fld := fieldOf(typeparams.MustDeref(v.Type()), index) + instr := &FieldAddr{ + X: v, + Field: index, + } + instr.setPos(pos) + instr.setType(types.NewPointer(fld.Type())) + v = f.emit(instr) + // Load the field's value iff indirectly embedded. + if isPointerCore(fld.Type()) { + v = emitLoad(f, v) + } + } else { + fld := fieldOf(v.Type(), index) + instr := &Field{ + X: v, + Field: index, + } + instr.setPos(pos) + instr.setType(fld.Type()) + v = f.emit(instr) + } + } + return v +} + +// emitFieldSelection emits to f code to select the index'th field of v. +// +// If wantAddr, the input must be a pointer-to-struct and the result +// will be the field's address; otherwise the result will be the +// field's value. +// Ident id is used for position and debug info. +func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { + if isPointerCore(v.Type()) { + fld := fieldOf(typeparams.MustDeref(v.Type()), index) + instr := &FieldAddr{ + X: v, + Field: index, + } + instr.setPos(id.Pos()) + instr.setType(types.NewPointer(fld.Type())) + v = f.emit(instr) + // Load the field's value iff we don't want its address. + if !wantAddr { + v = emitLoad(f, v) + } + } else { + fld := fieldOf(v.Type(), index) + instr := &Field{ + X: v, + Field: index, + } + instr.setPos(id.Pos()) + instr.setType(fld.Type()) + v = f.emit(instr) + } + emitDebugRef(f, id, v, wantAddr) + return v +} + +// createRecoverBlock emits to f a block of code to return after a +// recovered panic, and sets f.Recover to it. +// +// If f's result parameters are named, the code loads and returns +// their current values, otherwise it returns the zero values of their +// type. +// +// Idempotent. 
+func createRecoverBlock(f *Function) { + if f.Recover != nil { + return // already created + } + saved := f.currentBlock + + f.Recover = f.newBasicBlock("recover") + f.currentBlock = f.Recover + + var results []Value + // Reload NRPs to form value tuple. + for _, nr := range f.results { + results = append(results, emitLoad(f, nr)) + } + + f.emit(&Return{Results: results}) + + f.currentBlock = saved +} diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go new file mode 100644 index 0000000..2ed63bf --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -0,0 +1,816 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the Function type. + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "os" + "strings" + + "golang.org/x/tools/internal/typeparams" +) + +// Like ObjectOf, but panics instead of returning nil. +// Only valid during f's create and build phases. +func (f *Function) objectOf(id *ast.Ident) types.Object { + if o := f.info.ObjectOf(id); o != nil { + return o + } + panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s", + id.Name, f.Prog.Fset.Position(id.Pos()))) +} + +// Like TypeOf, but panics instead of returning nil. +// Only valid during f's create and build phases. +func (f *Function) typeOf(e ast.Expr) types.Type { + if T := f.info.TypeOf(e); T != nil { + return f.typ(T) + } + panic(fmt.Sprintf("no type for %T @ %s", e, f.Prog.Fset.Position(e.Pos()))) +} + +// typ is the locally instantiated type of T. +// If f is not an instantiation, then f.typ(T)==T. +func (f *Function) typ(T types.Type) types.Type { + return f.subst.typ(T) +} + +// If id is an Instance, returns info.Instances[id].Type. +// Otherwise returns f.typeOf(id). +func (f *Function) instanceType(id *ast.Ident) types.Type { + if t, ok := f.info.Instances[id]; ok { + return t.Type + } + return f.typeOf(id) +} + +// selection returns a *selection corresponding to f.info.Selections[selector] +// with potential updates for type substitution. +func (f *Function) selection(selector *ast.SelectorExpr) *selection { + sel := f.info.Selections[selector] + if sel == nil { + return nil + } + + switch sel.Kind() { + case types.MethodExpr, types.MethodVal: + if recv := f.typ(sel.Recv()); recv != sel.Recv() { + // recv changed during type substitution. + pkg := f.declaredPackage().Pkg + obj, index, indirect := types.LookupFieldOrMethod(recv, true, pkg, sel.Obj().Name()) + + // sig replaces sel.Type(). See (types.Selection).Typ() for details. + sig := obj.Type().(*types.Signature) + sig = changeRecv(sig, newVar(sig.Recv().Name(), recv)) + if sel.Kind() == types.MethodExpr { + sig = recvAsFirstArg(sig) + } + return &selection{ + kind: sel.Kind(), + recv: recv, + typ: sig, + obj: obj, + index: index, + indirect: indirect, + } + } + } + return toSelection(sel) +} + +// Destinations associated with unlabelled for/switch/select stmts. +// We push/pop one of these as we enter/leave each construct and for +// each BranchStmt we scan for the innermost target of the right type. +type targets struct { + tail *targets // rest of stack + _break *BasicBlock + _continue *BasicBlock + _fallthrough *BasicBlock +} + +// Destinations associated with a labelled block. +// We populate these as labels are encountered in forward gotos or +// labelled statements. 
+// Forward gotos are resolved once it is known which statement they +// are associated with inside the Function. +type lblock struct { + label *types.Label // Label targeted by the blocks. + resolved bool // _goto block encountered (back jump or resolved fwd jump) + _goto *BasicBlock + _break *BasicBlock + _continue *BasicBlock +} + +// label returns the symbol denoted by a label identifier. +// +// label should be a non-blank identifier (label.Name != "_"). +func (f *Function) label(label *ast.Ident) *types.Label { + return f.objectOf(label).(*types.Label) +} + +// lblockOf returns the branch target associated with the +// specified label, creating it if needed. +func (f *Function) lblockOf(label *types.Label) *lblock { + lb := f.lblocks[label] + if lb == nil { + lb = &lblock{ + label: label, + _goto: f.newBasicBlock(label.Name()), + } + if f.lblocks == nil { + f.lblocks = make(map[*types.Label]*lblock) + } + f.lblocks[label] = lb + } + return lb +} + +// labelledBlock searches f for the block of the specified label. +// +// If f is a yield function, it additionally searches ancestor Functions +// corresponding to enclosing range-over-func statements within the +// same source function, so the returned block may belong to a different Function. +func labelledBlock(f *Function, label *types.Label, tok token.Token) *BasicBlock { + if lb := f.lblocks[label]; lb != nil { + var block *BasicBlock + switch tok { + case token.BREAK: + block = lb._break + case token.CONTINUE: + block = lb._continue + case token.GOTO: + block = lb._goto + } + if block != nil { + return block + } + } + // Search ancestors if this is a yield function. + if f.jump != nil { + return labelledBlock(f.parent, label, tok) + } + return nil +} + +// targetedBlock looks for the nearest block in f.targets +// (and f's ancestors) that matches tok's type, and returns +// the block and function it was found in. +func targetedBlock(f *Function, tok token.Token) *BasicBlock { + if f == nil { + return nil + } + for t := f.targets; t != nil; t = t.tail { + var block *BasicBlock + switch tok { + case token.BREAK: + block = t._break + case token.CONTINUE: + block = t._continue + case token.FALLTHROUGH: + block = t._fallthrough + } + if block != nil { + return block + } + } + // Search f's ancestors (in case f is a yield function). + return targetedBlock(f.parent, tok) +} + +// addResultVar adds a result for a variable v to f.results and v to f.returnVars. +func (f *Function) addResultVar(v *types.Var) { + result := emitLocalVar(f, v) + f.results = append(f.results, result) + f.returnVars = append(f.returnVars, v) +} + +// addParamVar adds a parameter to f.Params. +func (f *Function) addParamVar(v *types.Var) *Parameter { + name := v.Name() + if name == "" { + name = fmt.Sprintf("arg%d", len(f.Params)) + } + param := &Parameter{ + name: name, + object: v, + typ: f.typ(v.Type()), + parent: f, + } + f.Params = append(f.Params, param) + return param +} + +// addSpilledParam declares a parameter that is pre-spilled to the +// stack; the function body will load/store the spilled location. +// Subsequent lifting will eliminate spills where possible. +func (f *Function) addSpilledParam(obj *types.Var) { + param := f.addParamVar(obj) + spill := emitLocalVar(f, obj) + f.emit(&Store{Addr: spill, Val: param}) +} + +// startBody initializes the function prior to generating SSA code for its body. +// Precondition: f.Type() already set. 
+func (f *Function) startBody() { + f.currentBlock = f.newBasicBlock("entry") + f.vars = make(map[*types.Var]Value) // needed for some synthetics, e.g. init +} + +// createSyntacticParams populates f.Params and generates code (spills +// and named result locals) for all the parameters declared in the +// syntax. In addition it populates the f.objects mapping. +// +// Preconditions: +// f.startBody() was called. f.info != nil. +// Postcondition: +// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0) +func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) { + // Receiver (at most one inner iteration). + if recv != nil { + for _, field := range recv.List { + for _, n := range field.Names { + f.addSpilledParam(identVar(f, n)) + } + // Anonymous receiver? No need to spill. + if field.Names == nil { + f.addParamVar(f.Signature.Recv()) + } + } + } + + // Parameters. + if functype.Params != nil { + n := len(f.Params) // 1 if has recv, 0 otherwise + for _, field := range functype.Params.List { + for _, n := range field.Names { + f.addSpilledParam(identVar(f, n)) + } + // Anonymous parameter? No need to spill. + if field.Names == nil { + f.addParamVar(f.Signature.Params().At(len(f.Params) - n)) + } + } + } + + // Results. + if functype.Results != nil { + for _, field := range functype.Results.List { + // Implicit "var" decl of locals for named results. + for _, n := range field.Names { + v := identVar(f, n) + f.addResultVar(v) + } + // Implicit "var" decl of local for an unnamed result. + if field.Names == nil { + v := f.Signature.Results().At(len(f.results)) + f.addResultVar(v) + } + } + } +} + +// createDeferStack initializes fn.deferstack to local variable +// initialized to a ssa:deferstack() call. +func (fn *Function) createDeferStack() { + // Each syntactic function makes a call to ssa:deferstack, + // which is spilled to a local. Unused ones are later removed. + fn.deferstack = newVar("defer$stack", tDeferStack) + call := &Call{Call: CallCommon{Value: vDeferStack}} + call.setType(tDeferStack) + deferstack := fn.emit(call) + spill := emitLocalVar(fn, fn.deferstack) + emitStore(fn, spill, deferstack, token.NoPos) +} + +type setNumable interface { + setNum(int) +} + +// numberRegisters assigns numbers to all SSA registers +// (value-defining Instructions) in f, to aid debugging. +// (Non-Instruction Values are named at construction.) +func numberRegisters(f *Function) { + v := 0 + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + switch instr.(type) { + case Value: + instr.(setNumable).setNum(v) + v++ + } + } + } +} + +// buildReferrers populates the def/use information in all non-nil +// Value.Referrers slice. +// Precondition: all such slices are initially empty. +func buildReferrers(f *Function) { + var rands []*Value + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if r := *rand; r != nil { + if ref := r.Referrers(); ref != nil { + *ref = append(*ref, instr) + } + } + } + } + } +} + +// finishBody() finalizes the contents of the function after SSA code generation of its body. +// +// The function is not done being built until done() is called. +func (f *Function) finishBody() { + f.currentBlock = nil + f.lblocks = nil + f.returnVars = nil + f.jump = nil + f.source = nil + f.exits = nil + + // Remove from f.Locals any Allocs that escape to the heap. 
+ j := 0 + for _, l := range f.Locals { + if !l.Heap { + f.Locals[j] = l + j++ + } + } + // Nil out f.Locals[j:] to aid GC. + for i := j; i < len(f.Locals); i++ { + f.Locals[i] = nil + } + f.Locals = f.Locals[:j] + + optimizeBlocks(f) + + buildReferrers(f) + + buildDomTree(f) + + if f.Prog.mode&NaiveForm == 0 { + // For debugging pre-state of lifting pass: + // numberRegisters(f) + // f.WriteTo(os.Stderr) + lift(f) + } + + // clear remaining builder state + f.results = nil // (used by lifting) + f.deferstack = nil // (used by lifting) + f.vars = nil // (used by lifting) + f.subst = nil + + numberRegisters(f) // uses f.namedRegisters +} + +// done marks the building of f's SSA body complete, +// along with any nested functions, and optionally prints them. +func (f *Function) done() { + assert(f.parent == nil, "done called on an anonymous function") + + var visit func(*Function) + visit = func(f *Function) { + for _, anon := range f.AnonFuncs { + visit(anon) // anon is done building before f. + } + + f.uniq = 0 // done with uniq + f.build = nil // function is built + + if f.Prog.mode&PrintFunctions != 0 { + printMu.Lock() + f.WriteTo(os.Stdout) + printMu.Unlock() + } + + if f.Prog.mode&SanityCheckFunctions != 0 { + mustSanityCheck(f, nil) + } + } + visit(f) +} + +// removeNilBlocks eliminates nils from f.Blocks and updates each +// BasicBlock.Index. Use this after any pass that may delete blocks. +func (f *Function) removeNilBlocks() { + j := 0 + for _, b := range f.Blocks { + if b != nil { + b.Index = j + f.Blocks[j] = b + j++ + } + } + // Nil out f.Blocks[j:] to aid GC. + for i := j; i < len(f.Blocks); i++ { + f.Blocks[i] = nil + } + f.Blocks = f.Blocks[:j] +} + +// SetDebugMode sets the debug mode for package pkg. If true, all its +// functions will include full debug info. This greatly increases the +// size of the instruction stream, and causes Functions to depend upon +// the ASTs, potentially keeping them live in memory for longer. +func (pkg *Package) SetDebugMode(debug bool) { + pkg.debug = debug +} + +// debugInfo reports whether debug info is wanted for this function. +func (f *Function) debugInfo() bool { + // debug info for instantiations follows the debug info of their origin. + p := f.declaredPackage() + return p != nil && p.debug +} + +// lookup returns the address of the named variable identified by obj +// that is local to function f or one of its enclosing functions. +// If escaping, the reference comes from a potentially escaping pointer +// expression and the referent must be heap-allocated. +// We assume the referent is a *Alloc or *Phi. +// (The only Phis at this stage are those created directly by go1.22 "for" loops.) +func (f *Function) lookup(obj *types.Var, escaping bool) Value { + if v, ok := f.vars[obj]; ok { + if escaping { + switch v := v.(type) { + case *Alloc: + v.Heap = true + case *Phi: + for _, edge := range v.Edges { + if alloc, ok := edge.(*Alloc); ok { + alloc.Heap = true + } + } + } + } + return v // function-local var (address) + } + + // Definition must be in an enclosing function; + // plumb it through intervening closures. + if f.parent == nil { + panic("no ssa.Value for " + obj.String()) + } + outer := f.parent.lookup(obj, true) // escaping + v := &FreeVar{ + name: obj.Name(), + typ: outer.Type(), + pos: outer.Pos(), + outer: outer, + parent: f, + } + f.vars[obj] = v + f.FreeVars = append(f.FreeVars, v) + return v +} + +// emit emits the specified instruction to function f. 
+func (f *Function) emit(instr Instruction) Value { + return f.currentBlock.emit(instr) +} + +// RelString returns the full name of this function, qualified by +// package name, receiver type, etc. +// +// The specific formatting rules are not guaranteed and may change. +// +// Examples: +// +// "math.IsNaN" // a package-level function +// "(*bytes.Buffer).Bytes" // a declared method or a wrapper +// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) +// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) +// "main.main$1" // an anonymous function in main +// "main.init#1" // a declared init function +// "main.init" // the synthesized package initializer +// +// When these functions are referred to from within the same package +// (i.e. from == f.Pkg.Object), they are rendered without the package path. +// For example: "IsNaN", "(*Buffer).Bytes", etc. +// +// All non-synthetic functions have distinct package-qualified names. +// (But two methods may have the same name "(T).f" if one is a synthetic +// wrapper promoting a non-exported method "f" from another package; in +// that case, the strings are equal but the identifiers "f" are distinct.) +func (f *Function) RelString(from *types.Package) string { + // Anonymous? + if f.parent != nil { + // An anonymous function's Name() looks like "parentName$1", + // but its String() should include the type/package/etc. + parent := f.parent.RelString(from) + for i, anon := range f.parent.AnonFuncs { + if anon == f { + return fmt.Sprintf("%s$%d", parent, 1+i) + } + } + + return f.name // should never happen + } + + // Method (declared or wrapper)? + if recv := f.Signature.Recv(); recv != nil { + return f.relMethod(from, recv.Type()) + } + + // Thunk? + if f.method != nil { + return f.relMethod(from, f.method.recv) + } + + // Bound? + if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") { + return f.relMethod(from, f.FreeVars[0].Type()) + } + + // Package-level function? + // Prefix with package name for cross-package references only. + if p := f.relPkg(); p != nil && p != from { + return fmt.Sprintf("%s.%s", p.Path(), f.name) + } + + // Unknown. + return f.name +} + +func (f *Function) relMethod(from *types.Package, recv types.Type) string { + return fmt.Sprintf("(%s).%s", relType(recv, from), f.name) +} + +// writeSignature writes to buf the signature sig in declaration syntax. +func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature) { + buf.WriteString("func ") + if recv := sig.Recv(); recv != nil { + buf.WriteString("(") + if name := recv.Name(); name != "" { + buf.WriteString(name) + buf.WriteString(" ") + } + types.WriteType(buf, recv.Type(), types.RelativeTo(from)) + buf.WriteString(") ") + } + buf.WriteString(name) + types.WriteSignature(buf, sig, types.RelativeTo(from)) +} + +// declaredPackage returns the package fn is declared in or nil if the +// function is not declared in a package. +func (fn *Function) declaredPackage() *Package { + switch { + case fn.Pkg != nil: + return fn.Pkg // non-generic function (does that follow??) + case fn.topLevelOrigin != nil: + return fn.topLevelOrigin.Pkg // instance of a named generic function + case fn.parent != nil: + return fn.parent.declaredPackage() // instance of an anonymous [generic] function + default: + return nil // function is not declared in a package, e.g. a wrapper. + } +} + +// relPkg returns types.Package fn is printed in relationship to. 
+func (fn *Function) relPkg() *types.Package { + if p := fn.declaredPackage(); p != nil { + return p.Pkg + } + return nil +} + +var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer + +func (f *Function) WriteTo(w io.Writer) (int64, error) { + var buf bytes.Buffer + WriteFunction(&buf, f) + n, err := w.Write(buf.Bytes()) + return int64(n), err +} + +// WriteFunction writes to buf a human-readable "disassembly" of f. +func WriteFunction(buf *bytes.Buffer, f *Function) { + fmt.Fprintf(buf, "# Name: %s\n", f.String()) + if f.Pkg != nil { + fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path()) + } + if syn := f.Synthetic; syn != "" { + fmt.Fprintln(buf, "# Synthetic:", syn) + } + if pos := f.Pos(); pos.IsValid() { + fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos)) + } + + if f.parent != nil { + fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name()) + } + + if f.Recover != nil { + fmt.Fprintf(buf, "# Recover: %s\n", f.Recover) + } + + from := f.relPkg() + + if f.FreeVars != nil { + buf.WriteString("# Free variables:\n") + for i, fv := range f.FreeVars { + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from)) + } + } + + if len(f.Locals) > 0 { + buf.WriteString("# Locals:\n") + for i, l := range f.Locals { + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(typeparams.MustDeref(l.Type()), from)) + } + } + writeSignature(buf, from, f.Name(), f.Signature) + buf.WriteString(":\n") + + if f.Blocks == nil { + buf.WriteString("\t(external)\n") + } + + // NB. column calculations are confused by non-ASCII + // characters and assume 8-space tabs. + const punchcard = 80 // for old time's sake. + const tabwidth = 8 + for _, b := range f.Blocks { + if b == nil { + // Corrupt CFG. + fmt.Fprintf(buf, ".nil:\n") + continue + } + n, _ := fmt.Fprintf(buf, "%d:", b.Index) + bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs)) + fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg) + + if false { // CFG debugging + fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs) + } + for _, instr := range b.Instrs { + buf.WriteString("\t") + switch v := instr.(type) { + case Value: + l := punchcard - tabwidth + // Left-align the instruction. + if name := v.Name(); name != "" { + n, _ := fmt.Fprintf(buf, "%s = ", name) + l -= n + } + n, _ := buf.WriteString(instr.String()) + l -= n + // Right-align the type if there's space. + if t := v.Type(); t != nil { + buf.WriteByte(' ') + ts := relType(t, from) + l -= len(ts) + len(" ") // (spaces before and after type) + if l > 0 { + fmt.Fprintf(buf, "%*s", l, "") + } + buf.WriteString(ts) + } + case nil: + // Be robust against bad transforms. + buf.WriteString("") + default: + buf.WriteString(instr.String()) + } + // -mode=S: show line numbers + if f.Prog.mode&LogSource != 0 { + if pos := instr.Pos(); pos.IsValid() { + fmt.Fprintf(buf, " L%d", f.Prog.Fset.Position(pos).Line) + } + } + buf.WriteString("\n") + } + } + fmt.Fprintf(buf, "\n") +} + +// newBasicBlock adds to f a new basic block and returns it. It does +// not automatically become the current block for subsequent calls to emit. +// comment is an optional string for more readable debugging output. 
+func (f *Function) newBasicBlock(comment string) *BasicBlock { + b := &BasicBlock{ + Index: len(f.Blocks), + Comment: comment, + parent: f, + } + b.Succs = b.succs2[:0] + f.Blocks = append(f.Blocks, b) + return b +} + +// NewFunction returns a new synthetic Function instance belonging to +// prog, with its name and signature fields set as specified. +// +// The caller is responsible for initializing the remaining fields of +// the function object, e.g. Pkg, Params, Blocks. +// +// It is practically impossible for clients to construct well-formed +// SSA functions/packages/programs directly, so we assume this is the +// job of the Builder alone. NewFunction exists to provide clients a +// little flexibility. For example, analysis tools may wish to +// construct fake Functions for the root of the callgraph, a fake +// "reflect" package, etc. +// +// TODO(adonovan): think harder about the API here. +func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function { + return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} +} + +// Syntax returns the function's syntax (*ast.Func{Decl,Lit}) +// if it was produced from syntax or an *ast.RangeStmt if +// it is a range-over-func yield function. +func (f *Function) Syntax() ast.Node { return f.syntax } + +// identVar returns the variable defined by id. +func identVar(fn *Function, id *ast.Ident) *types.Var { + return fn.info.Defs[id].(*types.Var) +} + +// unique returns a unique positive int within the source tree of f. +// The source tree of f includes all of f's ancestors by parent and all +// of the AnonFuncs contained within these. +func unique(f *Function) int64 { + f.uniq++ + return f.uniq +} + +// exit is a change of control flow going from a range-over-func +// yield function to an ancestor function caused by a break, continue, +// goto, or return statement. +// +// There are 3 types of exits: +// * return from the source function (from ReturnStmt), +// * jump to a block (from break and continue statements [labelled/unlabelled]), +// * go to a label (from goto statements). +// +// As the builder does one pass over the ast, it is unclear whether +// a forward goto statement will leave a range-over-func body. +// The function being exited to is unresolved until the end +// of building the range-over-func body. +type exit struct { + id int64 // unique value for exit within from and to + from *Function // the function the exit starts from + to *Function // the function being exited to (nil if unresolved) + pos token.Pos + + block *BasicBlock // basic block within to being jumped to. + label *types.Label // forward label being jumped to via goto. + // block == nil && label == nil => return +} + +// storeVar emits to function f code to store a value v to a *types.Var x. +func storeVar(f *Function, x *types.Var, v Value, pos token.Pos) { + emitStore(f, f.lookup(x, true), v, pos) +} + +// labelExit creates a new exit to a yield fn to exit the function using a label. +func labelExit(fn *Function, label *types.Label, pos token.Pos) *exit { + e := &exit{ + id: unique(fn), + from: fn, + to: nil, + pos: pos, + label: label, + } + fn.exits = append(fn.exits, e) + return e +} + +// blockExit creates a new exit to a yield fn that jumps to a basic block. 
+func blockExit(fn *Function, block *BasicBlock, pos token.Pos) *exit { + e := &exit{ + id: unique(fn), + from: fn, + to: block.parent, + pos: pos, + block: block, + } + fn.exits = append(fn.exits, e) + return e +} + +// blockExit creates a new exit to a yield fn that returns the source function. +func returnExit(fn *Function, pos token.Pos) *exit { + e := &exit{ + id: unique(fn), + from: fn, + to: fn.source, + pos: pos, + } + fn.exits = append(fn.exits, e) + return e +} diff --git a/vendor/golang.org/x/tools/go/ssa/instantiate.go b/vendor/golang.org/x/tools/go/ssa/instantiate.go new file mode 100644 index 0000000..2512f32 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/instantiate.go @@ -0,0 +1,131 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "fmt" + "go/types" + "sync" +) + +// A generic records information about a generic origin function, +// including a cache of existing instantiations. +type generic struct { + instancesMu sync.Mutex + instances map[*typeList]*Function // canonical type arguments to an instance. +} + +// instance returns a Function that is the instantiation of generic +// origin function fn with the type arguments targs. +// +// Any created instance is added to cr. +// +// Acquires fn.generic.instancesMu. +func (fn *Function) instance(targs []types.Type, b *builder) *Function { + key := fn.Prog.canon.List(targs) + + gen := fn.generic + + gen.instancesMu.Lock() + defer gen.instancesMu.Unlock() + inst, ok := gen.instances[key] + if !ok { + inst = createInstance(fn, targs) + inst.buildshared = b.shared() + b.enqueue(inst) + + if gen.instances == nil { + gen.instances = make(map[*typeList]*Function) + } + gen.instances[key] = inst + } else { + b.waitForSharedFunction(inst) + } + return inst +} + +// createInstance returns the instantiation of generic function fn using targs. +// +// Requires fn.generic.instancesMu. +func createInstance(fn *Function, targs []types.Type) *Function { + prog := fn.Prog + + // Compute signature. + var sig *types.Signature + var obj *types.Func + if recv := fn.Signature.Recv(); recv != nil { + // method + obj = prog.canon.instantiateMethod(fn.object, targs, prog.ctxt) + sig = obj.Type().(*types.Signature) + } else { + // function + instSig, err := types.Instantiate(prog.ctxt, fn.Signature, targs, false) + if err != nil { + panic(err) + } + instance, ok := instSig.(*types.Signature) + if !ok { + panic("Instantiate of a Signature returned a non-signature") + } + obj = fn.object // instantiation does not exist yet + sig = prog.canon.Type(instance).(*types.Signature) + } + + // Choose strategy (instance or wrapper). + var ( + synthetic string + subst *subster + build buildFunc + ) + if prog.mode&InstantiateGenerics != 0 && !prog.isParameterized(targs...) 
{ + synthetic = fmt.Sprintf("instance of %s", fn.Name()) + if fn.syntax != nil { + subst = makeSubster(prog.ctxt, obj, fn.typeparams, targs, false) + build = (*builder).buildFromSyntax + } else { + build = (*builder).buildParamsOnly + } + } else { + synthetic = fmt.Sprintf("instantiation wrapper of %s", fn.Name()) + build = (*builder).buildInstantiationWrapper + } + + /* generic instance or instantiation wrapper */ + return &Function{ + name: fmt.Sprintf("%s%s", fn.Name(), targs), // may not be unique + object: obj, + Signature: sig, + Synthetic: synthetic, + syntax: fn.syntax, // \ + info: fn.info, // } empty for non-created packages + goversion: fn.goversion, // / + build: build, + topLevelOrigin: fn, + pos: obj.Pos(), + Pkg: nil, + Prog: fn.Prog, + typeparams: fn.typeparams, // share with origin + typeargs: targs, + subst: subst, + } +} + +// isParameterized reports whether any of the specified types contains +// a free type parameter. It is safe to call concurrently. +func (prog *Program) isParameterized(ts ...types.Type) bool { + prog.hasParamsMu.Lock() + defer prog.hasParamsMu.Unlock() + + // TODO(adonovan): profile. If this operation is expensive, + // handle the most common but shallow cases such as T, pkg.T, + // *T without consulting the cache under the lock. + + for _, t := range ts { + if prog.hasParams.Has(t) { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/tools/go/ssa/lift.go b/vendor/golang.org/x/tools/go/ssa/lift.go new file mode 100644 index 0000000..aada3dc --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/lift.go @@ -0,0 +1,688 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines the lifting pass which tries to "lift" Alloc +// cells (new/local variables) into SSA registers, replacing loads +// with the dominating stored value, eliminating loads and stores, and +// inserting φ-nodes as needed. + +// Cited papers and resources: +// +// Ron Cytron et al. 1991. Efficiently computing SSA form... +// http://doi.acm.org/10.1145/115372.115320 +// +// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm. +// Software Practice and Experience 2001, 4:1-10. +// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf +// +// Daniel Berlin, llvmdev mailing list, 2012. +// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html +// (Be sure to expand the whole thread.) + +// TODO(adonovan): opt: there are many optimizations worth evaluating, and +// the conventional wisdom for SSA construction is that a simple +// algorithm well engineered often beats those of better asymptotic +// complexity on all but the most egregious inputs. +// +// Danny Berlin suggests that the Cooper et al. algorithm for +// computing the dominance frontier is superior to Cytron et al. +// Furthermore he recommends that rather than computing the DF for the +// whole function then renaming all alloc cells, it may be cheaper to +// compute the DF for each alloc cell separately and throw it away. +// +// Consider exploiting liveness information to avoid creating dead +// φ-nodes which we then immediately remove. +// +// Also see many other "TODO: opt" suggestions in the code. + +import ( + "fmt" + "go/token" + "math/big" + "os" + + "golang.org/x/tools/internal/typeparams" +) + +// If true, show diagnostic information at each step of lifting. +// Very verbose. 
+const debugLifting = false + +// domFrontier maps each block to the set of blocks in its dominance +// frontier. The outer slice is conceptually a map keyed by +// Block.Index. The inner slice is conceptually a set, possibly +// containing duplicates. +// +// TODO(adonovan): opt: measure impact of dups; consider a packed bit +// representation, e.g. big.Int, and bitwise parallel operations for +// the union step in the Children loop. +// +// domFrontier's methods mutate the slice's elements but not its +// length, so their receivers needn't be pointers. +type domFrontier [][]*BasicBlock + +func (df domFrontier) add(u, v *BasicBlock) { + p := &df[u.Index] + *p = append(*p, v) +} + +// build builds the dominance frontier df for the dominator (sub)tree +// rooted at u, using the Cytron et al. algorithm. +// +// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA +// by pruning the entire IDF computation, rather than merely pruning +// the DF -> IDF step. +func (df domFrontier) build(u *BasicBlock) { + // Encounter each node u in postorder of dom tree. + for _, child := range u.dom.children { + df.build(child) + } + for _, vb := range u.Succs { + if v := vb.dom; v.idom != u { + df.add(u, vb) + } + } + for _, w := range u.dom.children { + for _, vb := range df[w.Index] { + // TODO(adonovan): opt: use word-parallel bitwise union. + if v := vb.dom; v.idom != u { + df.add(u, vb) + } + } + } +} + +func buildDomFrontier(fn *Function) domFrontier { + df := make(domFrontier, len(fn.Blocks)) + df.build(fn.Blocks[0]) + if fn.Recover != nil { + df.build(fn.Recover) + } + return df +} + +func removeInstr(refs []Instruction, instr Instruction) []Instruction { + return removeInstrsIf(refs, func(i Instruction) bool { return i == instr }) +} + +func removeInstrsIf(refs []Instruction, p func(Instruction) bool) []Instruction { + // TODO(taking): replace with go1.22 slices.DeleteFunc. + i := 0 + for _, ref := range refs { + if p(ref) { + continue + } + refs[i] = ref + i++ + } + for j := i; j != len(refs); j++ { + refs[j] = nil // aid GC + } + return refs[:i] +} + +// lift replaces local and new Allocs accessed only with +// load/store by SSA registers, inserting φ-nodes where necessary. +// The result is a program in classical pruned SSA form. +// +// Preconditions: +// - fn has no dead blocks (blockopt has run). +// - Def/use info (Operands and Referrers) is up-to-date. +// - The dominator tree is up-to-date. +func lift(fn *Function) { + // TODO(adonovan): opt: lots of little optimizations may be + // worthwhile here, especially if they cause us to avoid + // buildDomFrontier. For example: + // + // - Alloc never loaded? Eliminate. + // - Alloc never stored? Replace all loads with a zero constant. + // - Alloc stored once? Replace loads with dominating store; + // don't forget that an Alloc is itself an effective store + // of zero. + // - Alloc used only within a single block? + // Use degenerate algorithm avoiding φ-nodes. + // - Consider synergy with scalar replacement of aggregates (SRA). + // e.g. *(&x.f) where x is an Alloc. + // Perhaps we'd get better results if we generated this as x.f + // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)). + // Unclear. + // + // But we will start with the simplest correct code. 
+ df := buildDomFrontier(fn) + + if debugLifting { + title := false + for i, blocks := range df { + if blocks != nil { + if !title { + fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn) + title = true + } + fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks) + } + } + } + + newPhis := make(newPhiMap) + + // During this pass we will replace some BasicBlock.Instrs + // (allocs, loads and stores) with nil, keeping a count in + // BasicBlock.gaps. At the end we will reset Instrs to the + // concatenation of all non-dead newPhis and non-nil Instrs + // for the block, reusing the original array if space permits. + + // While we're here, we also eliminate 'rundefers' + // instructions and ssa:deferstack() in functions that contain no + // 'defer' instructions. For now, we also eliminate + // 's = ssa:deferstack()' calls if s doesn't escape, replacing s + // with nil in Defer{DeferStack: s}. This has the same meaning, + // but allows eliminating the intrinsic function `ssa:deferstack()` + // (unless it is needed due to range-over-func instances). This gives + // ssa users more time to support range-over-func. + usesDefer := false + deferstackAlloc, deferstackCall := deferstackPreamble(fn) + eliminateDeferStack := deferstackAlloc != nil && !deferstackAlloc.Heap + + // A counter used to generate ~unique ids for Phi nodes, as an + // aid to debugging. We use large numbers to make them highly + // visible. All nodes are renumbered later. + fresh := 1000 + + // Determine which allocs we can lift and number them densely. + // The renaming phase uses this numbering for compact maps. + numAllocs := 0 + for _, b := range fn.Blocks { + b.gaps = 0 + b.rundefers = 0 + for _, instr := range b.Instrs { + switch instr := instr.(type) { + case *Alloc: + index := -1 + if liftAlloc(df, instr, newPhis, &fresh) { + index = numAllocs + numAllocs++ + } + instr.index = index + case *Defer: + usesDefer = true + if eliminateDeferStack { + // Clear DeferStack and remove references to loads + if instr.DeferStack != nil { + if refs := instr.DeferStack.Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + instr.DeferStack = nil + } + } + case *RunDefers: + b.rundefers++ + } + } + } + + // renaming maps an alloc (keyed by index) to its replacement + // value. Initially the renaming contains nil, signifying the + // zero constant of the appropriate type; we construct the + // Const lazily at most once on each path through the domtree. + // TODO(adonovan): opt: cache per-function not per subtree. + renaming := make([]Value, numAllocs) + + // Renaming. + rename(fn.Blocks[0], renaming, newPhis) + + // Eliminate dead φ-nodes. + removeDeadPhis(fn.Blocks, newPhis) + + // Eliminate ssa:deferstack() call. + if eliminateDeferStack { + b := deferstackCall.block + for i, instr := range b.Instrs { + if instr == deferstackCall { + b.Instrs[i] = nil + b.gaps++ + break + } + } + } + + // Prepend remaining live φ-nodes to each block. + for _, b := range fn.Blocks { + nps := newPhis[b] + j := len(nps) + + rundefersToKill := b.rundefers + if usesDefer { + rundefersToKill = 0 + } + + if j+b.gaps+rundefersToKill == 0 { + continue // fast path: no new phis or gaps + } + + // Compact nps + non-nil Instrs into a new slice. + // TODO(adonovan): opt: compact in situ (rightwards) + // if Instrs has sufficient space or slack. 
+ dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill) + for i, np := range nps { + dst[i] = np.phi + } + for _, instr := range b.Instrs { + if instr == nil { + continue + } + if !usesDefer { + if _, ok := instr.(*RunDefers); ok { + continue + } + } + dst[j] = instr + j++ + } + b.Instrs = dst + } + + // Remove any fn.Locals that were lifted. + j := 0 + for _, l := range fn.Locals { + if l.index < 0 { + fn.Locals[j] = l + j++ + } + } + // Nil out fn.Locals[j:] to aid GC. + for i := j; i < len(fn.Locals); i++ { + fn.Locals[i] = nil + } + fn.Locals = fn.Locals[:j] +} + +// removeDeadPhis removes φ-nodes not transitively needed by a +// non-Phi, non-DebugRef instruction. +func removeDeadPhis(blocks []*BasicBlock, newPhis newPhiMap) { + // First pass: find the set of "live" φ-nodes: those reachable + // from some non-Phi instruction. + // + // We compute reachability in reverse, starting from each φ, + // rather than forwards, starting from each live non-Phi + // instruction, because this way visits much less of the + // Value graph. + livePhis := make(map[*Phi]bool) + for _, npList := range newPhis { + for _, np := range npList { + phi := np.phi + if !livePhis[phi] && phiHasDirectReferrer(phi) { + markLivePhi(livePhis, phi) + } + } + } + + // Existing φ-nodes due to && and || operators + // are all considered live (see Go issue 19622). + for _, b := range blocks { + for _, phi := range b.phis() { + markLivePhi(livePhis, phi.(*Phi)) + } + } + + // Second pass: eliminate unused phis from newPhis. + for block, npList := range newPhis { + j := 0 + for _, np := range npList { + if livePhis[np.phi] { + npList[j] = np + j++ + } else { + // discard it, first removing it from referrers + for _, val := range np.phi.Edges { + if refs := val.Referrers(); refs != nil { + *refs = removeInstr(*refs, np.phi) + } + } + np.phi.block = nil + } + } + newPhis[block] = npList[:j] + } +} + +// markLivePhi marks phi, and all φ-nodes transitively reachable via +// its Operands, live. +func markLivePhi(livePhis map[*Phi]bool, phi *Phi) { + livePhis[phi] = true + for _, rand := range phi.Operands(nil) { + if q, ok := (*rand).(*Phi); ok { + if !livePhis[q] { + markLivePhi(livePhis, q) + } + } + } +} + +// phiHasDirectReferrer reports whether phi is directly referred to by +// a non-Phi instruction. Such instructions are the +// roots of the liveness traversal. +func phiHasDirectReferrer(phi *Phi) bool { + for _, instr := range *phi.Referrers() { + if _, ok := instr.(*Phi); !ok { + return true + } + } + return false +} + +type blockSet struct{ big.Int } // (inherit methods from Int) + +// add adds b to the set and returns true if the set changed. +func (s *blockSet) add(b *BasicBlock) bool { + i := b.Index + if s.Bit(i) != 0 { + return false + } + s.SetBit(&s.Int, i, 1) + return true +} + +// take removes an arbitrary element from a set s and +// returns its index, or returns -1 if empty. +func (s *blockSet) take() int { + l := s.BitLen() + for i := 0; i < l; i++ { + if s.Bit(i) == 1 { + s.SetBit(&s.Int, i, 0) + return i + } + } + return -1 +} + +// newPhi is a pair of a newly introduced φ-node and the lifted Alloc +// it replaces. +type newPhi struct { + phi *Phi + alloc *Alloc +} + +// newPhiMap records for each basic block, the set of newPhis that +// must be prepended to the block. +type newPhiMap map[*BasicBlock][]newPhi + +// liftAlloc determines whether alloc can be lifted into registers, +// and if so, it populates newPhis with all the φ-nodes it may require +// and returns true. 
+// +// fresh is a source of fresh ids for phi nodes. +func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool { + // Don't lift result values in functions that defer + // calls that may recover from panic. + if fn := alloc.Parent(); fn.Recover != nil { + for _, nr := range fn.results { + if nr == alloc { + return false + } + } + } + + // Compute defblocks, the set of blocks containing a + // definition of the alloc cell. + var defblocks blockSet + for _, instr := range *alloc.Referrers() { + // Bail out if we discover the alloc is not liftable; + // the only operations permitted to use the alloc are + // loads/stores into the cell, and DebugRef. + switch instr := instr.(type) { + case *Store: + if instr.Val == alloc { + return false // address used as value + } + if instr.Addr != alloc { + panic("Alloc.Referrers is inconsistent") + } + defblocks.add(instr.Block()) + case *UnOp: + if instr.Op != token.MUL { + return false // not a load + } + if instr.X != alloc { + panic("Alloc.Referrers is inconsistent") + } + case *DebugRef: + // ok + default: + return false // some other instruction + } + } + // The Alloc itself counts as a (zero) definition of the cell. + defblocks.add(alloc.Block()) + + if debugLifting { + fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name()) + } + + fn := alloc.Parent() + + // Φ-insertion. + // + // What follows is the body of the main loop of the insert-φ + // function described by Cytron et al, but instead of using + // counter tricks, we just reset the 'hasAlready' and 'work' + // sets each iteration. These are bitmaps so it's pretty cheap. + // + // TODO(adonovan): opt: recycle slice storage for W, + // hasAlready, defBlocks across liftAlloc calls. + var hasAlready blockSet + + // Initialize W and work to defblocks. + var work blockSet = defblocks // blocks seen + var W blockSet // blocks to do + W.Set(&defblocks.Int) + + // Traverse iterated dominance frontier, inserting φ-nodes. + for i := W.take(); i != -1; i = W.take() { + u := fn.Blocks[i] + for _, v := range df[u.Index] { + if hasAlready.add(v) { + // Create φ-node. + // It will be prepended to v.Instrs later, if needed. + phi := &Phi{ + Edges: make([]Value, len(v.Preds)), + Comment: alloc.Comment, + } + // This is merely a debugging aid: + phi.setNum(*fresh) + *fresh++ + + phi.pos = alloc.Pos() + phi.setType(typeparams.MustDeref(alloc.Type())) + phi.block = v + if debugLifting { + fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v) + } + newPhis[v] = append(newPhis[v], newPhi{phi, alloc}) + + if work.add(v) { + W.add(v) + } + } + } + } + + return true +} + +// replaceAll replaces all intraprocedural uses of x with y, +// updating x.Referrers and y.Referrers. +// Precondition: x.Referrers() != nil, i.e. x must be local to some function. +func replaceAll(x, y Value) { + var rands []*Value + pxrefs := x.Referrers() + pyrefs := y.Referrers() + for _, instr := range *pxrefs { + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if *rand != nil { + if *rand == x { + *rand = y + } + } + } + if pyrefs != nil { + *pyrefs = append(*pyrefs, instr) // dups ok + } + } + *pxrefs = nil // x is now unreferenced +} + +// renamed returns the value to which alloc is being renamed, +// constructing it lazily if it's the implicit zero initialization. 
+func renamed(renaming []Value, alloc *Alloc) Value { + v := renaming[alloc.index] + if v == nil { + v = zeroConst(typeparams.MustDeref(alloc.Type())) + renaming[alloc.index] = v + } + return v +} + +// rename implements the (Cytron et al) SSA renaming algorithm, a +// preorder traversal of the dominator tree replacing all loads of +// Alloc cells with the value stored to that cell by the dominating +// store instruction. For lifting, we need only consider loads, +// stores and φ-nodes. +// +// renaming is a map from *Alloc (keyed by index number) to its +// dominating stored value; newPhis[x] is the set of new φ-nodes to be +// prepended to block x. +func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) { + // Each φ-node becomes the new name for its associated Alloc. + for _, np := range newPhis[u] { + phi := np.phi + alloc := np.alloc + renaming[alloc.index] = phi + } + + // Rename loads and stores of allocs. + for i, instr := range u.Instrs { + switch instr := instr.(type) { + case *Alloc: + if instr.index >= 0 { // store of zero to Alloc cell + // Replace dominated loads by the zero value. + renaming[instr.index] = nil + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr) + } + // Delete the Alloc. + u.Instrs[i] = nil + u.gaps++ + } + + case *Store: + if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell + // Replace dominated loads by the stored value. + renaming[alloc.index] = instr.Val + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n", + instr, instr.Val.Name()) + } + // Remove the store from the referrer list of the stored value. + if refs := instr.Val.Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + // Delete the Store. + u.Instrs[i] = nil + u.gaps++ + } + + case *UnOp: + if instr.Op == token.MUL { + if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell + newval := renamed(renaming, alloc) + if debugLifting { + fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n", + instr.Name(), instr, newval.Name()) + } + // Replace all references to + // the loaded value by the + // dominating stored value. + replaceAll(instr, newval) + // Delete the Load. + u.Instrs[i] = nil + u.gaps++ + } + } + + case *DebugRef: + if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell + if instr.IsAddr { + instr.X = renamed(renaming, alloc) + instr.IsAddr = false + + // Add DebugRef to instr.X's referrers. + if refs := instr.X.Referrers(); refs != nil { + *refs = append(*refs, instr) + } + } else { + // A source expression denotes the address + // of an Alloc that was optimized away. + instr.X = nil + + // Delete the DebugRef. + u.Instrs[i] = nil + u.gaps++ + } + } + } + } + + // For each φ-node in a CFG successor, rename the edge. + for _, v := range u.Succs { + phis := newPhis[v] + if len(phis) == 0 { + continue + } + i := v.predIndex(u) + for _, np := range phis { + phi := np.phi + alloc := np.alloc + newval := renamed(renaming, alloc) + if debugLifting { + fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n", + phi.Name(), u, v, i, alloc.Name(), newval.Name()) + } + phi.Edges[i] = newval + if prefs := newval.Referrers(); prefs != nil { + *prefs = append(*prefs, phi) + } + } + } + + // Continue depth-first recursion over domtree, pushing a + // fresh copy of the renaming map for each subtree. 
+ for i, v := range u.dom.children { + r := renaming + if i < len(u.dom.children)-1 { + // On all but the final iteration, we must make + // a copy to avoid destructive update. + r = make([]Value, len(renaming)) + copy(r, renaming) + } + rename(v, r, newPhis) + } + +} + +// deferstackPreamble returns the *Alloc and ssa:deferstack() call for fn.deferstack. +func deferstackPreamble(fn *Function) (*Alloc, *Call) { + if alloc, _ := fn.vars[fn.deferstack].(*Alloc); alloc != nil { + for _, ref := range *alloc.Referrers() { + if ref, _ := ref.(*Store); ref != nil && ref.Addr == alloc { + if call, _ := ref.Val.(*Call); call != nil { + return alloc, call + } + } + } + } + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/ssa/lvalue.go b/vendor/golang.org/x/tools/go/ssa/lvalue.go new file mode 100644 index 0000000..eede307 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/lvalue.go @@ -0,0 +1,155 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// lvalues are the union of addressable expressions and map-index +// expressions. + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// An lvalue represents an assignable location that may appear on the +// left-hand side of an assignment. This is a generalization of a +// pointer to permit updates to elements of maps. +type lvalue interface { + store(fn *Function, v Value) // stores v into the location + load(fn *Function) Value // loads the contents of the location + address(fn *Function) Value // address of the location + typ() types.Type // returns the type of the location +} + +// An address is an lvalue represented by a true pointer. +type address struct { + addr Value // must have a pointer core type. + pos token.Pos // source position + expr ast.Expr // source syntax of the value (not address) [debug mode] +} + +func (a *address) load(fn *Function) Value { + load := emitLoad(fn, a.addr) + load.pos = a.pos + return load +} + +func (a *address) store(fn *Function, v Value) { + store := emitStore(fn, a.addr, v, a.pos) + if a.expr != nil { + // store.Val is v, converted for assignability. + emitDebugRef(fn, a.expr, store.Val, false) + } +} + +func (a *address) address(fn *Function) Value { + if a.expr != nil { + emitDebugRef(fn, a.expr, a.addr, true) + } + return a.addr +} + +func (a *address) typ() types.Type { + return typeparams.MustDeref(a.addr.Type()) +} + +// An element is an lvalue represented by m[k], the location of an +// element of a map. These locations are not addressable +// since pointers cannot be formed from them, but they do support +// load() and store(). +type element struct { + m, k Value // map + t types.Type // map element type + pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v) +} + +func (e *element) load(fn *Function) Value { + l := &Lookup{ + X: e.m, + Index: e.k, + } + l.setPos(e.pos) + l.setType(e.t) + return fn.emit(l) +} + +func (e *element) store(fn *Function, v Value) { + up := &MapUpdate{ + Map: e.m, + Key: e.k, + Value: emitConv(fn, v, e.t), + } + up.pos = e.pos + fn.emit(up) +} + +func (e *element) address(fn *Function) Value { + panic("map elements are not addressable") +} + +func (e *element) typ() types.Type { + return e.t +} + +// A lazyAddress is an lvalue whose address is the result of an instruction. 
+// These work like an *address except a new address.address() Value +// is created on each load, store and address call. +// A lazyAddress can be used to control when a side effect (nil pointer +// dereference, index out of bounds) of using a location happens. +type lazyAddress struct { + addr func(fn *Function) Value // emit to fn the computation of the address + t types.Type // type of the location + pos token.Pos // source position + expr ast.Expr // source syntax of the value (not address) [debug mode] +} + +func (l *lazyAddress) load(fn *Function) Value { + load := emitLoad(fn, l.addr(fn)) + load.pos = l.pos + return load +} + +func (l *lazyAddress) store(fn *Function, v Value) { + store := emitStore(fn, l.addr(fn), v, l.pos) + if l.expr != nil { + // store.Val is v, converted for assignability. + emitDebugRef(fn, l.expr, store.Val, false) + } +} + +func (l *lazyAddress) address(fn *Function) Value { + addr := l.addr(fn) + if l.expr != nil { + emitDebugRef(fn, l.expr, addr, true) + } + return addr +} + +func (l *lazyAddress) typ() types.Type { return l.t } + +// A blank is a dummy variable whose name is "_". +// It is not reified: loads are illegal and stores are ignored. +type blank struct{} + +func (bl blank) load(fn *Function) Value { + panic("blank.load is illegal") +} + +func (bl blank) store(fn *Function, v Value) { + // no-op +} + +func (bl blank) address(fn *Function) Value { + panic("blank var is not addressable") +} + +func (bl blank) typ() types.Type { + // This should be the type of the blank Ident; the typechecker + // doesn't provide this yet, but fortunately, we don't need it + // yet either. + panic("blank.typ is unimplemented") +} diff --git a/vendor/golang.org/x/tools/go/ssa/methods.go b/vendor/golang.org/x/tools/go/ssa/methods.go new file mode 100644 index 0000000..b956018 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/methods.go @@ -0,0 +1,281 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines utilities for population of method sets. + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/aliases" +) + +// MethodValue returns the Function implementing method sel, building +// wrapper methods on demand. It returns nil if sel denotes an +// interface or generic method. +// +// Precondition: sel.Kind() == MethodVal. +// +// Thread-safe. +// +// Acquires prog.methodsMu. +func (prog *Program) MethodValue(sel *types.Selection) *Function { + if sel.Kind() != types.MethodVal { + panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) + } + T := sel.Recv() + if types.IsInterface(T) { + return nil // interface method or type parameter + } + + if prog.isParameterized(T) { + return nil // generic method + } + + if prog.mode&LogSource != 0 { + defer logStack("MethodValue %s %v", T, sel)() + } + + var b builder + + m := func() *Function { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + // Get or create SSA method set. + mset, ok := prog.methodSets.At(T).(*methodSet) + if !ok { + mset = &methodSet{mapping: make(map[string]*Function)} + prog.methodSets.Set(T, mset) + } + + // Get or create SSA method. 
+ id := sel.Obj().Id() + fn, ok := mset.mapping[id] + if !ok { + obj := sel.Obj().(*types.Func) + needsPromotion := len(sel.Index()) > 1 + needsIndirection := !isPointer(recvType(obj)) && isPointer(T) + if needsPromotion || needsIndirection { + fn = createWrapper(prog, toSelection(sel)) + fn.buildshared = b.shared() + b.enqueue(fn) + } else { + fn = prog.objectMethod(obj, &b) + } + if fn.Signature.Recv() == nil { + panic(fn) + } + mset.mapping[id] = fn + } else { + b.waitForSharedFunction(fn) + } + + return fn + }() + + b.iterate() + + return m +} + +// objectMethod returns the Function for a given method symbol. +// The symbol may be an instance of a generic function. It need not +// belong to an existing SSA package created by a call to +// prog.CreatePackage. +// +// objectMethod panics if the function is not a method. +// +// Acquires prog.objectMethodsMu. +func (prog *Program) objectMethod(obj *types.Func, b *builder) *Function { + sig := obj.Type().(*types.Signature) + if sig.Recv() == nil { + panic("not a method: " + obj.String()) + } + + // Belongs to a created package? + if fn := prog.FuncValue(obj); fn != nil { + return fn + } + + // Instantiation of generic? + if originObj := obj.Origin(); originObj != obj { + origin := prog.objectMethod(originObj, b) + assert(origin.typeparams.Len() > 0, "origin is not generic") + targs := receiverTypeArgs(obj) + return origin.instance(targs, b) + } + + // Consult/update cache of methods created from types.Func. + prog.objectMethodsMu.Lock() + defer prog.objectMethodsMu.Unlock() + fn, ok := prog.objectMethods[obj] + if !ok { + fn = createFunction(prog, obj, obj.Name(), nil, nil, "") + fn.Synthetic = "from type information (on demand)" + fn.buildshared = b.shared() + b.enqueue(fn) + + if prog.objectMethods == nil { + prog.objectMethods = make(map[*types.Func]*Function) + } + prog.objectMethods[obj] = fn + } else { + b.waitForSharedFunction(fn) + } + return fn +} + +// LookupMethod returns the implementation of the method of type T +// identified by (pkg, name). It returns nil if the method exists but +// is an interface method or generic method, and panics if T has no such method. +func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { + sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) + if sel == nil { + panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name))) + } + return prog.MethodValue(sel) +} + +// methodSet contains the (concrete) methods of a concrete type (non-interface, non-parameterized). +type methodSet struct { + mapping map[string]*Function // populated lazily +} + +// RuntimeTypes returns a new unordered slice containing all types in +// the program for which a runtime type is required. +// +// A runtime type is required for any non-parameterized, non-interface +// type that is converted to an interface, or for any type (including +// interface types) derivable from one through reflection. +// +// The methods of such types may be reachable through reflection or +// interface calls even if they are never called directly. +// +// Thread-safe. +// +// Acquires prog.runtimeTypesMu. +func (prog *Program) RuntimeTypes() []types.Type { + prog.runtimeTypesMu.Lock() + defer prog.runtimeTypesMu.Unlock() + return prog.runtimeTypes.Keys() +} + +// forEachReachable calls f for type T and each type reachable from +// its type through reflection. +// +// The function f must use memoization to break cycles and +// return false when the type has already been visited. 
+// +// TODO(adonovan): publish in typeutil and share with go/callgraph/rta. +func forEachReachable(msets *typeutil.MethodSetCache, T types.Type, f func(types.Type) bool) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if !f(T) { + return + } + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *aliases.Alias: + visit(aliases.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. + panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/vendor/golang.org/x/tools/go/ssa/mode.go b/vendor/golang.org/x/tools/go/ssa/mode.go new file mode 100644 index 0000000..8381639 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/mode.go @@ -0,0 +1,111 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +// This file defines the BuilderMode type and its command-line flag. + +import ( + "bytes" + "fmt" +) + +// BuilderMode is a bitmask of options for diagnostics and checking. +// +// *BuilderMode satisfies the flag.Value interface. Example: +// +// var mode = ssa.BuilderMode(0) +// func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) } +type BuilderMode uint + +const ( + PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout + PrintFunctions // Print function SSA code to stdout + LogSource // Log source locations as SSA builder progresses + SanityCheckFunctions // Perform sanity checking of function bodies + NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers + BuildSerially // Build packages serially, not in parallel. + GlobalDebug // Enable debug info for all packages + BareInits // Build init functions without guards or calls to dependent inits + InstantiateGenerics // Instantiate generics functions (monomorphize) while building +) + +const BuilderModeDoc = `Options controlling the SSA builder. +The value is a sequence of zero or more of these letters: +C perform sanity [C]hecking of the SSA form. +D include [D]ebug info for every function. +P print [P]ackage inventory. +F print [F]unction SSA code. +S log [S]ource locations as SSA builder progresses. +L build distinct packages seria[L]ly instead of in parallel. +N build [N]aive SSA form: don't replace local loads/stores with registers. +I build bare [I]nit functions: no init guards or calls to dependent inits. +G instantiate [G]eneric function bodies via monomorphization +` + +func (m BuilderMode) String() string { + var buf bytes.Buffer + if m&GlobalDebug != 0 { + buf.WriteByte('D') + } + if m&PrintPackages != 0 { + buf.WriteByte('P') + } + if m&PrintFunctions != 0 { + buf.WriteByte('F') + } + if m&LogSource != 0 { + buf.WriteByte('S') + } + if m&SanityCheckFunctions != 0 { + buf.WriteByte('C') + } + if m&NaiveForm != 0 { + buf.WriteByte('N') + } + if m&BuildSerially != 0 { + buf.WriteByte('L') + } + if m&BareInits != 0 { + buf.WriteByte('I') + } + if m&InstantiateGenerics != 0 { + buf.WriteByte('G') + } + return buf.String() +} + +// Set parses the flag characters in s and updates *m. +func (m *BuilderMode) Set(s string) error { + var mode BuilderMode + for _, c := range s { + switch c { + case 'D': + mode |= GlobalDebug + case 'P': + mode |= PrintPackages + case 'F': + mode |= PrintFunctions + case 'S': + mode |= LogSource | BuildSerially + case 'C': + mode |= SanityCheckFunctions + case 'N': + mode |= NaiveForm + case 'L': + mode |= BuildSerially + case 'I': + mode |= BareInits + case 'G': + mode |= InstantiateGenerics + default: + return fmt.Errorf("unknown BuilderMode option: %q", c) + } + } + *m = mode + return nil +} + +// Get returns m. +func (m BuilderMode) Get() interface{} { return m } diff --git a/vendor/golang.org/x/tools/go/ssa/print.go b/vendor/golang.org/x/tools/go/ssa/print.go new file mode 100644 index 0000000..c890d7e --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/print.go @@ -0,0 +1,470 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file implements the String() methods for all Value and +// Instruction types. 
+ +import ( + "bytes" + "fmt" + "go/types" + "io" + "reflect" + "sort" + "strings" + + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typeparams" +) + +// relName returns the name of v relative to i. +// In most cases, this is identical to v.Name(), but references to +// Functions (including methods) and Globals use RelString and +// all types are displayed with relType, so that only cross-package +// references are package-qualified. +func relName(v Value, i Instruction) string { + var from *types.Package + if i != nil { + from = i.Parent().relPkg() + } + switch v := v.(type) { + case Member: // *Function or *Global + return v.RelString(from) + case *Const: + return v.RelString(from) + } + return v.Name() +} + +// normalizeAnyForTesting controls whether we replace occurrences of +// interface{} with any. It is only used for normalizing test output. +var normalizeAnyForTesting bool + +func relType(t types.Type, from *types.Package) string { + s := types.TypeString(t, types.RelativeTo(from)) + if normalizeAnyForTesting { + s = strings.ReplaceAll(s, "interface{}", "any") + } + return s +} + +func relTerm(term *types.Term, from *types.Package) string { + s := relType(term.Type(), from) + if term.Tilde() { + return "~" + s + } + return s +} + +func relString(m Member, from *types.Package) string { + // NB: not all globals have an Object (e.g. init$guard), + // so use Package().Object not Object.Package(). + if pkg := m.Package().Pkg; pkg != nil && pkg != from { + return fmt.Sprintf("%s.%s", pkg.Path(), m.Name()) + } + return m.Name() +} + +// Value.String() +// +// This method is provided only for debugging. +// It never appears in disassembly, which uses Value.Name(). + +func (v *Parameter) String() string { + from := v.Parent().relPkg() + return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from)) +} + +func (v *FreeVar) String() string { + from := v.Parent().relPkg() + return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from)) +} + +func (v *Builtin) String() string { + return fmt.Sprintf("builtin %s", v.Name()) +} + +// Instruction.String() + +func (v *Alloc) String() string { + op := "local" + if v.Heap { + op = "new" + } + from := v.Parent().relPkg() + return fmt.Sprintf("%s %s (%s)", op, relType(typeparams.MustDeref(v.Type()), from), v.Comment) +} + +func (v *Phi) String() string { + var b bytes.Buffer + b.WriteString("phi [") + for i, edge := range v.Edges { + if i > 0 { + b.WriteString(", ") + } + // Be robust against malformed CFG. 
+ if v.block == nil { + b.WriteString("??") + continue + } + block := -1 + if i < len(v.block.Preds) { + block = v.block.Preds[i].Index + } + fmt.Fprintf(&b, "%d: ", block) + edgeVal := "" // be robust + if edge != nil { + edgeVal = relName(edge, v) + } + b.WriteString(edgeVal) + } + b.WriteString("]") + if v.Comment != "" { + b.WriteString(" #") + b.WriteString(v.Comment) + } + return b.String() +} + +func printCall(v *CallCommon, prefix string, instr Instruction) string { + var b bytes.Buffer + b.WriteString(prefix) + if !v.IsInvoke() { + b.WriteString(relName(v.Value, instr)) + } else { + fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name()) + } + b.WriteString("(") + for i, arg := range v.Args { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(relName(arg, instr)) + } + if v.Signature().Variadic() { + b.WriteString("...") + } + b.WriteString(")") + return b.String() +} + +func (c *CallCommon) String() string { + return printCall(c, "", nil) +} + +func (v *Call) String() string { + return printCall(&v.Call, "", v) +} + +func (v *BinOp) String() string { + return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v)) +} + +func (v *UnOp) String() string { + return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk)) +} + +func printConv(prefix string, v, x Value) string { + from := v.Parent().relPkg() + return fmt.Sprintf("%s %s <- %s (%s)", + prefix, + relType(v.Type(), from), + relType(x.Type(), from), + relName(x, v.(Instruction))) +} + +func (v *ChangeType) String() string { return printConv("changetype", v, v.X) } +func (v *Convert) String() string { return printConv("convert", v, v.X) } +func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) } +func (v *SliceToArrayPointer) String() string { return printConv("slice to array pointer", v, v.X) } +func (v *MakeInterface) String() string { return printConv("make", v, v.X) } + +func (v *MultiConvert) String() string { + from := v.Parent().relPkg() + + var b strings.Builder + b.WriteString(printConv("multiconvert", v, v.X)) + b.WriteString(" [") + for i, s := range v.from { + for j, d := range v.to { + if i != 0 || j != 0 { + b.WriteString(" | ") + } + fmt.Fprintf(&b, "%s <- %s", relTerm(d, from), relTerm(s, from)) + } + } + b.WriteString("]") + return b.String() +} + +func (v *MakeClosure) String() string { + var b bytes.Buffer + fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v)) + if v.Bindings != nil { + b.WriteString(" [") + for i, c := range v.Bindings { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(relName(c, v)) + } + b.WriteString("]") + } + return b.String() +} + +func (v *MakeSlice) String() string { + from := v.Parent().relPkg() + return fmt.Sprintf("make %s %s %s", + relType(v.Type(), from), + relName(v.Len, v), + relName(v.Cap, v)) +} + +func (v *Slice) String() string { + var b bytes.Buffer + b.WriteString("slice ") + b.WriteString(relName(v.X, v)) + b.WriteString("[") + if v.Low != nil { + b.WriteString(relName(v.Low, v)) + } + b.WriteString(":") + if v.High != nil { + b.WriteString(relName(v.High, v)) + } + if v.Max != nil { + b.WriteString(":") + b.WriteString(relName(v.Max, v)) + } + b.WriteString("]") + return b.String() +} + +func (v *MakeMap) String() string { + res := "" + if v.Reserve != nil { + res = relName(v.Reserve, v) + } + from := v.Parent().relPkg() + return fmt.Sprintf("make %s %s", relType(v.Type(), from), res) +} + +func (v *MakeChan) String() string { + from := v.Parent().relPkg() + return 
fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v)) +} + +func (v *FieldAddr) String() string { + // Be robust against a bad index. + name := "?" + if fld := fieldOf(typeparams.MustDeref(v.X.Type()), v.Field); fld != nil { + name = fld.Name() + } + return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field) +} + +func (v *Field) String() string { + // Be robust against a bad index. + name := "?" + if fld := fieldOf(v.X.Type(), v.Field); fld != nil { + name = fld.Name() + } + return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field) +} + +func (v *IndexAddr) String() string { + return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v)) +} + +func (v *Index) String() string { + return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v)) +} + +func (v *Lookup) String() string { + return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk)) +} + +func (v *Range) String() string { + return "range " + relName(v.X, v) +} + +func (v *Next) String() string { + return "next " + relName(v.Iter, v) +} + +func (v *TypeAssert) String() string { + from := v.Parent().relPkg() + return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from)) +} + +func (v *Extract) String() string { + return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index) +} + +func (s *Jump) String() string { + // Be robust against malformed CFG. + block := -1 + if s.block != nil && len(s.block.Succs) == 1 { + block = s.block.Succs[0].Index + } + return fmt.Sprintf("jump %d", block) +} + +func (s *If) String() string { + // Be robust against malformed CFG. + tblock, fblock := -1, -1 + if s.block != nil && len(s.block.Succs) == 2 { + tblock = s.block.Succs[0].Index + fblock = s.block.Succs[1].Index + } + return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock) +} + +func (s *Go) String() string { + return printCall(&s.Call, "go ", s) +} + +func (s *Panic) String() string { + return "panic " + relName(s.X, s) +} + +func (s *Return) String() string { + var b bytes.Buffer + b.WriteString("return") + for i, r := range s.Results { + if i == 0 { + b.WriteString(" ") + } else { + b.WriteString(", ") + } + b.WriteString(relName(r, s)) + } + return b.String() +} + +func (*RunDefers) String() string { + return "rundefers" +} + +func (s *Send) String() string { + return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s)) +} + +func (s *Defer) String() string { + prefix := "defer " + if s.DeferStack != nil { + prefix += "[" + relName(s.DeferStack, s) + "] " + } + c := printCall(&s.Call, prefix, s) + return c +} + +func (s *Select) String() string { + var b bytes.Buffer + for i, st := range s.States { + if i > 0 { + b.WriteString(", ") + } + if st.Dir == types.RecvOnly { + b.WriteString("<-") + b.WriteString(relName(st.Chan, s)) + } else { + b.WriteString(relName(st.Chan, s)) + b.WriteString("<-") + b.WriteString(relName(st.Send, s)) + } + } + non := "" + if !s.Blocking { + non = "non" + } + return fmt.Sprintf("select %sblocking [%s]", non, b.String()) +} + +func (s *Store) String() string { + return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s)) +} + +func (s *MapUpdate) String() string { + return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s)) +} + +func (s *DebugRef) String() string { + p := s.Parent().Prog.Fset.Position(s.Pos()) + var descr interface{} + if s.object != nil { + descr = s.object // e.g. 
"var x int" + } else { + descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr" + } + var addr string + if s.IsAddr { + addr = "address of " + } + return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name()) +} + +func (p *Package) String() string { + return "package " + p.Pkg.Path() +} + +var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer + +func (p *Package) WriteTo(w io.Writer) (int64, error) { + var buf bytes.Buffer + WritePackage(&buf, p) + n, err := w.Write(buf.Bytes()) + return int64(n), err +} + +// WritePackage writes to buf a human-readable summary of p. +func WritePackage(buf *bytes.Buffer, p *Package) { + fmt.Fprintf(buf, "%s:\n", p) + + var names []string + maxname := 0 + for name := range p.Members { + if l := len(name); l > maxname { + maxname = l + } + names = append(names, name) + } + + from := p.Pkg + sort.Strings(names) + for _, name := range names { + switch mem := p.Members[name].(type) { + case *NamedConst: + fmt.Fprintf(buf, " const %-*s %s = %s\n", + maxname, name, mem.Name(), mem.Value.RelString(from)) + + case *Function: + fmt.Fprintf(buf, " func %-*s %s\n", + maxname, name, relType(mem.Type(), from)) + + case *Type: + fmt.Fprintf(buf, " type %-*s %s\n", + maxname, name, relType(mem.Type().Underlying(), from)) + for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) { + fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from))) + } + + case *Global: + fmt.Fprintf(buf, " var %-*s %s\n", + maxname, name, relType(typeparams.MustDeref(mem.Type()), from)) + } + } + + fmt.Fprintf(buf, "\n") +} + +func commaOk(x bool) string { + if x { + return ",ok" + } + return "" +} diff --git a/vendor/golang.org/x/tools/go/ssa/sanity.go b/vendor/golang.org/x/tools/go/ssa/sanity.go new file mode 100644 index 0000000..285cba0 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/sanity.go @@ -0,0 +1,560 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// An optional pass for sanity-checking invariants of the SSA representation. +// Currently it checks CFG invariants but little at the instruction level. + +import ( + "bytes" + "fmt" + "go/ast" + "go/types" + "io" + "os" + "strings" +) + +type sanity struct { + reporter io.Writer + fn *Function + block *BasicBlock + instrs map[Instruction]unit + insane bool +} + +// sanityCheck performs integrity checking of the SSA representation +// of the function fn and returns true if it was valid. Diagnostics +// are written to reporter if non-nil, os.Stderr otherwise. Some +// diagnostics are only warnings and do not imply a negative result. +// +// Sanity-checking is intended to facilitate the debugging of code +// transformation passes. +func sanityCheck(fn *Function, reporter io.Writer) bool { + if reporter == nil { + reporter = os.Stderr + } + return (&sanity{reporter: reporter}).checkFunction(fn) +} + +// mustSanityCheck is like sanityCheck but panics instead of returning +// a negative result. 
+func mustSanityCheck(fn *Function, reporter io.Writer) { + if !sanityCheck(fn, reporter) { + fn.WriteTo(os.Stderr) + panic("SanityCheck failed") + } +} + +func (s *sanity) diagnostic(prefix, format string, args ...interface{}) { + fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn) + if s.block != nil { + fmt.Fprintf(s.reporter, ", block %s", s.block) + } + io.WriteString(s.reporter, ": ") + fmt.Fprintf(s.reporter, format, args...) + io.WriteString(s.reporter, "\n") +} + +func (s *sanity) errorf(format string, args ...interface{}) { + s.insane = true + s.diagnostic("Error", format, args...) +} + +func (s *sanity) warnf(format string, args ...interface{}) { + s.diagnostic("Warning", format, args...) +} + +// findDuplicate returns an arbitrary basic block that appeared more +// than once in blocks, or nil if all were unique. +func findDuplicate(blocks []*BasicBlock) *BasicBlock { + if len(blocks) < 2 { + return nil + } + if blocks[0] == blocks[1] { + return blocks[0] + } + // Slow path: + m := make(map[*BasicBlock]bool) + for _, b := range blocks { + if m[b] { + return b + } + m[b] = true + } + return nil +} + +func (s *sanity) checkInstr(idx int, instr Instruction) { + switch instr := instr.(type) { + case *If, *Jump, *Return, *Panic: + s.errorf("control flow instruction not at end of block") + case *Phi: + if idx == 0 { + // It suffices to apply this check to just the first phi node. + if dup := findDuplicate(s.block.Preds); dup != nil { + s.errorf("phi node in block with duplicate predecessor %s", dup) + } + } else { + prev := s.block.Instrs[idx-1] + if _, ok := prev.(*Phi); !ok { + s.errorf("Phi instruction follows a non-Phi: %T", prev) + } + } + if ne, np := len(instr.Edges), len(s.block.Preds); ne != np { + s.errorf("phi node has %d edges but %d predecessors", ne, np) + + } else { + for i, e := range instr.Edges { + if e == nil { + s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i]) + } else if !types.Identical(instr.typ, e.Type()) { + s.errorf("phi node '%s' has a different type (%s) for edge #%d from %s (%s)", + instr.Comment, instr.Type(), i, s.block.Preds[i], e.Type()) + } + } + } + + case *Alloc: + if !instr.Heap { + found := false + for _, l := range s.fn.Locals { + if l == instr { + found = true + break + } + } + if !found { + s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr) + } + } + + case *BinOp: + case *Call: + if common := instr.Call; common.IsInvoke() { + if !types.IsInterface(common.Value.Type()) { + s.errorf("invoke on %s (%s) which is not an interface type (or type param)", common.Value, common.Value.Type()) + } + } + case *ChangeInterface: + case *ChangeType: + case *SliceToArrayPointer: + case *Convert: + if from := instr.X.Type(); !isBasicConvTypes(typeSetOf(from)) { + if to := instr.Type(); !isBasicConvTypes(typeSetOf(to)) { + s.errorf("convert %s -> %s: at least one type must be basic (or all basic, []byte, or []rune)", from, to) + } + } + case *MultiConvert: + case *Defer: + case *Extract: + case *Field: + case *FieldAddr: + case *Go: + case *Index: + case *IndexAddr: + case *Lookup: + case *MakeChan: + case *MakeClosure: + numFree := len(instr.Fn.(*Function).FreeVars) + numBind := len(instr.Bindings) + if numFree != numBind { + s.errorf("MakeClosure has %d Bindings for function %s with %d free vars", + numBind, instr.Fn, numFree) + + } + if recv := instr.Type().(*types.Signature).Recv(); recv != nil { + s.errorf("MakeClosure's type includes receiver %s", recv.Type()) + } + + 
case *MakeInterface: + case *MakeMap: + case *MakeSlice: + case *MapUpdate: + case *Next: + case *Range: + case *RunDefers: + case *Select: + case *Send: + case *Slice: + case *Store: + case *TypeAssert: + case *UnOp: + case *DebugRef: + // TODO(adonovan): implement checks. + default: + panic(fmt.Sprintf("Unknown instruction type: %T", instr)) + } + + if call, ok := instr.(CallInstruction); ok { + if call.Common().Signature() == nil { + s.errorf("nil signature: %s", call) + } + } + + // Check that value-defining instructions have valid types + // and a valid referrer list. + if v, ok := instr.(Value); ok { + t := v.Type() + if t == nil { + s.errorf("no type: %s = %s", v.Name(), v) + } else if t == tRangeIter || t == tDeferStack { + // not a proper type; ignore. + } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 { + s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t) + } + s.checkReferrerList(v) + } + + // Untyped constants are legal as instruction Operands(), + // for example: + // _ = "foo"[0] + // or: + // if wordsize==64 {...} + + // All other non-Instruction Values can be found via their + // enclosing Function or Package. +} + +func (s *sanity) checkFinalInstr(instr Instruction) { + switch instr := instr.(type) { + case *If: + if nsuccs := len(s.block.Succs); nsuccs != 2 { + s.errorf("If-terminated block has %d successors; expected 2", nsuccs) + return + } + if s.block.Succs[0] == s.block.Succs[1] { + s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0]) + return + } + + case *Jump: + if nsuccs := len(s.block.Succs); nsuccs != 1 { + s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs) + return + } + + case *Return: + if nsuccs := len(s.block.Succs); nsuccs != 0 { + s.errorf("Return-terminated block has %d successors; expected none", nsuccs) + return + } + if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na { + s.errorf("%d-ary return in %d-ary function", na, nf) + } + + case *Panic: + if nsuccs := len(s.block.Succs); nsuccs != 0 { + s.errorf("Panic-terminated block has %d successors; expected none", nsuccs) + return + } + + default: + s.errorf("non-control flow instruction at end of block") + } +} + +func (s *sanity) checkBlock(b *BasicBlock, index int) { + s.block = b + + if b.Index != index { + s.errorf("block has incorrect Index %d", b.Index) + } + if b.parent != s.fn { + s.errorf("block has incorrect parent %s", b.parent) + } + + // Check all blocks are reachable. + // (The entry block is always implicitly reachable, + // as is the Recover block, if any.) + if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 { + s.warnf("unreachable block") + if b.Instrs == nil { + // Since this block is about to be pruned, + // tolerating transient problems in it + // simplifies other optimizations. + return + } + } + + // Check predecessor and successor relations are dual, + // and that all blocks in CFG belong to same function. 
+ for _, a := range b.Preds { + found := false + for _, bb := range a.Succs { + if bb == b { + found = true + break + } + } + if !found { + s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs) + } + if a.parent != s.fn { + s.errorf("predecessor %s belongs to different function %s", a, a.parent) + } + } + for _, c := range b.Succs { + found := false + for _, bb := range c.Preds { + if bb == b { + found = true + break + } + } + if !found { + s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds) + } + if c.parent != s.fn { + s.errorf("successor %s belongs to different function %s", c, c.parent) + } + } + + // Check each instruction is sane. + n := len(b.Instrs) + if n == 0 { + s.errorf("basic block contains no instructions") + } + var rands [10]*Value // reuse storage + for j, instr := range b.Instrs { + if instr == nil { + s.errorf("nil instruction at index %d", j) + continue + } + if b2 := instr.Block(); b2 == nil { + s.errorf("nil Block() for instruction at index %d", j) + continue + } else if b2 != b { + s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j) + continue + } + if j < n-1 { + s.checkInstr(j, instr) + } else { + s.checkFinalInstr(instr) + } + + // Check Instruction.Operands. + operands: + for i, op := range instr.Operands(rands[:0]) { + if op == nil { + s.errorf("nil operand pointer %d of %s", i, instr) + continue + } + val := *op + if val == nil { + continue // a nil operand is ok + } + + // Check that "untyped" types only appear on constant operands. + if _, ok := (*op).(*Const); !ok { + if basic, ok := (*op).Type().Underlying().(*types.Basic); ok { + if basic.Info()&types.IsUntyped != 0 { + s.errorf("operand #%d of %s is untyped: %s", i, instr, basic) + } + } + } + + // Check that Operands that are also Instructions belong to same function. + // TODO(adonovan): also check their block dominates block b. + if val, ok := val.(Instruction); ok { + if val.Block() == nil { + s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val) + } else if val.Parent() != s.fn { + s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent()) + } + } + + // Check that each function-local operand of + // instr refers back to instr. (NB: quadratic) + switch val := val.(type) { + case *Const, *Global, *Builtin: + continue // not local + case *Function: + if val.parent == nil { + continue // only anon functions are local + } + } + + // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined. + + if refs := val.Referrers(); refs != nil { + for _, ref := range *refs { + if ref == instr { + continue operands + } + } + s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val) + } else { + s.errorf("operand %d of %s (%s) has no referrers", i, instr, val) + } + } + } +} + +func (s *sanity) checkReferrerList(v Value) { + refs := v.Referrers() + if refs == nil { + s.errorf("%s has missing referrer list", v.Name()) + return + } + for i, ref := range *refs { + if _, ok := s.instrs[ref]; !ok { + s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref) + } + } +} + +func (s *sanity) checkFunction(fn *Function) bool { + // TODO(adonovan): check Function invariants: + // - check params match signature + // - check transient fields are nil + // - warn if any fn.Locals do not appear among block instructions. + + // TODO(taking): Sanity check origin, typeparams, and typeargs. 
+ s.fn = fn + if fn.Prog == nil { + s.errorf("nil Prog") + } + + var buf bytes.Buffer + _ = fn.String() // must not crash + _ = fn.RelString(fn.relPkg()) // must not crash + WriteFunction(&buf, fn) // must not crash + + // All functions have a package, except delegates (which are + // shared across packages, or duplicated as weak symbols in a + // separate-compilation model), and error.Error. + if fn.Pkg == nil { + if strings.HasPrefix(fn.Synthetic, "from type information (on demand)") || + strings.HasPrefix(fn.Synthetic, "wrapper ") || + strings.HasPrefix(fn.Synthetic, "bound ") || + strings.HasPrefix(fn.Synthetic, "thunk ") || + strings.HasSuffix(fn.name, "Error") || + strings.HasPrefix(fn.Synthetic, "instance ") || + strings.HasPrefix(fn.Synthetic, "instantiation ") || + (fn.parent != nil && len(fn.typeargs) > 0) /* anon fun in instance */ { + // ok + } else { + s.errorf("nil Pkg") + } + } + if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn { + if len(fn.typeargs) > 0 && fn.Prog.mode&InstantiateGenerics != 0 { + // ok (instantiation with InstantiateGenerics on) + } else if fn.topLevelOrigin != nil && len(fn.typeargs) > 0 { + // ok (we always have the syntax set for instantiation) + } else if _, rng := fn.syntax.(*ast.RangeStmt); rng && fn.Synthetic == "range-over-func yield" { + // ok (range-func-yields are both synthetic and keep syntax) + } else { + s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) + } + } + for i, l := range fn.Locals { + if l.Parent() != fn { + s.errorf("Local %s at index %d has wrong parent", l.Name(), i) + } + if l.Heap { + s.errorf("Local %s at index %d has Heap flag set", l.Name(), i) + } + } + // Build the set of valid referrers. + s.instrs = make(map[Instruction]unit) + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + s.instrs[instr] = unit{} + } + } + for i, p := range fn.Params { + if p.Parent() != fn { + s.errorf("Param %s at index %d has wrong parent", p.Name(), i) + } + // Check common suffix of Signature and Params match type. + if sig := fn.Signature; sig != nil { + j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params + if j < 0 { + continue + } + if !types.Identical(p.Type(), sig.Params().At(j).Type()) { + s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type()) + + } + } + s.checkReferrerList(p) + } + for i, fv := range fn.FreeVars { + if fv.Parent() != fn { + s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i) + } + s.checkReferrerList(fv) + } + + if fn.Blocks != nil && len(fn.Blocks) == 0 { + // Function _had_ blocks (so it's not external) but + // they were "optimized" away, even the entry block. + s.errorf("Blocks slice is non-nil but empty") + } + for i, b := range fn.Blocks { + if b == nil { + s.warnf("nil *BasicBlock at f.Blocks[%d]", i) + continue + } + s.checkBlock(b, i) + } + if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover { + s.errorf("Recover block is not in Blocks slice") + } + + s.block = nil + for i, anon := range fn.AnonFuncs { + if anon.Parent() != fn { + s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent()) + } + if i != int(anon.anonIdx) { + s.errorf("AnonFuncs[%d]=%s but %s.anonIdx=%d", i, anon, anon, anon.anonIdx) + } + } + s.fn = nil + return !s.insane +} + +// sanityCheckPackage checks invariants of packages upon creation. +// It does not require that the package is built. 
+// Unlike sanityCheck (for functions), it just panics at the first error. +func sanityCheckPackage(pkg *Package) { + if pkg.Pkg == nil { + panic(fmt.Sprintf("Package %s has no Object", pkg)) + } + _ = pkg.String() // must not crash + + for name, mem := range pkg.Members { + if name != mem.Name() { + panic(fmt.Sprintf("%s: %T.Name() = %s, want %s", + pkg.Pkg.Path(), mem, mem.Name(), name)) + } + obj := mem.Object() + if obj == nil { + // This check is sound because fields + // {Global,Function}.object have type + // types.Object. (If they were declared as + // *types.{Var,Func}, we'd have a non-empty + // interface containing a nil pointer.) + + continue // not all members have typechecker objects + } + if obj.Name() != name { + if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") { + // Ok. The name of a declared init function varies between + // its types.Func ("init") and its ssa.Function ("init#%d"). + } else { + panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s", + pkg.Pkg.Path(), mem, obj.Name(), name)) + } + } + if obj.Pos() != mem.Pos() { + panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos())) + } + } +} diff --git a/vendor/golang.org/x/tools/go/ssa/source.go b/vendor/golang.org/x/tools/go/ssa/source.go new file mode 100644 index 0000000..7b71c88 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/source.go @@ -0,0 +1,288 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines utilities for working with source positions +// or source-level named entities ("objects"). + +// TODO(adonovan): test that {Value,Instruction}.Pos() positions match +// the originating syntax, as specified. + +import ( + "go/ast" + "go/token" + "go/types" +) + +// EnclosingFunction returns the function that contains the syntax +// node denoted by path. +// +// Syntax associated with package-level variable specifications is +// enclosed by the package's init() function. +// +// Returns nil if not found; reasons might include: +// - the node is not enclosed by any function. +// - the node is within an anonymous function (FuncLit) and +// its SSA function has not been created yet +// (pkg.Build() has not yet been called). +func EnclosingFunction(pkg *Package, path []ast.Node) *Function { + // Start with package-level function... + fn := findEnclosingPackageLevelFunction(pkg, path) + if fn == nil { + return nil // not in any function + } + + // ...then walk down the nested anonymous functions. + n := len(path) +outer: + for i := range path { + if lit, ok := path[n-1-i].(*ast.FuncLit); ok { + for _, anon := range fn.AnonFuncs { + if anon.Pos() == lit.Type.Func { + fn = anon + continue outer + } + } + // SSA function not found: + // - package not yet built, or maybe + // - builder skipped FuncLit in dead block + // (in principle; but currently the Builder + // generates even dead FuncLits). + return nil + } + } + return fn +} + +// HasEnclosingFunction returns true if the AST node denoted by path +// is contained within the declaration of some function or +// package-level variable. +// +// Unlike EnclosingFunction, the behaviour of this function does not +// depend on whether SSA code for pkg has been built, so it can be +// used to quickly reject check inputs that will cause +// EnclosingFunction to fail, prior to SSA building. 
+func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { + return findEnclosingPackageLevelFunction(pkg, path) != nil +} + +// findEnclosingPackageLevelFunction returns the Function +// corresponding to the package-level function enclosing path. +func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function { + if n := len(path); n >= 2 { // [... {Gen,Func}Decl File] + switch decl := path[n-2].(type) { + case *ast.GenDecl: + if decl.Tok == token.VAR && n >= 3 { + // Package-level 'var' initializer. + return pkg.init + } + + case *ast.FuncDecl: + if decl.Recv == nil && decl.Name.Name == "init" { + // Explicit init() function. + for _, b := range pkg.init.Blocks { + for _, instr := range b.Instrs { + if instr, ok := instr.(*Call); ok { + if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos { + return callee + } + } + } + } + // Hack: return non-nil when SSA is not yet + // built so that HasEnclosingFunction works. + return pkg.init + } + // Declared function/method. + return findNamedFunc(pkg, decl.Name.NamePos) + } + } + return nil // not in any function +} + +// findNamedFunc returns the named function whose FuncDecl.Ident is at +// position pos. +func findNamedFunc(pkg *Package, pos token.Pos) *Function { + // Look at all package members and method sets of named types. + // Not very efficient. + for _, mem := range pkg.Members { + switch mem := mem.(type) { + case *Function: + if mem.Pos() == pos { + return mem + } + case *Type: + mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type())) + for i, n := 0, mset.Len(); i < n; i++ { + // Don't call Program.Method: avoid creating wrappers. + obj := mset.At(i).Obj().(*types.Func) + if obj.Pos() == pos { + // obj from MethodSet may not be the origin type. + m := obj.Origin() + return pkg.objects[m].(*Function) + } + } + } + } + return nil +} + +// ValueForExpr returns the SSA Value that corresponds to non-constant +// expression e. +// +// It returns nil if no value was found, e.g. +// - the expression is not lexically contained within f; +// - f was not built with debug information; or +// - e is a constant expression. (For efficiency, no debug +// information is stored for constants. Use +// go/types.Info.Types[e].Value instead.) +// - e is a reference to nil or a built-in function. +// - the value was optimised away. +// +// If e is an addressable expression used in an lvalue context, +// value is the address denoted by e, and isAddr is true. +// +// The types of e (or &e, if isAddr) and the result are equal +// (modulo "untyped" bools resulting from comparisons). +// +// (Tip: to find the ssa.Value given a source position, use +// astutil.PathEnclosingInterval to locate the ast.Node, then +// EnclosingFunction to locate the Function, then ValueForExpr to find +// the ssa.Value.) +func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { + if f.debugInfo() { // (opt) + e = unparen(e) + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if ref, ok := instr.(*DebugRef); ok { + if ref.Expr == e { + return ref.X, ref.IsAddr + } + } + } + } + } + return +} + +// --- Lookup functions for source-level named entities (types.Objects) --- + +// Package returns the SSA Package corresponding to the specified +// type-checker package. It returns nil if no such Package was +// created by a prior call to prog.CreatePackage. 
+func (prog *Program) Package(pkg *types.Package) *Package {
+	return prog.packages[pkg]
+}
+
+// packageLevelMember returns the package-level member corresponding
+// to the specified symbol, which may be a package-level const
+// (*NamedConst), var (*Global) or func/method (*Function) of some
+// package in prog.
+//
+// It returns nil if the object belongs to a package that has not been
+// created by prog.CreatePackage.
+func (prog *Program) packageLevelMember(obj types.Object) Member {
+	if pkg, ok := prog.packages[obj.Pkg()]; ok {
+		return pkg.objects[obj]
+	}
+	return nil
+}
+
+// FuncValue returns the SSA function or (non-interface) method
+// denoted by the specified func symbol. It returns nil if the symbol
+// denotes an interface method, or belongs to a package that was not
+// created by prog.CreatePackage.
+func (prog *Program) FuncValue(obj *types.Func) *Function {
+	fn, _ := prog.packageLevelMember(obj).(*Function)
+	return fn
+}
+
+// ConstValue returns the SSA constant denoted by the specified const symbol.
+func (prog *Program) ConstValue(obj *types.Const) *Const {
+	// TODO(adonovan): opt: share (don't reallocate)
+	// Consts for const objects and constant ast.Exprs.
+
+	// Universal constant? {true,false,nil}
+	if obj.Parent() == types.Universe {
+		return NewConst(obj.Val(), obj.Type())
+	}
+	// Package-level named constant?
+	if v := prog.packageLevelMember(obj); v != nil {
+		return v.(*NamedConst).Value
+	}
+	return NewConst(obj.Val(), obj.Type())
+}
+
+// VarValue returns the SSA Value that corresponds to a specific
+// identifier denoting the specified var symbol.
+//
+// VarValue returns nil if a local variable was not found, perhaps
+// because its package was not built, the debug information was not
+// requested during SSA construction, or the value was optimized away.
+//
+// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval),
+// and that ident must resolve to obj.
+//
+// pkg is the package enclosing the reference. (A reference to a var
+// always occurs within a function, so we need to know where to find it.)
+//
+// If the identifier is a field selector and its base expression is
+// non-addressable, then VarValue returns the value of that field.
+// For example:
+//
+//	func f() struct {x int}
+//	f().x // VarValue(x) returns a *Field instruction of type int
+//
+// All other identifiers denote addressable locations (variables).
+// For them, VarValue may return either the variable's address or its
+// value, even when the expression is evaluated only for its value; the
+// situation is reported by isAddr, the second component of the result.
+//
+// If !isAddr, the returned value is the one associated with the
+// specific identifier. For example,
+//
+//	var x int // VarValue(x) returns Const 0 here
+//	x = 1     // VarValue(x) returns Const 1 here
+//
+// It is not specified whether the value or the address is returned in
+// any particular case, as it may depend upon optimizations performed
+// during SSA code generation, such as registerization, constant
+// folding, avoidance of materialization of subexpressions, etc.
+func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) {
+	// All references to a var are local to some function, possibly init.
+	fn := EnclosingFunction(pkg, ref)
+	if fn == nil {
+		return // e.g. def of struct field; SSA not built?
+	}
+
+	id := ref[0].(*ast.Ident)
+
+	// Defining ident of a parameter?
+ if id.Pos() == obj.Pos() { + for _, param := range fn.Params { + if param.Object() == obj { + return param, false + } + } + } + + // Other ident? + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + if dr, ok := instr.(*DebugRef); ok { + if dr.Pos() == id.Pos() { + return dr.X, dr.IsAddr + } + } + } + } + + // Defining ident of package-level var? + if v := prog.packageLevelMember(obj); v != nil { + return v.(*Global), true + } + + return // e.g. debug info not requested, or var optimized away +} diff --git a/vendor/golang.org/x/tools/go/ssa/ssa.go b/vendor/golang.org/x/tools/go/ssa/ssa.go new file mode 100644 index 0000000..1231afd --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssa.go @@ -0,0 +1,1871 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This package defines a high-level intermediate representation for +// Go programs using static single-assignment (SSA) form. + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "sync" + + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typeparams" +) + +// A Program is a partial or complete Go program converted to SSA form. +type Program struct { + Fset *token.FileSet // position information for the files of this Program + imported map[string]*Package // all importable Packages, keyed by import path + packages map[*types.Package]*Package // all created Packages + mode BuilderMode // set of mode bits for SSA construction + MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets + + canon *canonizer // type canonicalization map + ctxt *types.Context // cache for type checking instantiations + + methodsMu sync.Mutex + methodSets typeutil.Map // maps type to its concrete *methodSet + + // memoization of whether a type refers to type parameters + hasParamsMu sync.Mutex + hasParams typeparams.Free + + runtimeTypesMu sync.Mutex + runtimeTypes typeutil.Map // set of runtime types (from MakeInterface) + + // objectMethods is a memoization of objectMethod + // to avoid creation of duplicate methods from type information. + objectMethodsMu sync.Mutex + objectMethods map[*types.Func]*Function +} + +// A Package is a single analyzed Go package containing Members for +// all package-level functions, variables, constants and types it +// declares. These may be accessed directly via Members, or via the +// type-specific accessor methods Func, Type, Var and Const. +// +// Members also contains entries for "init" (the synthetic package +// initializer) and "init#%d", the nth declared init function, +// and unspecified other things too. +type Package struct { + Prog *Program // the owning program + Pkg *types.Package // the corresponding go/types.Package + Members map[string]Member // all package members keyed by name (incl. init and init#%d) + objects map[types.Object]Member // mapping of package objects to members (incl. methods). Contains *NamedConst, *Global, *Function (values but not types) + init *Function // Func("init"); the package's init function + debug bool // include full debug info in this package + syntax bool // package was loaded from syntax + + // The following fields are set transiently, then cleared + // after building. 
+ buildOnce sync.Once // ensures package building occurs once + ninit int32 // number of init functions + info *types.Info // package type information + files []*ast.File // package ASTs + created []*Function // members created as a result of building this package (includes declared functions, wrappers) + initVersion map[ast.Expr]string // goversion to use for each global var init expr +} + +// A Member is a member of a Go package, implemented by *NamedConst, +// *Global, *Function, or *Type; they are created by package-level +// const, var, func and type declarations respectively. +type Member interface { + Name() string // declared name of the package member + String() string // package-qualified name of the package member + RelString(*types.Package) string // like String, but relative refs are unqualified + Object() types.Object // typechecker's object for this member, if any + Pos() token.Pos // position of member's declaration, if known + Type() types.Type // type of the package member + Token() token.Token // token.{VAR,FUNC,CONST,TYPE} + Package() *Package // the containing package +} + +// A Type is a Member of a Package representing a package-level named type. +type Type struct { + object *types.TypeName + pkg *Package +} + +// A NamedConst is a Member of a Package representing a package-level +// named constant. +// +// Pos() returns the position of the declaring ast.ValueSpec.Names[*] +// identifier. +// +// NB: a NamedConst is not a Value; it contains a constant Value, which +// it augments with the name and position of its 'const' declaration. +type NamedConst struct { + object *types.Const + Value *Const + pkg *Package +} + +// A Value is an SSA value that can be referenced by an instruction. +type Value interface { + // Name returns the name of this value, and determines how + // this Value appears when used as an operand of an + // Instruction. + // + // This is the same as the source name for Parameters, + // Builtins, Functions, FreeVars, Globals. + // For constants, it is a representation of the constant's value + // and type. For all other Values this is the name of the + // virtual register defined by the instruction. + // + // The name of an SSA Value is not semantically significant, + // and may not even be unique within a function. + Name() string + + // If this value is an Instruction, String returns its + // disassembled form; otherwise it returns unspecified + // human-readable information about the Value, such as its + // kind, name and type. + String() string + + // Type returns the type of this value. Many instructions + // (e.g. IndexAddr) change their behaviour depending on the + // types of their operands. + Type() types.Type + + // Parent returns the function to which this Value belongs. + // It returns nil for named Functions, Builtin, Const and Global. + Parent() *Function + + // Referrers returns the list of instructions that have this + // value as one of their operands; it may contain duplicates + // if an instruction has a repeated operand. + // + // Referrers actually returns a pointer through which the + // caller may perform mutations to the object's state. + // + // Referrers is currently only defined if Parent()!=nil, + // i.e. for the function-local values FreeVar, Parameter, + // Functions (iff anonymous) and all value-defining instructions. + // It returns nil for named Functions, Builtin, Const and Global. + // + // Instruction.Operands contains the inverse of this relation. 
+ Referrers() *[]Instruction + + // Pos returns the location of the AST token most closely + // associated with the operation that gave rise to this value, + // or token.NoPos if it was not explicit in the source. + // + // For each ast.Node type, a particular token is designated as + // the closest location for the expression, e.g. the Lparen + // for an *ast.CallExpr. This permits a compact but + // approximate mapping from Values to source positions for use + // in diagnostic messages, for example. + // + // (Do not use this position to determine which Value + // corresponds to an ast.Expr; use Function.ValueForExpr + // instead. NB: it requires that the function was built with + // debug information.) + Pos() token.Pos +} + +// An Instruction is an SSA instruction that computes a new Value or +// has some effect. +// +// An Instruction that defines a value (e.g. BinOp) also implements +// the Value interface; an Instruction that only has an effect (e.g. Store) +// does not. +type Instruction interface { + // String returns the disassembled form of this value. + // + // Examples of Instructions that are Values: + // "x + y" (BinOp) + // "len([])" (Call) + // Note that the name of the Value is not printed. + // + // Examples of Instructions that are not Values: + // "return x" (Return) + // "*y = x" (Store) + // + // (The separation Value.Name() from Value.String() is useful + // for some analyses which distinguish the operation from the + // value it defines, e.g., 'y = local int' is both an allocation + // of memory 'local int' and a definition of a pointer y.) + String() string + + // Parent returns the function to which this instruction + // belongs. + Parent() *Function + + // Block returns the basic block to which this instruction + // belongs. + Block() *BasicBlock + + // setBlock sets the basic block to which this instruction belongs. + setBlock(*BasicBlock) + + // Operands returns the operands of this instruction: the + // set of Values it references. + // + // Specifically, it appends their addresses to rands, a + // user-provided slice, and returns the resulting slice, + // permitting avoidance of memory allocation. + // + // The operands are appended in undefined order, but the order + // is consistent for a given Instruction; the addresses are + // always non-nil but may point to a nil Value. Clients may + // store through the pointers, e.g. to effect a value + // renaming. + // + // Value.Referrers is a subset of the inverse of this + // relation. (Referrers are not tracked for all types of + // Values.) + Operands(rands []*Value) []*Value + + // Pos returns the location of the AST token most closely + // associated with the operation that gave rise to this + // instruction, or token.NoPos if it was not explicit in the + // source. + // + // For each ast.Node type, a particular token is designated as + // the closest location for the expression, e.g. the Go token + // for an *ast.GoStmt. This permits a compact but approximate + // mapping from Instructions to source positions for use in + // diagnostic messages, for example. + // + // (Do not use this position to determine which Instruction + // corresponds to an ast.Expr; see the notes for Value.Pos. + // This position may be used to determine which non-Value + // Instruction corresponds to some ast.Stmts, but not all: If + // and Jump instructions have no Pos(), for example.) + Pos() token.Pos +} + +// A Node is a node in the SSA value graph. 
Every concrete type that +// implements Node is also either a Value, an Instruction, or both. +// +// Node contains the methods common to Value and Instruction, plus the +// Operands and Referrers methods generalized to return nil for +// non-Instructions and non-Values, respectively. +// +// Node is provided to simplify SSA graph algorithms. Clients should +// use the more specific and informative Value or Instruction +// interfaces where appropriate. +type Node interface { + // Common methods: + String() string + Pos() token.Pos + Parent() *Function + + // Partial methods: + Operands(rands []*Value) []*Value // nil for non-Instructions + Referrers() *[]Instruction // nil for non-Values +} + +// Function represents the parameters, results, and code of a function +// or method. +// +// If Blocks is nil, this indicates an external function for which no +// Go source code is available. In this case, FreeVars, Locals, and +// Params are nil too. Clients performing whole-program analysis must +// handle external functions specially. +// +// Blocks contains the function's control-flow graph (CFG). +// Blocks[0] is the function entry point; block order is not otherwise +// semantically significant, though it may affect the readability of +// the disassembly. +// To iterate over the blocks in dominance order, use DomPreorder(). +// +// Recover is an optional second entry point to which control resumes +// after a recovered panic. The Recover block may contain only a return +// statement, preceded by a load of the function's named return +// parameters, if any. +// +// A nested function (Parent()!=nil) that refers to one or more +// lexically enclosing local variables ("free variables") has FreeVars. +// Such functions cannot be called directly but require a +// value created by MakeClosure which, via its Bindings, supplies +// values for these parameters. +// +// If the function is a method (Signature.Recv() != nil) then the first +// element of Params is the receiver parameter. +// +// A Go package may declare many functions called "init". +// For each one, Object().Name() returns "init" but Name() returns +// "init#1", etc, in declaration order. +// +// Pos() returns the declaring ast.FuncLit.Type.Func or the position +// of the ast.FuncDecl.Name, if the function was explicit in the +// source. Synthetic wrappers, for which Synthetic != "", may share +// the same position as the function they wrap. +// Syntax.Pos() always returns the position of the declaring "func" token. +// +// When the operand of a range statement is an iterator function, +// the loop body is transformed into a synthetic anonymous function +// that is passed as the yield argument in a call to the iterator. +// In that case, Function.Pos is the position of the "range" token, +// and Function.Syntax is the ast.RangeStmt. +// +// Synthetic functions, for which Synthetic != "", are functions +// that do not appear in the source AST. These include: +// - method wrappers, +// - thunks, +// - bound functions, +// - empty functions built from loaded type information, +// - yield functions created from range-over-func loops, +// - package init functions, and +// - instantiations of generic functions. +// +// Synthetic wrapper functions may share the same position +// as the function they wrap. +// +// Type() returns the function's Signature. +// +// A generic function is a function or method that has uninstantiated type +// parameters (TypeParams() != nil). Consider a hypothetical generic +// method, (*Map[K,V]).Get. 
It may be instantiated with all +// non-parameterized types as (*Map[string,int]).Get or with +// parameterized types as (*Map[string,U]).Get, where U is a type parameter. +// In both instantiations, Origin() refers to the instantiated generic +// method, (*Map[K,V]).Get, TypeParams() refers to the parameters [K,V] of +// the generic method. TypeArgs() refers to [string,U] or [string,int], +// respectively, and is nil in the generic method. +type Function struct { + name string + object *types.Func // symbol for declared function (nil for FuncLit or synthetic init) + method *selection // info about provenance of synthetic methods; thunk => non-nil + Signature *types.Signature + pos token.Pos + + // source information + Synthetic string // provenance of synthetic function; "" for true source functions + syntax ast.Node // *ast.Func{Decl,Lit}, if from syntax (incl. generic instances) or (*ast.RangeStmt if a yield function) + info *types.Info // type annotations (iff syntax != nil) + goversion string // Go version of syntax (NB: init is special) + + parent *Function // enclosing function if anon; nil if global + Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) + Prog *Program // enclosing program + + buildshared *task // wait for a shared function to be done building (may be nil if <=1 builder ever needs to wait) + + // These fields are populated only when the function body is built: + + Params []*Parameter // function parameters; for methods, includes receiver + FreeVars []*FreeVar // free variables whose values must be supplied by closure + Locals []*Alloc // frame-allocated variables of this function + Blocks []*BasicBlock // basic blocks of the function; nil => external + Recover *BasicBlock // optional; control transfers here after recovered panic + AnonFuncs []*Function // anonymous functions (from FuncLit,RangeStmt) directly beneath this one + referrers []Instruction // referring instructions (iff Parent() != nil) + anonIdx int32 // position of a nested function in parent's AnonFuncs. fn.Parent()!=nil => fn.Parent().AnonFunc[fn.anonIdx] == fn. + + typeparams *types.TypeParamList // type parameters of this function. typeparams.Len() > 0 => generic or instance of generic function + typeargs []types.Type // type arguments that instantiated typeparams. len(typeargs) > 0 => instance of generic function + topLevelOrigin *Function // the origin function if this is an instance of a source function. nil if Parent()!=nil. + generic *generic // instances of this function, if generic + + // The following fields are cleared after building. + build buildFunc // algorithm to build function body (nil => built) + currentBlock *BasicBlock // where to emit code + vars map[*types.Var]Value // addresses of local variables + results []*Alloc // result allocations of the current function + returnVars []*types.Var // variables for a return statement. 
Either results or for range-over-func a parent's results + targets *targets // linked stack of branch targets + lblocks map[*types.Label]*lblock // labelled blocks + subst *subster // type parameter substitutions (if non-nil) + jump *types.Var // synthetic variable for the yield state (non-nil => range-over-func) + deferstack *types.Var // synthetic variable holding enclosing ssa:deferstack() + source *Function // nearest enclosing source function + exits []*exit // exits of the function that need to be resolved + uniq int64 // source of unique ints within the source tree while building +} + +// BasicBlock represents an SSA basic block. +// +// The final element of Instrs is always an explicit transfer of +// control (If, Jump, Return, or Panic). +// +// A block may contain no Instructions only if it is unreachable, +// i.e., Preds is nil. Empty blocks are typically pruned. +// +// BasicBlocks and their Preds/Succs relation form a (possibly cyclic) +// graph independent of the SSA Value graph: the control-flow graph or +// CFG. It is illegal for multiple edges to exist between the same +// pair of blocks. +// +// Each BasicBlock is also a node in the dominator tree of the CFG. +// The tree may be navigated using Idom()/Dominees() and queried using +// Dominates(). +// +// The order of Preds and Succs is significant (to Phi and If +// instructions, respectively). +type BasicBlock struct { + Index int // index of this block within Parent().Blocks + Comment string // optional label; no semantic significance + parent *Function // parent function + Instrs []Instruction // instructions in order + Preds, Succs []*BasicBlock // predecessors and successors + succs2 [2]*BasicBlock // initial space for Succs + dom domInfo // dominator tree info + gaps int // number of nil Instrs (transient) + rundefers int // number of rundefers (transient) +} + +// Pure values ---------------------------------------- + +// A FreeVar represents a free variable of the function to which it +// belongs. +// +// FreeVars are used to implement anonymous functions, whose free +// variables are lexically captured in a closure formed by +// MakeClosure. The value of such a free var is an Alloc or another +// FreeVar and is considered a potentially escaping heap address, with +// pointer type. +// +// FreeVars are also used to implement bound method closures. Such a +// free var represents the receiver value and may be of any type that +// has concrete methods. +// +// Pos() returns the position of the value that was captured, which +// belongs to an enclosing function. +type FreeVar struct { + name string + typ types.Type + pos token.Pos + parent *Function + referrers []Instruction + + // Transiently needed during building. + outer Value // the Value captured from the enclosing context. +} + +// A Parameter represents an input parameter of a function. +type Parameter struct { + name string + object *types.Var // non-nil + typ types.Type + parent *Function + referrers []Instruction +} + +// A Const represents a value known at build time. +// +// Consts include true constants of boolean, numeric, and string types, as +// defined by the Go spec; these are represented by a non-nil Value field. +// +// Consts also include the "zero" value of any type, of which the nil values +// of various pointer-like types are a special case; these are represented +// by a nil Value field. +// +// Pos() returns token.NoPos. 
+// +// Example printed forms: +// +// 42:int +// "hello":untyped string +// 3+4i:MyComplex +// nil:*int +// nil:[]string +// [3]int{}:[3]int +// struct{x string}{}:struct{x string} +// 0:interface{int|int64} +// nil:interface{bool|int} // no go/constant representation +type Const struct { + typ types.Type + Value constant.Value +} + +// A Global is a named Value holding the address of a package-level +// variable. +// +// Pos() returns the position of the ast.ValueSpec.Names[*] +// identifier. +type Global struct { + name string + object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard + typ types.Type + pos token.Pos + + Pkg *Package +} + +// A Builtin represents a specific use of a built-in function, e.g. len. +// +// Builtins are immutable values. Builtins do not have addresses. +// Builtins can only appear in CallCommon.Value. +// +// Name() indicates the function: one of the built-in functions from the +// Go spec (excluding "make" and "new") or one of these ssa-defined +// intrinsics: +// +// // wrapnilchk returns ptr if non-nil, panics otherwise. +// // (For use in indirection wrappers.) +// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T +// +// Object() returns a *types.Builtin for built-ins defined by the spec, +// nil for others. +// +// Type() returns a *types.Signature representing the effective +// signature of the built-in for this call. +type Builtin struct { + name string + sig *types.Signature +} + +// Value-defining instructions ---------------------------------------- + +// The Alloc instruction reserves space for a variable of the given type, +// zero-initializes it, and yields its address. +// +// Alloc values are always addresses, and have pointer types, so the +// type of the allocated variable is actually +// Type().Underlying().(*types.Pointer).Elem(). +// +// If Heap is false, Alloc zero-initializes the same local variable in +// the call frame and returns its address; in this case the Alloc must +// be present in Function.Locals. We call this a "local" alloc. +// +// If Heap is true, Alloc allocates a new zero-initialized variable +// each time the instruction is executed. We call this a "new" alloc. +// +// When Alloc is applied to a channel, map or slice type, it returns +// the address of an uninitialized (nil) reference of that kind; store +// the result of MakeSlice, MakeMap or MakeChan in that location to +// instantiate these types. +// +// Pos() returns the ast.CompositeLit.Lbrace for a composite literal, +// or the ast.CallExpr.Rparen for a call to new() or for a call that +// allocates a varargs slice. +// +// Example printed form: +// +// t0 = local int +// t1 = new int +type Alloc struct { + register + Comment string + Heap bool + index int // dense numbering; for lifting +} + +// The Phi instruction represents an SSA φ-node, which combines values +// that differ across incoming control-flow edges and yields a new +// value. Within a block, all φ-nodes must appear before all non-φ +// nodes. +// +// Pos() returns the position of the && or || for short-circuit +// control-flow joins, or that of the *Alloc for φ-nodes inserted +// during SSA renaming. +// +// Example printed form: +// +// t2 = phi [0: t0, 1: t1] +type Phi struct { + register + Comment string // a hint as to its purpose + Edges []Value // Edges[i] is value for Block().Preds[i] +} + +// The Call instruction represents a function or method call. +// +// The Call instruction yields the function result if there is exactly +// one. 
Otherwise it returns a tuple, the components of which are
+// accessed via Extract.
+//
+// See CallCommon for generic function call documentation.
+//
+// Pos() returns the ast.CallExpr.Lparen, if explicit in the source.
+//
+// Example printed form:
+//
+//	t2 = println(t0, t1)
+//	t4 = t3()
+//	t7 = invoke t5.Println(...t6)
+type Call struct {
+	register
+	Call CallCommon
+}
+
+// The BinOp instruction yields the result of binary operation X Op Y.
+//
+// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source.
+//
+// Example printed form:
+//
+//	t1 = t0 + 1:int
+type BinOp struct {
+	register
+	// One of:
+	// ADD SUB MUL QUO REM          + - * / %
+	// AND OR XOR SHL SHR AND_NOT   & | ^ << >> &^
+	// EQL NEQ LSS LEQ GTR GEQ      == != < <= > >=
+	Op   token.Token
+	X, Y Value
+}
+
+// The UnOp instruction yields the result of Op X.
+// ARROW is channel receive.
+// MUL is pointer indirection (load).
+// XOR is bitwise complement.
+// SUB is negation.
+// NOT is logical negation.
+//
+// If CommaOk and Op=ARROW, the result is a 2-tuple of the value above
+// and a boolean indicating the success of the receive. The
+// components of the tuple are accessed using Extract.
+//
+// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source.
+// For receive operations (ARROW) implicit in ranging over a channel,
+// Pos() returns the ast.RangeStmt.For.
+// For implicit memory loads (STAR), Pos() returns the position of the
+// most closely associated source-level construct; the details are not
+// specified.
+//
+// Example printed form:
+//
+//	t0 = *x
+//	t2 = <-t1,ok
+type UnOp struct {
+	register
+	Op      token.Token // One of: NOT SUB ARROW MUL XOR ! - <- * ^
+	X       Value
+	CommaOk bool
+}
+
+// The ChangeType instruction applies to X a value-preserving type
+// change to Type().
+//
+// Type changes are permitted:
+//   - between a named type and its underlying type.
+//   - between two named types of the same underlying type.
+//   - between (possibly named) pointers to identical base types.
+//   - from a bidirectional channel to a read- or write-channel,
+//     optionally adding/removing a name.
+//   - between a type (t) and an instance of the type (tσ), i.e.
+//     Type() == σ(X.Type()) (or X.Type()== σ(Type())) where
+//     σ is the type substitution of Parent().TypeParams by
+//     Parent().TypeArgs.
+//
+// This operation cannot fail dynamically.
+//
+// Type changes may be to or from a type parameter (or both). All
+// types in the type set of X.Type() have a value-preserving type
+// change to all types in the type set of Type().
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+//
+//	t1 = changetype *int <- IntPtr (t0)
+type ChangeType struct {
+	register
+	X Value
+}
+
+// The Convert instruction yields the conversion of value X to type
+// Type(). One or both of those types is basic (but possibly named).
+//
+// A conversion may change the value and representation of its operand.
+// Conversions are permitted:
+//   - between real numeric types.
+//   - between complex numeric types.
+//   - between string and []byte or []rune.
+//   - between pointers and unsafe.Pointer.
+//   - between unsafe.Pointer and uintptr.
+//   - from (Unicode) integer to (UTF-8) string.
+//
+// A conversion may imply a type name change also.
+//
+// Conversions may be to or from a type parameter. All types in
+// the type set of X.Type() can be converted to all types in the type
+// set of Type().
+//
+// This operation cannot fail dynamically.
+//
+// Conversions of untyped string/number/bool constants to a specific
+// representation are eliminated during SSA construction.
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+//
+//	t1 = convert []byte <- string (t0)
+type Convert struct {
+	register
+	X Value
+}
+
+// The MultiConvert instruction yields the conversion of value X to type
+// Type(). Either X.Type() or Type() must be a type parameter. Each
+// type in the type set of X.Type() can be converted to each type in the
+// type set of Type().
+//
+// See the documentation for Convert, ChangeType, and SliceToArrayPointer
+// for the conversions that are permitted. Additionally conversions of
+// slices to arrays are permitted.
+//
+// This operation can fail dynamically (see SliceToArrayPointer).
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+//
+//	t1 = multiconvert D <- S (t0) [*[2]rune <- []rune | string <- []rune]
+type MultiConvert struct {
+	register
+	X    Value
+	from []*types.Term
+	to   []*types.Term
+}
+
+// ChangeInterface constructs a value of one interface type from a
+// value of another interface type known to be assignable to it.
+// This operation cannot fail.
+//
+// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
+// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
+// instruction arose from an explicit e.(T) operation; or token.NoPos
+// otherwise.
+//
+// Example printed form:
+//
+//	t1 = change interface interface{} <- I (t0)
+type ChangeInterface struct {
+	register
+	X Value
+}
+
+// The SliceToArrayPointer instruction yields the conversion of slice X to
+// array pointer.
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Conversion may be to or from a type parameter. All types in
+// the type set of X.Type() must be slice types that can be converted to
+// all types in the type set of Type(), which must all be pointer to array
+// types.
+//
+// This operation can fail dynamically if the length of the slice is less
+// than the length of the array.
+//
+// Example printed form:
+//
+//	t1 = slice to array pointer *[4]byte <- []byte (t0)
+type SliceToArrayPointer struct {
+	register
+	X Value
+}
+
+// MakeInterface constructs an instance of an interface type from a
+// value of a concrete type.
+//
+// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set
+// of X, and Program.MethodValue(m) to find the implementation of a method.
+//
+// To construct the zero value of an interface type T, use:
+//
+//	NewConst(constant.MakeNil(), T)
+//
+// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
+// from an explicit conversion in the source.
+//
+// Example printed form:
+//
+//	t1 = make interface{} <- int (42:int)
+//	t2 = make Stringer <- t0
+type MakeInterface struct {
+	register
+	X Value
+}
+
+// The MakeClosure instruction yields a closure value whose code is
+// Fn and whose free variables' values are supplied by Bindings.
+//
+// Type() returns a (possibly named) *types.Signature.
+//
+// Pos() returns the ast.FuncLit.Type.Func for a function literal
+// closure or the ast.SelectorExpr.Sel for a bound method closure.
+// +// Example printed form: +// +// t0 = make closure anon@1.2 [x y z] +// t1 = make closure bound$(main.I).add [i] +type MakeClosure struct { + register + Fn Value // always a *Function + Bindings []Value // values for each free variable in Fn.FreeVars +} + +// The MakeMap instruction creates a new hash-table-based map object +// and yields a value of kind map. +// +// Type() returns a (possibly named) *types.Map. +// +// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or +// the ast.CompositeLit.Lbrack if created by a literal. +// +// Example printed form: +// +// t1 = make map[string]int t0 +// t1 = make StringIntMap t0 +type MakeMap struct { + register + Reserve Value // initial space reservation; nil => default +} + +// The MakeChan instruction creates a new channel object and yields a +// value of kind chan. +// +// Type() returns a (possibly named) *types.Chan. +// +// Pos() returns the ast.CallExpr.Lparen for the make(chan) that +// created it. +// +// Example printed form: +// +// t0 = make chan int 0 +// t0 = make IntChan 0 +type MakeChan struct { + register + Size Value // int; size of buffer; zero => synchronous. +} + +// The MakeSlice instruction yields a slice of length Len backed by a +// newly allocated array of length Cap. +// +// Both Len and Cap must be non-nil Values of integer type. +// +// (Alloc(types.Array) followed by Slice will not suffice because +// Alloc can only create arrays of constant length.) +// +// Type() returns a (possibly named) *types.Slice. +// +// Pos() returns the ast.CallExpr.Lparen for the make([]T) that +// created it. +// +// Example printed form: +// +// t1 = make []string 1:int t0 +// t1 = make StringSlice 1:int t0 +type MakeSlice struct { + register + Len Value + Cap Value +} + +// The Slice instruction yields a slice of an existing string, slice +// or *array X between optional integer bounds Low and High. +// +// Dynamically, this instruction panics if X evaluates to a nil *array +// pointer. +// +// Type() returns string if the type of X was string, otherwise a +// *types.Slice with the same element type as X. +// +// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice +// operation, the ast.CompositeLit.Lbrace if created by a literal, or +// NoPos if not explicit in the source (e.g. a variadic argument slice). +// +// Example printed form: +// +// t1 = slice t0[1:] +type Slice struct { + register + X Value // slice, string, or *array + Low, High, Max Value // each may be nil +} + +// The FieldAddr instruction yields the address of Field of *struct X. +// +// The field is identified by its index within the field list of the +// struct type of X. +// +// Dynamically, this instruction panics if X evaluates to a nil +// pointer. +// +// Type() returns a (possibly named) *types.Pointer. +// +// Pos() returns the position of the ast.SelectorExpr.Sel for the +// field, if explicit in the source. For implicit selections, returns +// the position of the inducing explicit selection. If produced for a +// struct literal S{f: e}, it returns the position of the colon; for +// S{e} it returns the start of expression e. +// +// Example printed form: +// +// t1 = &t0.name [#1] +type FieldAddr struct { + register + X Value // *struct + Field int // index into CoreType(CoreType(X.Type()).(*types.Pointer).Elem()).(*types.Struct).Fields +} + +// The Field instruction yields the Field of struct X. 
+//
+// The field is identified by its index within the field list of the
+// struct type of X; by using numeric indices we avoid ambiguity of
+// package-local identifiers and permit compact representations.
+//
+// Pos() returns the position of the ast.SelectorExpr.Sel for the
+// field, if explicit in the source. For implicit selections, returns
+// the position of the inducing explicit selection.
+//
+// Example printed form:
+//
+//	t1 = t0.name [#1]
+type Field struct {
+	register
+	X     Value // struct
+	Field int   // index into CoreType(X.Type()).(*types.Struct).Fields
+}
+
+// The IndexAddr instruction yields the address of the element at
+// index Index of collection X. Index is an integer expression.
+//
+// The elements of maps and strings are not addressable; use Lookup (map),
+// Index (string), or MapUpdate instead.
+//
+// Dynamically, this instruction panics if X evaluates to a nil *array
+// pointer.
+//
+// Type() returns a (possibly named) *types.Pointer.
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+//
+//	t2 = &t0[t1]
+type IndexAddr struct {
+	register
+	X     Value // *array, slice or type parameter with types array, *array, or slice.
+	Index Value // numeric index
+}
+
+// The Index instruction yields element Index of collection X, an array,
+// string or type parameter containing an array, a string, a pointer to an
+// array, or a slice.
+//
+// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
+// explicit in the source.
+//
+// Example printed form:
+//
+//	t2 = t0[t1]
+type Index struct {
+	register
+	X     Value // array, string or type parameter with types array, *array, slice, or string.
+	Index Value // integer index
+}
+
+// The Lookup instruction yields element Index of collection map X.
+// Index is the appropriate key type.
+//
+// If CommaOk, the result is a 2-tuple of the value above and a
+// boolean indicating the result of a map membership test for the key.
+// The components of the tuple are accessed using Extract.
+//
+// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
+//
+// Example printed form:
+//
+//	t2 = t0[t1]
+//	t5 = t3[t4],ok
+type Lookup struct {
+	register
+	X       Value // map
+	Index   Value // key-typed index
+	CommaOk bool  // return a value,ok pair
+}
+
+// SelectState is a helper for Select.
+// It represents one goal state and its corresponding communication.
+type SelectState struct {
+	Dir       types.ChanDir // direction of case (SendOnly or RecvOnly)
+	Chan      Value         // channel to use (for send or receive)
+	Send      Value         // value to send (for send)
+	Pos       token.Pos     // position of token.ARROW
+	DebugNode ast.Node      // ast.SendStmt or ast.UnaryExpr(<-) [debug mode]
+}
+
+// The Select instruction tests whether (or blocks until) one
+// of the specified sent or received states is entered.
+//
+// Let n be the number of States for which Dir==RECV and T_i (0<=i<n)
+// be the element type of each such state's Chan.
+// Select returns an n+2-tuple
+//
+//	(index int, recvOk bool, r_0 T_0, ... r_n-1 T_n-1)
+//
+// The tuple's components, described below, must be accessed via the
+// Extract instruction.
+//
+// If Blocking, select waits until exactly one state holds, i.e. a
+// channel becomes ready for the designated operation of sending or
+// receiving; select returns the index of the chosen channel.
+//
+// If !Blocking, select returns immediately with index equal to -1 if
+// no state holds, i.e. all channels are unready.
+//
+// recvOk indicates whether the selected operation was a receive and the
+// received value was valid.
+//
+// r_i is the value received from the channel of the i'th receive
+// state, or the zero value of type T_i if that state was not selected.
+//
+// Pos() returns the ast.SelectStmt.Select.
+//
+// Example printed form:
+//
+//	t3 = select nonblocking [<-t0, t1<-t2]
+//	t4 = select blocking []
+type Select struct {
+	register
+	States   []*SelectState
+	Blocking bool
+}
+
+// The Range instruction yields an iterator over the domain and range
+// of X, which must be a string or map.
+//
+// Elements are accessed via Next.
+//
+// Type() returns an opaque and degenerate "rangeIter" type.
+//
+// Pos() returns the ast.RangeStmt.For.
+//
+// Example printed form:
+//
+//	t0 = range "hello":string
+type Range struct {
+	register
+	X Value // string or map
+}
+
+// The Next instruction reads and advances the (map or string)
+// iterator Iter and returns a 3-tuple value (ok, k, v). If the
+// iterator is not exhausted, ok is true and k and v are the next
+// elements of the domain and range, respectively. Otherwise ok is
+// false and k and v are undefined.
+//
+// Components of the tuple are accessed using Extract.
+//
+// The IsString field distinguishes iterators over strings from those
+// over maps, as the Type() alone is insufficient: consider
+// map[int]rune.
+//
+// Type() returns a *types.Tuple for the triple (ok, k, v).
+// The types of k and/or v may be types.Invalid.
+//
+// Example printed form:
+//
+//	t1 = next t0
+type Next struct {
+	register
+	Iter     Value
+	IsString bool // true => string iterator; false => map iterator.
+}
+
+// The TypeAssert instruction tests whether interface value X has type
+// AssertedType.
+//
+// If !CommaOk, on success it returns v, the result of the conversion
+// (defined below); on failure it panics.
+//
+// If CommaOk: on success it returns a pair (v, true) where v is the
+// result of the conversion; on failure it returns (z, false) where z
+// is AssertedType's zero value. The components of the pair must be
+// accessed using the Extract instruction.
+// +// If Underlying: tests whether interface value X has the underlying +// type AssertedType. +// +// If AssertedType is a concrete type, TypeAssert checks whether the +// dynamic type in interface X is equal to it, and if so, the result +// of the conversion is a copy of the value in the interface. +// +// If AssertedType is an interface, TypeAssert checks whether the +// dynamic type of the interface is assignable to it, and if so, the +// result of the conversion is a copy of the interface value X. +// If AssertedType is a superinterface of X.Type(), the operation will +// fail iff the operand is nil. (Contrast with ChangeInterface, which +// performs no nil-check.) +// +// Type() reflects the actual type of the result, possibly a +// 2-types.Tuple; AssertedType is the asserted type. +// +// Depending on the TypeAssert's purpose, Pos may return: +// - the ast.CallExpr.Lparen of an explicit T(e) conversion; +// - the ast.TypeAssertExpr.Lparen of an explicit e.(T) operation; +// - the ast.CaseClause.Case of a case of a type-switch statement; +// - the Ident(m).NamePos of an interface method value i.m +// (for which TypeAssert may be used to effect the nil check). +// +// Example printed form: +// +// t1 = typeassert t0.(int) +// t3 = typeassert,ok t2.(T) +type TypeAssert struct { + register + X Value + AssertedType types.Type + CommaOk bool +} + +// The Extract instruction yields component Index of Tuple. +// +// This is used to access the results of instructions with multiple +// return values, such as Call, TypeAssert, Next, UnOp(ARROW) and +// IndexExpr(Map). +// +// Example printed form: +// +// t1 = extract t0 #1 +type Extract struct { + register + Tuple Value + Index int +} + +// Instructions executed for effect. They do not yield a value. -------------------- + +// The Jump instruction transfers control to the sole successor of its +// owning block. +// +// A Jump must be the last instruction of its containing BasicBlock. +// +// Pos() returns NoPos. +// +// Example printed form: +// +// jump done +type Jump struct { + anInstruction +} + +// The If instruction transfers control to one of the two successors +// of its owning block, depending on the boolean Cond: the first if +// true, the second if false. +// +// An If instruction must be the last instruction of its containing +// BasicBlock. +// +// Pos() returns NoPos. +// +// Example printed form: +// +// if t0 goto done else body +type If struct { + anInstruction + Cond Value +} + +// The Return instruction returns values and control back to the calling +// function. +// +// len(Results) is always equal to the number of results in the +// function's signature. +// +// If len(Results) > 1, Return returns a tuple value with the specified +// components which the caller must access using Extract instructions. +// +// There is no instruction to return a ready-made tuple like those +// returned by a "value,ok"-mode TypeAssert, Lookup or UnOp(ARROW) or +// a tail-call to a function with multiple result parameters. +// +// Return must be the last instruction of its containing BasicBlock. +// Such a block has no successors. +// +// Pos() returns the ast.ReturnStmt.Return, if explicit in the source. +// +// Example printed form: +// +// return +// return nil:I, 2:int +type Return struct { + anInstruction + Results []Value + pos token.Pos +} + +// The RunDefers instruction pops and invokes the entire stack of +// procedure calls pushed by Defer instructions in this function. 
+// +// It is legal to encounter multiple 'rundefers' instructions in a +// single control-flow path through a function; this is useful in +// the combined init() function, for example. +// +// Pos() returns NoPos. +// +// Example printed form: +// +// rundefers +type RunDefers struct { + anInstruction +} + +// The Panic instruction initiates a panic with value X. +// +// A Panic instruction must be the last instruction of its containing +// BasicBlock, which must have no successors. +// +// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction; +// they are treated as calls to a built-in function. +// +// Pos() returns the ast.CallExpr.Lparen if this panic was explicit +// in the source. +// +// Example printed form: +// +// panic t0 +type Panic struct { + anInstruction + X Value // an interface{} + pos token.Pos +} + +// The Go instruction creates a new goroutine and calls the specified +// function within it. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.GoStmt.Go. +// +// Example printed form: +// +// go println(t0, t1) +// go t3() +// go invoke t5.Println(...t6) +type Go struct { + anInstruction + Call CallCommon + pos token.Pos +} + +// The Defer instruction pushes the specified call onto a stack of +// functions to be called by a RunDefers instruction or by a panic. +// +// If DeferStack != nil, it indicates the defer list that the defer is +// added to. Defer list values come from the Builtin function +// ssa:deferstack. Calls to ssa:deferstack() produces the defer stack +// of the current function frame. DeferStack allows for deferring into an +// alternative function stack than the current function. +// +// See CallCommon for generic function call documentation. +// +// Pos() returns the ast.DeferStmt.Defer. +// +// Example printed form: +// +// defer println(t0, t1) +// defer t3() +// defer invoke t5.Println(...t6) +type Defer struct { + anInstruction + Call CallCommon + DeferStack Value // stack of deferred functions (from ssa:deferstack() intrinsic) onto which this function is pushed + pos token.Pos +} + +// The Send instruction sends X on channel Chan. +// +// Pos() returns the ast.SendStmt.Arrow, if explicit in the source. +// +// Example printed form: +// +// send t0 <- t1 +type Send struct { + anInstruction + Chan, X Value + pos token.Pos +} + +// The Store instruction stores Val at address Addr. +// Stores can be of arbitrary types. +// +// Pos() returns the position of the source-level construct most closely +// associated with the memory store operation. +// Since implicit memory stores are numerous and varied and depend upon +// implementation choices, the details are not specified. +// +// Example printed form: +// +// *x = y +type Store struct { + anInstruction + Addr Value + Val Value + pos token.Pos +} + +// The MapUpdate instruction updates the association of Map[Key] to +// Value. +// +// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack, +// if explicit in the source. +// +// Example printed form: +// +// t0[t1] = t2 +type MapUpdate struct { + anInstruction + Map Value + Key Value + Value Value + pos token.Pos +} + +// A DebugRef instruction maps a source-level expression Expr to the +// SSA value X that represents the value (!IsAddr) or address (IsAddr) +// of that expression. +// +// DebugRef is a pseudo-instruction: it has no dynamic effect. +// +// Pos() returns Expr.Pos(), the start position of the source-level +// expression. 
This is not the same as the "designated" token as +// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the +// position of the ("designated") Lparen token. +// +// If Expr is an *ast.Ident denoting a var or func, Object() returns +// the object; though this information can be obtained from the type +// checker, including it here greatly facilitates debugging. +// For non-Ident expressions, Object() returns nil. +// +// DebugRefs are generated only for functions built with debugging +// enabled; see Package.SetDebugMode() and the GlobalDebug builder +// mode flag. +// +// DebugRefs are not emitted for ast.Idents referring to constants or +// predeclared identifiers, since they are trivial and numerous. +// Nor are they emitted for ast.ParenExprs. +// +// (By representing these as instructions, rather than out-of-band, +// consistency is maintained during transformation passes by the +// ordinary SSA renaming machinery.) +// +// Example printed form: +// +// ; *ast.CallExpr @ 102:9 is t5 +// ; var x float64 @ 109:72 is x +// ; address of *ast.CompositeLit @ 216:10 is t0 +type DebugRef struct { + // TODO(generics): Reconsider what DebugRefs are for generics. + anInstruction + Expr ast.Expr // the referring expression (never *ast.ParenExpr) + object types.Object // the identity of the source var/func + IsAddr bool // Expr is addressable and X is the address it denotes + X Value // the value or address of Expr +} + +// Embeddable mix-ins and helpers for common parts of other structs. ----------- + +// register is a mix-in embedded by all SSA values that are also +// instructions, i.e. virtual registers, and provides a uniform +// implementation of most of the Value interface: Value.Name() is a +// numbered register (e.g. "t0"); the other methods are field accessors. +// +// Temporary names are automatically assigned to each register on +// completion of building a function in SSA form. +// +// Clients must not assume that the 'id' value (and the Name() derived +// from it) is unique within a function. As always in this API, +// semantics are determined only by identity; names exist only to +// facilitate debugging. +type register struct { + anInstruction + num int // "name" of virtual register, e.g. "t0". Not guaranteed unique. + typ types.Type // type of virtual register + pos token.Pos // position of source expression, or NoPos + referrers []Instruction +} + +// anInstruction is a mix-in embedded by all Instructions. +// It provides the implementations of the Block and setBlock methods. +type anInstruction struct { + block *BasicBlock // the basic block of this instruction +} + +// CallCommon is contained by Go, Defer and Call to hold the +// common parts of a function or method call. +// +// Each CallCommon exists in one of two modes, function call and +// interface method invocation, or "call" and "invoke" for short. +// +// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon +// represents an ordinary function call of the value in Value, +// which may be a *Builtin, a *Function or any other value of kind +// 'func'. +// +// Value may be one of: +// +// (a) a *Function, indicating a statically dispatched call +// to a package-level function, an anonymous function, or +// a method of a named type. +// (b) a *MakeClosure, indicating an immediately applied +// function literal with free variables. +// (c) a *Builtin, indicating a statically dispatched call +// to a built-in function. +// (d) any other value, indicating a dynamically dispatched +// function call. 
+// +// StaticCallee returns the identity of the callee in cases +// (a) and (b), nil otherwise. +// +// Args contains the arguments to the call. If Value is a method, +// Args[0] contains the receiver parameter. +// +// Example printed form: +// +// t2 = println(t0, t1) +// go t3() +// defer t5(...t6) +// +// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon +// represents a dynamically dispatched call to an interface method. +// In this mode, Value is the interface value and Method is the +// interface's abstract method. The interface value may be a type +// parameter. Note: an interface method may be shared by multiple +// interfaces due to embedding; Value.Type() provides the specific +// interface used for this call. +// +// Value is implicitly supplied to the concrete method implementation +// as the receiver parameter; in other words, Args[0] holds not the +// receiver but the first true argument. +// +// Example printed form: +// +// t1 = invoke t0.String() +// go invoke t3.Run(t2) +// defer invoke t4.Handle(...t5) +// +// For all calls to variadic functions (Signature().Variadic()), +// the last element of Args is a slice. +type CallCommon struct { + Value Value // receiver (invoke mode) or func value (call mode) + Method *types.Func // interface method (invoke mode) + Args []Value // actual parameters (in static method call, includes receiver) + pos token.Pos // position of CallExpr.Lparen, iff explicit in source +} + +// IsInvoke returns true if this call has "invoke" (not "call") mode. +func (c *CallCommon) IsInvoke() bool { + return c.Method != nil +} + +func (c *CallCommon) Pos() token.Pos { return c.pos } + +// Signature returns the signature of the called function. +// +// For an "invoke"-mode call, the signature of the interface method is +// returned. +// +// In either "call" or "invoke" mode, if the callee is a method, its +// receiver is represented by sig.Recv, not sig.Params().At(0). +func (c *CallCommon) Signature() *types.Signature { + if c.Method != nil { + return c.Method.Type().(*types.Signature) + } + return typeparams.CoreType(c.Value.Type()).(*types.Signature) +} + +// StaticCallee returns the callee if this is a trivially static +// "call"-mode call to a function. +func (c *CallCommon) StaticCallee() *Function { + switch fn := c.Value.(type) { + case *Function: + return fn + case *MakeClosure: + return fn.Fn.(*Function) + } + return nil +} + +// Description returns a description of the mode of this call suitable +// for a user interface, e.g., "static method call". +func (c *CallCommon) Description() string { + switch fn := c.Value.(type) { + case *Builtin: + return "built-in function call" + case *MakeClosure: + return "static function closure call" + case *Function: + if fn.Signature.Recv() != nil { + return "static method call" + } + return "static function call" + } + if c.IsInvoke() { + return "dynamic method call" // ("invoke" mode) + } + return "dynamic function call" +} + +// The CallInstruction interface, implemented by *Go, *Defer and *Call, +// exposes the common parts of function-calling instructions, +// yet provides a way back to the Value defined by *Call alone. 
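Continuing the hypothetical ssaquery client sketched earlier (same imports), this is an illustrative way to distinguish the two CallCommon modes described above; classifyCall is a made-up name, not part of the vendored API.

// classifyCall summarizes a call site using the CallCommon accessors:
// "invoke" mode dispatches through an interface method, while "call"
// mode may or may not have a statically known callee.
func classifyCall(cc *ssa.CallCommon) string {
	if cc.IsInvoke() {
		// "invoke" mode: dynamic dispatch through an interface method.
		return "invoke " + cc.Method.FullName()
	}
	// "call" mode: the callee is cc.Value.
	if callee := cc.StaticCallee(); callee != nil {
		return "static call to " + callee.String()
	}
	return cc.Description() // e.g. "dynamic function call"
}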
+type CallInstruction interface { + Instruction + Common() *CallCommon // returns the common parts of the call + Value() *Call // returns the result value of the call (*Call) or nil (*Go, *Defer) +} + +func (s *Call) Common() *CallCommon { return &s.Call } +func (s *Defer) Common() *CallCommon { return &s.Call } +func (s *Go) Common() *CallCommon { return &s.Call } + +func (s *Call) Value() *Call { return s } +func (s *Defer) Value() *Call { return nil } +func (s *Go) Value() *Call { return nil } + +func (v *Builtin) Type() types.Type { return v.sig } +func (v *Builtin) Name() string { return v.name } +func (*Builtin) Referrers() *[]Instruction { return nil } +func (v *Builtin) Pos() token.Pos { return token.NoPos } +func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) } +func (v *Builtin) Parent() *Function { return nil } + +func (v *FreeVar) Type() types.Type { return v.typ } +func (v *FreeVar) Name() string { return v.name } +func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers } +func (v *FreeVar) Pos() token.Pos { return v.pos } +func (v *FreeVar) Parent() *Function { return v.parent } + +func (v *Global) Type() types.Type { return v.typ } +func (v *Global) Name() string { return v.name } +func (v *Global) Parent() *Function { return nil } +func (v *Global) Pos() token.Pos { return v.pos } +func (v *Global) Referrers() *[]Instruction { return nil } +func (v *Global) Token() token.Token { return token.VAR } +func (v *Global) Object() types.Object { return v.object } +func (v *Global) String() string { return v.RelString(nil) } +func (v *Global) Package() *Package { return v.Pkg } +func (v *Global) RelString(from *types.Package) string { return relString(v, from) } + +func (v *Function) Name() string { return v.name } +func (v *Function) Type() types.Type { return v.Signature } +func (v *Function) Pos() token.Pos { return v.pos } +func (v *Function) Token() token.Token { return token.FUNC } +func (v *Function) Object() types.Object { + if v.object != nil { + return types.Object(v.object) + } + return nil +} +func (v *Function) String() string { return v.RelString(nil) } +func (v *Function) Package() *Package { return v.Pkg } +func (v *Function) Parent() *Function { return v.parent } +func (v *Function) Referrers() *[]Instruction { + if v.parent != nil { + return &v.referrers + } + return nil +} + +// TypeParams are the function's type parameters if generic or the +// type parameters that were instantiated if fn is an instantiation. +func (fn *Function) TypeParams() *types.TypeParamList { + return fn.typeparams +} + +// TypeArgs are the types that TypeParams() were instantiated by to create fn +// from fn.Origin(). +func (fn *Function) TypeArgs() []types.Type { return fn.typeargs } + +// Origin returns the generic function from which fn was instantiated, +// or nil if fn is not an instantiation. +func (fn *Function) Origin() *Function { + if fn.parent != nil && len(fn.typeargs) > 0 { + // Nested functions are BUILT at a different time than their instances. + // Build declared package if not yet BUILT. This is not an expected use + // case, but is simple and robust. + fn.declaredPackage().Build() + } + return origin(fn) +} + +// origin is the function that fn is an instantiation of. Returns nil if fn is +// not an instantiation. +// +// Precondition: fn and the origin function are done building. 
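Another illustrative sketch for the hypothetical ssaquery client, using the TypeParams, TypeArgs and Origin accessors above to tell generic functions apart from their instantiations; describeGenerics is a made-up name.

// describeGenerics classifies fn as generic, an instantiation of a
// generic function, or neither.
func describeGenerics(fn *ssa.Function) string {
	switch {
	case fn.TypeParams().Len() > 0 && len(fn.TypeArgs()) == 0:
		return fn.Name() + " is a generic function (uninstantiated)"
	case len(fn.TypeArgs()) > 0:
		origin := fn.Origin() // the generic function fn was instantiated from
		return fn.Name() + " instantiates " + origin.Name()
	default:
		return fn.Name() + " is not generic"
	}
}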
+func origin(fn *Function) *Function { + if fn.parent != nil && len(fn.typeargs) > 0 { + return origin(fn.parent).AnonFuncs[fn.anonIdx] + } + return fn.topLevelOrigin +} + +func (v *Parameter) Type() types.Type { return v.typ } +func (v *Parameter) Name() string { return v.name } +func (v *Parameter) Object() types.Object { return v.object } +func (v *Parameter) Referrers() *[]Instruction { return &v.referrers } +func (v *Parameter) Pos() token.Pos { return v.object.Pos() } +func (v *Parameter) Parent() *Function { return v.parent } + +func (v *Alloc) Type() types.Type { return v.typ } +func (v *Alloc) Referrers() *[]Instruction { return &v.referrers } +func (v *Alloc) Pos() token.Pos { return v.pos } + +func (v *register) Type() types.Type { return v.typ } +func (v *register) setType(typ types.Type) { v.typ = typ } +func (v *register) Name() string { return fmt.Sprintf("t%d", v.num) } +func (v *register) setNum(num int) { v.num = num } +func (v *register) Referrers() *[]Instruction { return &v.referrers } +func (v *register) Pos() token.Pos { return v.pos } +func (v *register) setPos(pos token.Pos) { v.pos = pos } + +func (v *anInstruction) Parent() *Function { return v.block.parent } +func (v *anInstruction) Block() *BasicBlock { return v.block } +func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block } +func (v *anInstruction) Referrers() *[]Instruction { return nil } + +func (t *Type) Name() string { return t.object.Name() } +func (t *Type) Pos() token.Pos { return t.object.Pos() } +func (t *Type) Type() types.Type { return t.object.Type() } +func (t *Type) Token() token.Token { return token.TYPE } +func (t *Type) Object() types.Object { return t.object } +func (t *Type) String() string { return t.RelString(nil) } +func (t *Type) Package() *Package { return t.pkg } +func (t *Type) RelString(from *types.Package) string { return relString(t, from) } + +func (c *NamedConst) Name() string { return c.object.Name() } +func (c *NamedConst) Pos() token.Pos { return c.object.Pos() } +func (c *NamedConst) String() string { return c.RelString(nil) } +func (c *NamedConst) Type() types.Type { return c.object.Type() } +func (c *NamedConst) Token() token.Token { return token.CONST } +func (c *NamedConst) Object() types.Object { return c.object } +func (c *NamedConst) Package() *Package { return c.pkg } +func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) } + +func (d *DebugRef) Object() types.Object { return d.object } + +// Func returns the package-level function of the specified name, +// or nil if not found. +func (p *Package) Func(name string) (f *Function) { + f, _ = p.Members[name].(*Function) + return +} + +// Var returns the package-level variable of the specified name, +// or nil if not found. +func (p *Package) Var(name string) (g *Global) { + g, _ = p.Members[name].(*Global) + return +} + +// Const returns the package-level constant of the specified name, +// or nil if not found. +func (p *Package) Const(name string) (c *NamedConst) { + c, _ = p.Members[name].(*NamedConst) + return +} + +// Type returns the package-level type of the specified name, +// or nil if not found. 
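The Func, Var, Const and Type accessors above support simple by-name lookups of package members. An illustrative sketch for the hypothetical ssaquery client (it additionally assumes an import of fmt; the member names "main", "version", "debug" and "Config" are made up, and each accessor returns nil when no member of that name and kind exists):

// dumpMembers prints a few package-level members looked up by name.
func dumpMembers(p *ssa.Package) {
	if f := p.Func("main"); f != nil {
		fmt.Println("func main declared at", f.Pos())
	}
	if g := p.Var("version"); g != nil {
		fmt.Println("global", g.Name(), g.Type()) // Global.Type() is the address type
	}
	if c := p.Const("debug"); c != nil {
		fmt.Println("const", c.Name(), "=", c.Value.Value)
	}
	if t := p.Type("Config"); t != nil {
		fmt.Println("type", t.Name(), t.Type().Underlying())
	}
}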
+func (p *Package) Type(name string) (t *Type) { + t, _ = p.Members[name].(*Type) + return +} + +func (v *Call) Pos() token.Pos { return v.Call.pos } +func (s *Defer) Pos() token.Pos { return s.pos } +func (s *Go) Pos() token.Pos { return s.pos } +func (s *MapUpdate) Pos() token.Pos { return s.pos } +func (s *Panic) Pos() token.Pos { return s.pos } +func (s *Return) Pos() token.Pos { return s.pos } +func (s *Send) Pos() token.Pos { return s.pos } +func (s *Store) Pos() token.Pos { return s.pos } +func (s *If) Pos() token.Pos { return token.NoPos } +func (s *Jump) Pos() token.Pos { return token.NoPos } +func (s *RunDefers) Pos() token.Pos { return token.NoPos } +func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() } + +// Operands. + +func (v *Alloc) Operands(rands []*Value) []*Value { + return rands +} + +func (v *BinOp) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Y) +} + +func (c *CallCommon) Operands(rands []*Value) []*Value { + rands = append(rands, &c.Value) + for i := range c.Args { + rands = append(rands, &c.Args[i]) + } + return rands +} + +func (s *Go) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (s *Call) Operands(rands []*Value) []*Value { + return s.Call.Operands(rands) +} + +func (s *Defer) Operands(rands []*Value) []*Value { + return append(s.Call.Operands(rands), &s.DeferStack) +} + +func (v *ChangeInterface) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *ChangeType) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *Convert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *MultiConvert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *SliceToArrayPointer) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *DebugRef) Operands(rands []*Value) []*Value { + return append(rands, &s.X) +} + +func (v *Extract) Operands(rands []*Value) []*Value { + return append(rands, &v.Tuple) +} + +func (v *Field) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *FieldAddr) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *If) Operands(rands []*Value) []*Value { + return append(rands, &s.Cond) +} + +func (v *Index) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *IndexAddr) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (*Jump) Operands(rands []*Value) []*Value { + return rands +} + +func (v *Lookup) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *MakeChan) Operands(rands []*Value) []*Value { + return append(rands, &v.Size) +} + +func (v *MakeClosure) Operands(rands []*Value) []*Value { + rands = append(rands, &v.Fn) + for i := range v.Bindings { + rands = append(rands, &v.Bindings[i]) + } + return rands +} + +func (v *MakeInterface) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *MakeMap) Operands(rands []*Value) []*Value { + return append(rands, &v.Reserve) +} + +func (v *MakeSlice) Operands(rands []*Value) []*Value { + return append(rands, &v.Len, &v.Cap) +} + +func (v *MapUpdate) Operands(rands []*Value) []*Value { + return append(rands, &v.Map, &v.Key, &v.Value) +} + +func (v *Next) Operands(rands []*Value) []*Value { + return append(rands, &v.Iter) +} + +func (s *Panic) Operands(rands []*Value) []*Value { + return append(rands, &s.X) +} + +func (v *Phi) Operands(rands 
[]*Value) []*Value { + for i := range v.Edges { + rands = append(rands, &v.Edges[i]) + } + return rands +} + +func (v *Range) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (s *Return) Operands(rands []*Value) []*Value { + for i := range s.Results { + rands = append(rands, &s.Results[i]) + } + return rands +} + +func (*RunDefers) Operands(rands []*Value) []*Value { + return rands +} + +func (v *Select) Operands(rands []*Value) []*Value { + for i := range v.States { + rands = append(rands, &v.States[i].Chan, &v.States[i].Send) + } + return rands +} + +func (s *Send) Operands(rands []*Value) []*Value { + return append(rands, &s.Chan, &s.X) +} + +func (v *Slice) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Low, &v.High, &v.Max) +} + +func (s *Store) Operands(rands []*Value) []*Value { + return append(rands, &s.Addr, &s.Val) +} + +func (v *TypeAssert) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +func (v *UnOp) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + +// Non-Instruction Values: +func (v *Builtin) Operands(rands []*Value) []*Value { return rands } +func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } +func (v *Const) Operands(rands []*Value) []*Value { return rands } +func (v *Function) Operands(rands []*Value) []*Value { return rands } +func (v *Global) Operands(rands []*Value) []*Value { return rands } +func (v *Parameter) Operands(rands []*Value) []*Value { return rands } diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/load.go b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go new file mode 100644 index 0000000..3daa67a --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go @@ -0,0 +1,214 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssautil + +// This file defines utility functions for constructing programs in SSA form. + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/loader" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/ssa" + "golang.org/x/tools/internal/versions" +) + +// Packages creates an SSA program for a set of packages. +// +// The packages must have been loaded from source syntax using the +// [packages.Load] function in [packages.LoadSyntax] or +// [packages.LoadAllSyntax] mode. +// +// Packages creates an SSA package for each well-typed package in the +// initial list, plus all their dependencies. The resulting list of +// packages corresponds to the list of initial packages, and may contain +// a nil if SSA code could not be constructed for the corresponding initial +// package due to type errors. +// +// Code for bodies of functions is not built until [Program.Build] is +// called on the resulting Program. SSA code is constructed only for +// the initial packages with well-typed syntax trees. +// +// The mode parameter controls diagnostics and checking during SSA construction. +func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) { + // TODO(adonovan): opt: this calls CreatePackage far more than + // necessary: for all dependencies, not just the (non-initial) + // direct dependencies of the initial packages. + // + // But can it reasonably be changed without breaking the + // spirit and/or letter of the law above? Clients may notice + // if we call CreatePackage less, as methods like + // Program.FuncValue will return nil. 
Or must we provide a new + // function (and perhaps deprecate this one)? Is it worth it? + // + // Tim King makes the interesting point that it would be + // possible to entirely alleviate the client from the burden + // of calling CreatePackage for non-syntax packages, if we + // were to treat vars and funcs lazily in the same way we now + // treat methods. (In essence, try to move away from the + // notion of ssa.Packages, and make the Program answer + // all reasonable questions about any types.Object.) + + return doPackages(initial, mode, false) +} + +// AllPackages creates an SSA program for a set of packages plus all +// their dependencies. +// +// The packages must have been loaded from source syntax using the +// [packages.Load] function in [packages.LoadAllSyntax] mode. +// +// AllPackages creates an SSA package for each well-typed package in the +// initial list, plus all their dependencies. The resulting list of +// packages corresponds to the list of initial packages, and may contain +// a nil if SSA code could not be constructed for the corresponding +// initial package due to type errors. +// +// Code for bodies of functions is not built until Build is called on +// the resulting Program. SSA code is constructed for all packages with +// well-typed syntax trees. +// +// The mode parameter controls diagnostics and checking during SSA construction. +func AllPackages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) { + return doPackages(initial, mode, true) +} + +func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (*ssa.Program, []*ssa.Package) { + + var fset *token.FileSet + if len(initial) > 0 { + fset = initial[0].Fset + } + + prog := ssa.NewProgram(fset, mode) + + isInitial := make(map[*packages.Package]bool, len(initial)) + for _, p := range initial { + isInitial[p] = true + } + + ssamap := make(map[*packages.Package]*ssa.Package) + packages.Visit(initial, nil, func(p *packages.Package) { + if p.Types != nil && !p.IllTyped { + var files []*ast.File + var info *types.Info + if deps || isInitial[p] { + files = p.Syntax + info = p.TypesInfo + } + ssamap[p] = prog.CreatePackage(p.Types, files, info, true) + } + }) + + var ssapkgs []*ssa.Package + for _, p := range initial { + ssapkgs = append(ssapkgs, ssamap[p]) // may be nil + } + return prog, ssapkgs +} + +// CreateProgram returns a new program in SSA form, given a program +// loaded from source. An SSA package is created for each transitively +// error-free package of lprog. +// +// Code for bodies of functions is not built until Build is called +// on the result. +// +// The mode parameter controls diagnostics and checking during SSA construction. +// +// Deprecated: Use [golang.org/x/tools/go/packages] and the [Packages] +// function instead; see ssa.Example_loadPackages. +func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program { + prog := ssa.NewProgram(lprog.Fset, mode) + + for _, info := range lprog.AllPackages { + if info.TransitivelyErrorFree { + prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) + } + } + + return prog +} + +// BuildPackage builds an SSA program with SSA intermediate +// representation (IR) for all functions of a single package. +// +// It populates pkg by type-checking the specified file syntax trees. All +// dependencies are loaded using the importer specified by tc, which +// typically loads compiler export data; SSA code cannot be built for +// those packages. 
BuildPackage then constructs an [ssa.Program] with all +// dependency packages created, and builds and returns the SSA package +// corresponding to pkg. +// +// The caller must have set pkg.Path to the import path. +// +// The operation fails if there were any type-checking or import errors. +// +// See ../example_test.go for an example. +func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ssa.BuilderMode) (*ssa.Package, *types.Info, error) { + if fset == nil { + panic("no token.FileSet") + } + if pkg.Path() == "" { + panic("package has no import path") + } + + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + versions.InitFileVersions(info) + if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil { + return nil, nil, err + } + + prog := ssa.NewProgram(fset, mode) + + // Create SSA packages for all imports. + // Order is not significant. + created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + prog.CreatePackage(p, nil, nil, true) + createAll(p.Imports()) + } + } + } + createAll(pkg.Imports()) + + // TODO(adonovan): we could replace createAll with just: + // + // // Create SSA packages for all imports. + // for _, p := range pkg.Imports() { + // prog.CreatePackage(p, nil, nil, true) + // } + // + // (with minor changes to ../builder_test.go as + // shown in CL 511715 PS 10.) But this would strictly violate + // the letter of the doc comment above, which says "all + // dependencies created". + // + // Tim makes the good point that with some extra work we could + // remove the need for any CreatePackage calls except the + // ones with syntax (i.e. primary packages). Of course + // you wouldn't have ssa.Packages and Members for as + // many things, but no-one really uses that anyway. + // I wish I had done this from the outset. + + // Create and build the primary package. + ssapkg := prog.CreatePackage(pkg, files, info, false) + ssapkg.Build() + return ssapkg, info, nil +} diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/switch.go b/vendor/golang.org/x/tools/go/ssa/ssautil/switch.go new file mode 100644 index 0000000..dd4b04e --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/switch.go @@ -0,0 +1,230 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssautil + +// This file implements discovery of switch and type-switch constructs +// from low-level control flow. +// +// Many techniques exist for compiling a high-level switch with +// constant cases to efficient machine code. The optimal choice will +// depend on the data type, the specific case values, the code in the +// body of each case, and the hardware. +// Some examples: +// - a lookup table (for a switch that maps constants to constants) +// - a computed goto +// - a binary tree +// - a perfect hash +// - a two-level switch (to partition constant strings by their first byte).
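+//
+// Illustrative sketch (not part of the upstream source): given an *ssa.Function
+// fn whose body was built from an if/else chain such as
+//
+//	if x == 1 { ... } else if x == 2 { ... } else { ... }
+//
+// a client could recover the logical switch using the Switches function defined
+// below:
+//
+//	for _, sw := range ssautil.Switches(fn) {
+//		fmt.Println(sw.String()) // a switch-like rendering of the chain
+//	}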
+ +import ( + "bytes" + "fmt" + "go/token" + "go/types" + + "golang.org/x/tools/go/ssa" +) + +// A ConstCase represents a single constant comparison. +// It is part of a Switch. +type ConstCase struct { + Block *ssa.BasicBlock // block performing the comparison + Body *ssa.BasicBlock // body of the case + Value *ssa.Const // case comparand +} + +// A TypeCase represents a single type assertion. +// It is part of a Switch. +type TypeCase struct { + Block *ssa.BasicBlock // block performing the type assert + Body *ssa.BasicBlock // body of the case + Type types.Type // case type + Binding ssa.Value // value bound by this case +} + +// A Switch is a logical high-level control flow operation +// (a multiway branch) discovered by analysis of a CFG containing +// only if/else chains. It is not part of the ssa.Instruction set. +// +// One of ConstCases and TypeCases has length >= 2; +// the other is nil. +// +// In a value switch, the list of cases may contain duplicate constants. +// A type switch may contain duplicate types, or types assignable +// to an interface type also in the list. +// TODO(adonovan): eliminate such duplicates. +type Switch struct { + Start *ssa.BasicBlock // block containing start of if/else chain + X ssa.Value // the switch operand + ConstCases []ConstCase // ordered list of constant comparisons + TypeCases []TypeCase // ordered list of type assertions + Default *ssa.BasicBlock // successor if all comparisons fail +} + +func (sw *Switch) String() string { + // We represent each block by the String() of its + // first Instruction, e.g. "print(42:int)". + var buf bytes.Buffer + if sw.ConstCases != nil { + fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name()) + for _, c := range sw.ConstCases { + fmt.Fprintf(&buf, "case %s: %s\n", c.Value, c.Body.Instrs[0]) + } + } else { + fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name()) + for _, c := range sw.TypeCases { + fmt.Fprintf(&buf, "case %s %s: %s\n", + c.Binding.Name(), c.Type, c.Body.Instrs[0]) + } + } + if sw.Default != nil { + fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0]) + } + fmt.Fprintf(&buf, "}") + return buf.String() +} + +// Switches examines the control-flow graph of fn and returns the +// set of inferred value and type switches. A value switch tests an +// ssa.Value for equality against two or more compile-time constant +// values. Switches involving link-time constants (addresses) are +// ignored. A type switch type-asserts an ssa.Value against two or +// more types. +// +// The switches are returned in dominance order. +// +// The resulting switches do not necessarily correspond to uses of the +// 'switch' keyword in the source: for example, a single source-level +// switch statement with non-constant cases may result in zero, one or +// many Switches, one per plural sequence of constant cases. +// Switches may even be inferred from if/else- or goto-based control flow. +// (In general, the control flow constructs of the source program +// cannot be faithfully reproduced from the SSA representation.) +func Switches(fn *ssa.Function) []Switch { + // Traverse the CFG in dominance order, so we don't + // enter an if/else-chain in the middle. + var switches []Switch + seen := make(map[*ssa.BasicBlock]bool) // TODO(adonovan): opt: use ssa.blockSet + for _, b := range fn.DomPreorder() { + if x, k := isComparisonBlock(b); x != nil { + // Block b starts a switch. 
+ sw := Switch{Start: b, X: x} + valueSwitch(&sw, k, seen) + if len(sw.ConstCases) > 1 { + switches = append(switches, sw) + } + } + + if y, x, T := isTypeAssertBlock(b); y != nil { + // Block b starts a type switch. + sw := Switch{Start: b, X: x} + typeSwitch(&sw, y, T, seen) + if len(sw.TypeCases) > 1 { + switches = append(switches, sw) + } + } + } + return switches +} + +func valueSwitch(sw *Switch, k *ssa.Const, seen map[*ssa.BasicBlock]bool) { + b := sw.Start + x := sw.X + for x == sw.X { + if seen[b] { + break + } + seen[b] = true + + sw.ConstCases = append(sw.ConstCases, ConstCase{ + Block: b, + Body: b.Succs[0], + Value: k, + }) + b = b.Succs[1] + if len(b.Instrs) > 2 { + // Block b contains not just 'if x == k', + // so it may have side effects that + // make it unsafe to elide. + break + } + if len(b.Preds) != 1 { + // Block b has multiple predecessors, + // so it cannot be treated as a case. + break + } + x, k = isComparisonBlock(b) + } + sw.Default = b +} + +func typeSwitch(sw *Switch, y ssa.Value, T types.Type, seen map[*ssa.BasicBlock]bool) { + b := sw.Start + x := sw.X + for x == sw.X { + if seen[b] { + break + } + seen[b] = true + + sw.TypeCases = append(sw.TypeCases, TypeCase{ + Block: b, + Body: b.Succs[0], + Type: T, + Binding: y, + }) + b = b.Succs[1] + if len(b.Instrs) > 4 { + // Block b contains not just + // {TypeAssert; Extract #0; Extract #1; If} + // so it may have side effects that + // make it unsafe to elide. + break + } + if len(b.Preds) != 1 { + // Block b has multiple predecessors, + // so it cannot be treated as a case. + break + } + y, x, T = isTypeAssertBlock(b) + } + sw.Default = b +} + +// isComparisonBlock returns the operands (v, k) if a block ends with +// a comparison v==k, where k is a compile-time constant. +func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) { + if n := len(b.Instrs); n >= 2 { + if i, ok := b.Instrs[n-1].(*ssa.If); ok { + if binop, ok := i.Cond.(*ssa.BinOp); ok && binop.Block() == b && binop.Op == token.EQL { + if k, ok := binop.Y.(*ssa.Const); ok { + return binop.X, k + } + if k, ok := binop.X.(*ssa.Const); ok { + return binop.Y, k + } + } + } + } + return +} + +// isTypeAssertBlock returns the operands (y, x, T) if a block ends with +// a type assertion "if y, ok := x.(T); ok {". +func isTypeAssertBlock(b *ssa.BasicBlock) (y, x ssa.Value, T types.Type) { + if n := len(b.Instrs); n >= 4 { + if i, ok := b.Instrs[n-1].(*ssa.If); ok { + if ext1, ok := i.Cond.(*ssa.Extract); ok && ext1.Block() == b && ext1.Index == 1 { + if ta, ok := ext1.Tuple.(*ssa.TypeAssert); ok && ta.Block() == b { + // hack: relies upon instruction ordering. + if ext0, ok := b.Instrs[n-3].(*ssa.Extract); ok { + return ext0, ta.X, ta.AssertedType + } + } + } + } + } + return +} diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go new file mode 100644 index 0000000..b4feb42 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go @@ -0,0 +1,157 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssautil // import "golang.org/x/tools/go/ssa/ssautil" + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ssa" + + _ "unsafe" // for linkname hack +) + +// This file defines utilities for visiting the SSA representation of +// a Program. +// +// TODO(adonovan): test coverage. 
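+//
+// Illustrative sketch (not part of the upstream source): a typical pairing of
+// the helpers in this file, assuming prog and pkgs were produced by Packages or
+// AllPackages and the program has been built:
+//
+//	prog.Build()
+//	for fn := range ssautil.AllFunctions(prog) {
+//		_ = fn // e.g. hand fn to an analysis
+//	}
+//	mains := ssautil.MainPackages(pkgs)
+//	_ = mains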
+ +// AllFunctions finds and returns the set of functions potentially +// needed by program prog, as determined by a simple linker-style +// reachability algorithm starting from the members and method-sets of +// each package. The result may include anonymous functions and +// synthetic wrappers. +// +// Precondition: all packages are built. +// +// TODO(adonovan): this function is underspecified. It doesn't +// actually work like a linker, which computes reachability from main +// using something like go/callgraph/cha (without materializing the +// call graph). In fact, it treats all public functions and all +// methods of public non-parameterized types as roots, even though +// they may be unreachable--but only in packages created from syntax. +// +// I think we should deprecate AllFunctions function in favor of two +// clearly defined ones: +// +// 1. The first would efficiently compute CHA reachability from a set +// of main packages, making it suitable for a whole-program +// analysis context with InstantiateGenerics, in conjunction with +// Program.Build. +// +// 2. The second would return only the set of functions corresponding +// to source Func{Decl,Lit} syntax, like SrcFunctions in +// go/analysis/passes/buildssa; this is suitable for +// package-at-a-time (or handful of packages) context. +// ssa.Package could easily expose it as a field. +// +// We could add them unexported for now and use them via the linkname hack. +func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool { + seen := make(map[*ssa.Function]bool) + + var function func(fn *ssa.Function) + function = func(fn *ssa.Function) { + if !seen[fn] { + seen[fn] = true + var buf [10]*ssa.Value // avoid alloc in common case + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + for _, op := range instr.Operands(buf[:0]) { + if fn, ok := (*op).(*ssa.Function); ok { + function(fn) + } + } + } + } + } + } + + // TODO(adonovan): opt: provide a way to share a builder + // across a sequence of MethodValue calls. + + methodsOf := func(T types.Type) { + if !types.IsInterface(T) { + mset := prog.MethodSets.MethodSet(T) + for i := 0; i < mset.Len(); i++ { + function(prog.MethodValue(mset.At(i))) + } + } + } + + // Historically, Program.RuntimeTypes used to include the type + // of any exported member of a package loaded from syntax that + // has a non-parameterized type, plus all types + // reachable from that type using reflection, even though + // these runtime types may not be required for them. + // + // Rather than break existing programs that rely on + // AllFunctions visiting extra methods that are unreferenced + // by IR and unreachable via reflection, we moved the logic + // here, unprincipled though it is. + // (See doc comment for better ideas.) + // + // Nonetheless, after the move, we no longer visit every + // method of any type recursively reachable from T, only the + // methods of T and *T themselves, and we only apply this to + // named types T, and not to the type of every exported + // package member. + exportedTypeHack := func(t *ssa.Type) { + if isSyntactic(t.Package()) && + ast.IsExported(t.Name()) && + !types.IsInterface(t.Type()) { + // Consider only named types. + // (Ignore aliases and unsafe.Pointer.) 
+ if named, ok := t.Type().(*types.Named); ok { + if named.TypeParams() == nil { + methodsOf(named) // T + methodsOf(types.NewPointer(named)) // *T + } + } + } + } + + for _, pkg := range prog.AllPackages() { + for _, mem := range pkg.Members { + switch mem := mem.(type) { + case *ssa.Function: + // Visit all package-level declared functions. + function(mem) + + case *ssa.Type: + exportedTypeHack(mem) + } + } + } + + // Visit all methods of types for which runtime types were + // materialized, as they are reachable through reflection. + for _, T := range prog.RuntimeTypes() { + methodsOf(T) + } + + return seen +} + +// MainPackages returns the subset of the specified packages +// named "main" that define a main function. +// The result may include synthetic "testmain" packages. +func MainPackages(pkgs []*ssa.Package) []*ssa.Package { + var mains []*ssa.Package + for _, pkg := range pkgs { + if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil { + mains = append(mains, pkg) + } + } + return mains +} + +// TODO(adonovan): propose a principled API for this. One possibility +// is a new field, Package.SrcFunctions []*Function, which would +// contain the list of SrcFunctions described in point 2 of the +// AllFunctions doc comment, or nil if the package is not from syntax. +// But perhaps overloading nil vs empty slice is too subtle. +// +//go:linkname isSyntactic golang.org/x/tools/go/ssa.isSyntactic +func isSyntactic(pkg *ssa.Package) bool diff --git a/vendor/golang.org/x/tools/go/ssa/subst.go b/vendor/golang.org/x/tools/go/ssa/subst.go new file mode 100644 index 0000000..4dcb871 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/subst.go @@ -0,0 +1,642 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "go/types" + + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/aliases" +) + +// subster defines a type substitution operation of a set of type parameters +// to type parameter free replacement types. Substitution is done within +// the context of a package-level function instantiation. *Named types +// declared in the function are unique to the instantiation. +// +// For example, given a parameterized function F +// +// func F[S, T any]() any { +// type X struct{ s S; next *X } +// var p *X +// return p +// } +// +// calling the instantiation F[string, int]() returns an interface +// value (*X[string,int], nil) where the underlying value of +// X[string,int] is a struct{s string; next *X[string,int]}. +// +// A nil *subster is a valid, empty substitution map. It always acts as +// the identity function. This allows for treating parameterized and +// non-parameterized functions identically while compiling to ssa. +// +// Not concurrency-safe. +// +// Note: Some may find it helpful to think through some of the most +// complex substitution cases using lambda calculus inspired notation. +// subst.typ() solves evaluating a type expression E +// within the body of a function Fn[m] with the type parameters m +// once we have applied the type arguments N. +// We can succinctly write this as a function application: +// +// ((λm. E) N) +// +// go/types does not provide this interface directly. 
+// So what subster provides is a type substitution operation +// +// E[m:=N] +type subster struct { + replacements map[*types.TypeParam]types.Type // values should contain no type params + cache map[types.Type]types.Type // cache of subst results + origin *types.Func // types.Objects declared within this origin function are unique within this context + ctxt *types.Context // speeds up repeated instantiations + uniqueness typeutil.Map // determines the uniqueness of the instantiations within the function + // TODO(taking): consider adding Pos +} + +// Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache. +// targs should not contain any types in tparams. +// fn is the generic function for which we are substituting. +func makeSubster(ctxt *types.Context, fn *types.Func, tparams *types.TypeParamList, targs []types.Type, debug bool) *subster { + assert(tparams.Len() == len(targs), "makeSubster argument count must match") + + subst := &subster{ + replacements: make(map[*types.TypeParam]types.Type, tparams.Len()), + cache: make(map[types.Type]types.Type), + origin: fn.Origin(), + ctxt: ctxt, + } + for i := 0; i < tparams.Len(); i++ { + subst.replacements[tparams.At(i)] = targs[i] + } + return subst +} + +// typ returns the type of t with the type parameter tparams[i] substituted +// for the type targs[i] where subst was created using tparams and targs. +func (subst *subster) typ(t types.Type) (res types.Type) { + if subst == nil { + return t // A nil subst is type preserving. + } + if r, ok := subst.cache[t]; ok { + return r + } + defer func() { + subst.cache[t] = res + }() + + switch t := t.(type) { + case *types.TypeParam: + if r := subst.replacements[t]; r != nil { + return r + } + return t + + case *types.Basic: + return t + + case *types.Array: + if r := subst.typ(t.Elem()); r != t.Elem() { + return types.NewArray(r, t.Len()) + } + return t + + case *types.Slice: + if r := subst.typ(t.Elem()); r != t.Elem() { + return types.NewSlice(r) + } + return t + + case *types.Pointer: + if r := subst.typ(t.Elem()); r != t.Elem() { + return types.NewPointer(r) + } + return t + + case *types.Tuple: + return subst.tuple(t) + + case *types.Struct: + return subst.struct_(t) + + case *types.Map: + key := subst.typ(t.Key()) + elem := subst.typ(t.Elem()) + if key != t.Key() || elem != t.Elem() { + return types.NewMap(key, elem) + } + return t + + case *types.Chan: + if elem := subst.typ(t.Elem()); elem != t.Elem() { + return types.NewChan(t.Dir(), elem) + } + return t + + case *types.Signature: + return subst.signature(t) + + case *types.Union: + return subst.union(t) + + case *types.Interface: + return subst.interface_(t) + + case *aliases.Alias: + return subst.alias(t) + + case *types.Named: + return subst.named(t) + + case *opaqueType: + return t // opaque types are never substituted + + default: + panic("unreachable") + } +} + +// types returns the result of {subst.typ(ts[i])}. +func (subst *subster) types(ts []types.Type) []types.Type { + res := make([]types.Type, len(ts)) + for i := range ts { + res[i] = subst.typ(ts[i]) + } + return res +} + +func (subst *subster) tuple(t *types.Tuple) *types.Tuple { + if t != nil { + if vars := subst.varlist(t); vars != nil { + return types.NewTuple(vars...) + } + } + return t +} + +type varlist interface { + At(i int) *types.Var + Len() int +} + +// fieldlist is an adapter for structs for the varlist interface. 
+type fieldlist struct { + str *types.Struct +} + +func (fl fieldlist) At(i int) *types.Var { return fl.str.Field(i) } +func (fl fieldlist) Len() int { return fl.str.NumFields() } + +func (subst *subster) struct_(t *types.Struct) *types.Struct { + if t != nil { + if fields := subst.varlist(fieldlist{t}); fields != nil { + tags := make([]string, t.NumFields()) + for i, n := 0, t.NumFields(); i < n; i++ { + tags[i] = t.Tag(i) + } + return types.NewStruct(fields, tags) + } + } + return t +} + +// varlist returns subst(in[i]) or return nils if subst(v[i]) == v[i] for all i. +func (subst *subster) varlist(in varlist) []*types.Var { + var out []*types.Var // nil => no updates + for i, n := 0, in.Len(); i < n; i++ { + v := in.At(i) + w := subst.var_(v) + if v != w && out == nil { + out = make([]*types.Var, n) + for j := 0; j < i; j++ { + out[j] = in.At(j) + } + } + if out != nil { + out[i] = w + } + } + return out +} + +func (subst *subster) var_(v *types.Var) *types.Var { + if v != nil { + if typ := subst.typ(v.Type()); typ != v.Type() { + if v.IsField() { + return types.NewField(v.Pos(), v.Pkg(), v.Name(), typ, v.Embedded()) + } + return types.NewVar(v.Pos(), v.Pkg(), v.Name(), typ) + } + } + return v +} + +func (subst *subster) union(u *types.Union) *types.Union { + var out []*types.Term // nil => no updates + + for i, n := 0, u.Len(); i < n; i++ { + t := u.Term(i) + r := subst.typ(t.Type()) + if r != t.Type() && out == nil { + out = make([]*types.Term, n) + for j := 0; j < i; j++ { + out[j] = u.Term(j) + } + } + if out != nil { + out[i] = types.NewTerm(t.Tilde(), r) + } + } + + if out != nil { + return types.NewUnion(out) + } + return u +} + +func (subst *subster) interface_(iface *types.Interface) *types.Interface { + if iface == nil { + return nil + } + + // methods for the interface. Initially nil if there is no known change needed. + // Signatures for the method where recv is nil. NewInterfaceType fills in the receivers. + var methods []*types.Func + initMethods := func(n int) { // copy first n explicit methods + methods = make([]*types.Func, iface.NumExplicitMethods()) + for i := 0; i < n; i++ { + f := iface.ExplicitMethod(i) + norecv := changeRecv(f.Type().(*types.Signature), nil) + methods[i] = types.NewFunc(f.Pos(), f.Pkg(), f.Name(), norecv) + } + } + for i := 0; i < iface.NumExplicitMethods(); i++ { + f := iface.ExplicitMethod(i) + // On interfaces, we need to cycle break on anonymous interface types + // being in a cycle with their signatures being in cycles with their receivers + // that do not go through a Named. 
+ norecv := changeRecv(f.Type().(*types.Signature), nil) + sig := subst.typ(norecv) + if sig != norecv && methods == nil { + initMethods(i) + } + if methods != nil { + methods[i] = types.NewFunc(f.Pos(), f.Pkg(), f.Name(), sig.(*types.Signature)) + } + } + + var embeds []types.Type + initEmbeds := func(n int) { // copy first n embedded types + embeds = make([]types.Type, iface.NumEmbeddeds()) + for i := 0; i < n; i++ { + embeds[i] = iface.EmbeddedType(i) + } + } + for i := 0; i < iface.NumEmbeddeds(); i++ { + e := iface.EmbeddedType(i) + r := subst.typ(e) + if e != r && embeds == nil { + initEmbeds(i) + } + if embeds != nil { + embeds[i] = r + } + } + + if methods == nil && embeds == nil { + return iface + } + if methods == nil { + initMethods(iface.NumExplicitMethods()) + } + if embeds == nil { + initEmbeds(iface.NumEmbeddeds()) + } + return types.NewInterfaceType(methods, embeds).Complete() +} + +func (subst *subster) alias(t *aliases.Alias) types.Type { + // See subster.named. This follows the same strategy. + tparams := aliases.TypeParams(t) + targs := aliases.TypeArgs(t) + tname := t.Obj() + torigin := aliases.Origin(t) + + if !declaredWithin(tname, subst.origin) { + // t is declared outside of the function origin. So t is a package level type alias. + if targs.Len() == 0 { + // No type arguments so no instantiation needed. + return t + } + + // Instantiate with the substituted type arguments. + newTArgs := subst.typelist(targs) + return subst.instantiate(torigin, newTArgs) + } + + if targs.Len() == 0 { + // t is declared within the function origin and has no type arguments. + // + // Example: This corresponds to A or B in F, but not A[int]: + // + // func F[T any]() { + // type A[S any] = struct{t T, s S} + // type B = T + // var x A[int] + // ... + // } + // + // This is somewhat different than *Named as *Alias cannot be created recursively. + + // Copy and substitute type params. + var newTParams []*types.TypeParam + for i := 0; i < tparams.Len(); i++ { + cur := tparams.At(i) + cobj := cur.Obj() + cname := types.NewTypeName(cobj.Pos(), cobj.Pkg(), cobj.Name(), nil) + ntp := types.NewTypeParam(cname, nil) + subst.cache[cur] = ntp // See the comment "Note: Subtle" in subster.named. + newTParams = append(newTParams, ntp) + } + + // Substitute rhs. + rhs := subst.typ(aliases.Rhs(t)) + + // Create the fresh alias. + obj := aliases.NewAlias(true, tname.Pos(), tname.Pkg(), tname.Name(), rhs) + fresh := obj.Type() + if fresh, ok := fresh.(*aliases.Alias); ok { + // TODO: assume ok when aliases are always materialized (go1.27). + aliases.SetTypeParams(fresh, newTParams) + } + + // Substitute into all of the constraints after they are created. + for i, ntp := range newTParams { + bound := tparams.At(i).Constraint() + ntp.SetConstraint(subst.typ(bound)) + } + return fresh + } + + // t is declared within the function origin and has type arguments. + // + // Example: This corresponds to A[int] in F. Cases A and B are handled above. + // func F[T any]() { + // type A[S any] = struct{t T, s S} + // type B = T + // var x A[int] + // ... + // } + subOrigin := subst.typ(torigin) + subTArgs := subst.typelist(targs) + return subst.instantiate(subOrigin, subTArgs) +} + +func (subst *subster) named(t *types.Named) types.Type { + // A Named type is a user defined type. + // Ignoring generics, Named types are canonical: they are identical if + // and only if they have the same defining symbol. 
+ // Generics complicate things, both if the type definition itself is + // parameterized, and if the type is defined within the scope of a + // parameterized function. In this case, two named types are identical if + // and only if their identifying symbols are identical, and all type + // arguments bindings in scope of the named type definition (including the + // type parameters of the definition itself) are equivalent. + // + // Notably: + // 1. For type definition type T[P1 any] struct{}, T[A] and T[B] are identical + // only if A and B are identical. + // 2. Inside the generic func Fn[m any]() any { type T struct{}; return T{} }, + // the result of Fn[A] and Fn[B] have identical type if and only if A and + // B are identical. + // 3. Both 1 and 2 could apply, such as in + // func F[m any]() any { type T[x any] struct{}; return T{} } + // + // A subster replaces type parameters within a function scope, and therefore must + // also replace free type parameters in the definitions of local types. + // + // Note: There are some detailed notes sprinkled throughout that borrow from + // lambda calculus notation. These contain some over simplifying math. + // + // LC: One way to think about subster is that it is a way of evaluating + // ((λm. E) N) as E[m:=N]. + // Each Named type t has an object *TypeName within a scope S that binds an + // underlying type expression U. U can refer to symbols within S (+ S's ancestors). + // Let x = t.TypeParams() and A = t.TypeArgs(). + // Each Named type t is then either: + // U where len(x) == 0 && len(A) == 0 + // λx. U where len(x) != 0 && len(A) == 0 + // ((λx. U) A) where len(x) == len(A) + // In each case, we will evaluate t[m:=N]. + tparams := t.TypeParams() // x + targs := t.TypeArgs() // A + + if !declaredWithin(t.Obj(), subst.origin) { + // t is declared outside of Fn[m]. + // + // In this case, we can skip substituting t.Underlying(). + // The underlying type cannot refer to the type parameters. + // + // LC: Let free(E) be the set of free type parameters in an expression E. + // Then whenever m ∉ free(E), then E = E[m:=N]. + // t ∉ Scope(fn) so therefore m ∉ free(U) and m ∩ x = ∅. + if targs.Len() == 0 { + // t has no type arguments. So it does not need to be instantiated. + // + // This is the normal case in real Go code, where t is not parameterized, + // declared at some package scope, and m is a TypeParam from a parameterized + // function F[m] or method. + // + // LC: m ∉ free(A) lets us conclude m ∉ free(t). So t=t[m:=N]. + return t + } + + // t is declared outside of Fn[m] and has type arguments. + // The type arguments may contain type parameters m so + // substitute the type arguments, and instantiate the substituted + // type arguments. + // + // LC: Evaluate this as ((λx. U) A') where A' = A[m := N]. + newTArgs := subst.typelist(targs) + return subst.instantiate(t.Origin(), newTArgs) + } + + // t is declared within Fn[m]. + + if targs.Len() == 0 { // no type arguments? + assert(t == t.Origin(), "local parameterized type abstraction must be an origin type") + + // t has no type arguments. + // The underlying type of t may contain the function's type parameters, + // replace these, and create a new type. + // + // Subtle: We short circuit substitution and use a newly created type in + // subst, i.e. cache[t]=fresh, to preemptively replace t with fresh + // in recursive types during traversal. This both breaks infinite cycles + // and allows for constructing types with the replacement applied in + // subst.typ(U). 
+ // + // A new copy of the Named and Typename (and constraints) per function + // instantiation matches the semantics of Go, which treats all function + // instantiations F[N] as having distinct local types. + // + // LC: x.Len()=0 can be thought of as a special case of λx. U. + // LC: Evaluate (λx. U)[m:=N] as (λx'. U') where U'=U[x:=x',m:=N]. + tname := t.Obj() + obj := types.NewTypeName(tname.Pos(), tname.Pkg(), tname.Name(), nil) + fresh := types.NewNamed(obj, nil, nil) + var newTParams []*types.TypeParam + for i := 0; i < tparams.Len(); i++ { + cur := tparams.At(i) + cobj := cur.Obj() + cname := types.NewTypeName(cobj.Pos(), cobj.Pkg(), cobj.Name(), nil) + ntp := types.NewTypeParam(cname, nil) + subst.cache[cur] = ntp + newTParams = append(newTParams, ntp) + } + fresh.SetTypeParams(newTParams) + subst.cache[t] = fresh + subst.cache[fresh] = fresh + fresh.SetUnderlying(subst.typ(t.Underlying())) + // Substitute into all of the constraints after they are created. + for i, ntp := range newTParams { + bound := tparams.At(i).Constraint() + ntp.SetConstraint(subst.typ(bound)) + } + return fresh + } + + // t is defined within Fn[m] and t has type arguments (an instantiation). + // We reduce this to the two cases above: + // (1) substitute the function's type parameters into t.Origin(). + // (2) substitute t's type arguments A and instantiate the updated t.Origin() with these. + // + // LC: Evaluate ((λx. U) A)[m:=N] as (t' A') where t' = (λx. U)[m:=N] and A'=A [m:=N] + subOrigin := subst.typ(t.Origin()) + subTArgs := subst.typelist(targs) + return subst.instantiate(subOrigin, subTArgs) +} + +func (subst *subster) instantiate(orig types.Type, targs []types.Type) types.Type { + i, err := types.Instantiate(subst.ctxt, orig, targs, false) + assert(err == nil, "failed to Instantiate named (Named or Alias) type") + if c, _ := subst.uniqueness.At(i).(types.Type); c != nil { + return c.(types.Type) + } + subst.uniqueness.Set(i, i) + return i +} + +func (subst *subster) typelist(l *types.TypeList) []types.Type { + res := make([]types.Type, l.Len()) + for i := 0; i < l.Len(); i++ { + res[i] = subst.typ(l.At(i)) + } + return res +} + +func (subst *subster) signature(t *types.Signature) types.Type { + tparams := t.TypeParams() + + // We are choosing not to support tparams.Len() > 0 until a need has been observed in practice. + // + // There are some known usages for types.Types coming from types.{Eval,CheckExpr}. + // To support tparams.Len() > 0, we just need to do the following [psuedocode]: + // targs := {subst.replacements[tparams[i]]]}; Instantiate(ctxt, t, targs, false) + + assert(tparams.Len() == 0, "Substituting types.Signatures with generic functions are currently unsupported.") + + // Either: + // (1)non-generic function. + // no type params to substitute + // (2)generic method and recv needs to be substituted. + + // Receivers can be either: + // named + // pointer to named + // interface + // nil + // interface is the problematic case. We need to cycle break there! + recv := subst.var_(t.Recv()) + params := subst.tuple(t.Params()) + results := subst.tuple(t.Results()) + if recv != t.Recv() || params != t.Params() || results != t.Results() { + return types.NewSignatureType(recv, nil, nil, params, results, t.Variadic()) + } + return t +} + +// reaches returns true if a type t reaches any type t' s.t. c[t'] == true. +// It updates c to cache results. +// +// reaches is currently only part of the wellFormed debug logic, and +// in practice c is initially only type parameters. 
It is not currently +// relied on in production. +func reaches(t types.Type, c map[types.Type]bool) (res bool) { + if c, ok := c[t]; ok { + return c + } + + // c is populated with temporary false entries as types are visited. + // This avoids repeat visits and break cycles. + c[t] = false + defer func() { + c[t] = res + }() + + switch t := t.(type) { + case *types.TypeParam, *types.Basic: + return false + case *types.Array: + return reaches(t.Elem(), c) + case *types.Slice: + return reaches(t.Elem(), c) + case *types.Pointer: + return reaches(t.Elem(), c) + case *types.Tuple: + for i := 0; i < t.Len(); i++ { + if reaches(t.At(i).Type(), c) { + return true + } + } + case *types.Struct: + for i := 0; i < t.NumFields(); i++ { + if reaches(t.Field(i).Type(), c) { + return true + } + } + case *types.Map: + return reaches(t.Key(), c) || reaches(t.Elem(), c) + case *types.Chan: + return reaches(t.Elem(), c) + case *types.Signature: + if t.Recv() != nil && reaches(t.Recv().Type(), c) { + return true + } + return reaches(t.Params(), c) || reaches(t.Results(), c) + case *types.Union: + for i := 0; i < t.Len(); i++ { + if reaches(t.Term(i).Type(), c) { + return true + } + } + case *types.Interface: + for i := 0; i < t.NumEmbeddeds(); i++ { + if reaches(t.Embedded(i), c) { + return true + } + } + for i := 0; i < t.NumExplicitMethods(); i++ { + if reaches(t.ExplicitMethod(i).Type(), c) { + return true + } + } + case *types.Named, *aliases.Alias: + return reaches(t.Underlying(), c) + default: + panic("unreachable") + } + return false +} diff --git a/vendor/golang.org/x/tools/go/ssa/task.go b/vendor/golang.org/x/tools/go/ssa/task.go new file mode 100644 index 0000000..5024985 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/task.go @@ -0,0 +1,103 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +import ( + "sync/atomic" +) + +// Each task has two states: it is initially "active", +// and transitions to "done". +// +// tasks form a directed graph. An edge from x to y (with y in x.edges) +// indicates that the task x waits on the task y to be done. +// Cycles are permitted. +// +// Calling x.wait() blocks the calling goroutine until task x, +// and all the tasks transitively reachable from x are done. +// +// The nil *task is always considered done. +type task struct { + done chan unit // close when the task is done. + edges map[*task]unit // set of predecessors of this task. + transitive atomic.Bool // true once it is known all predecessors are done. +} + +func (x *task) isTransitivelyDone() bool { return x == nil || x.transitive.Load() } + +// addEdge creates an edge from x to y, indicating that +// x.wait() will not return before y is done. +// All calls to x.addEdge(...) should happen before x.markDone(). +func (x *task) addEdge(y *task) { + if x == y || y.isTransitivelyDone() { + return // no work remaining + } + + // heuristic done check + select { + case <-x.done: + panic("cannot add an edge to a done task") + default: + } + + if x.edges == nil { + x.edges = make(map[*task]unit) + } + x.edges[y] = unit{} +} + +// markDone changes the task's state to markDone. +func (x *task) markDone() { + if x != nil { + close(x.done) + } +} + +// wait blocks until x and all the tasks it can reach through edges are done. +func (x *task) wait() { + if x.isTransitivelyDone() { + return // already known to be done. Skip allocations. 
} + + // Use BFS to wait on u.done to be closed, for all u transitively + // reachable from x via edges. + // + // This work can be repeated by multiple workers doing wait(). + // + // Note: Tarjan's SCC algorithm is able to mark SCCs as transitively done + // as soon as the SCC has been visited. This is theoretically faster, but is + // a more complex algorithm. Until we have evidence that we need the more complex + // algorithm, the simpler BFS algorithm is implemented. + // + // In Go 1.23, ssa/TestStdlib reaches <=3 *tasks per wait() in most schedules. + // On some schedules, there is a cycle building net/http and internal/trace/testtrace + // due to slices functions. + work := []*task{x} + enqueued := map[*task]unit{x: {}} + for i := 0; i < len(work); i++ { + u := work[i] + if u.isTransitivelyDone() { // already transitively done + work[i] = nil + continue + } + <-u.done // wait for u to be marked done. + + for v := range u.edges { + if _, ok := enqueued[v]; !ok { + enqueued[v] = unit{} + work = append(work, v) + } + } + } + + // work is transitively closed over dependencies. + // u in work is done (or transitively done and skipped). + // u is transitively done. + for _, u := range work { + if u != nil { + u.transitive.Store(true) + } + } +} diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go new file mode 100644 index 0000000..549c9c8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -0,0 +1,430 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines a number of miscellaneous utility functions. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "io" + "os" + "sync" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +type unit struct{} + +//// Sanity checking utilities + +// assert panics with the message msg if p is false. +// Avoid combining with expensive string formatting. +func assert(p bool, msg string) { + if !p { + panic(msg) + } +} + +//// AST utilities + +func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } + +// isBlankIdent returns true iff e is an Ident with name "_". +// They have no associated types.Object, and thus no type. +func isBlankIdent(e ast.Expr) bool { + id, ok := e.(*ast.Ident) + return ok && id.Name == "_" +} + +// rangePosition is the position to give for the `range` token in a RangeStmt. +var rangePosition = func(rng *ast.RangeStmt) token.Pos { + // Before 1.20, this is unreachable. + // rng.For is a close, but incorrect position. + return rng.For +} + +//// Type utilities. Some of these belong in go/types. + +// isNonTypeParamInterface reports whether t is an interface type but not a type parameter. +func isNonTypeParamInterface(t types.Type) bool { + return !typeparams.IsTypeParam(t) && types.IsInterface(t) +} + +// isBasic reports whether t is a basic type. +// t is assumed to be an Underlying type (not Named or Alias). +func isBasic(t types.Type) bool { + _, ok := t.(*types.Basic) + return ok +} + +// isString reports whether t is exactly a string type. +// t is assumed to be an Underlying type (not Named or Alias).
+func isString(t types.Type) bool { + basic, ok := t.(*types.Basic) + return ok && basic.Info()&types.IsString != 0 +} + +// isByteSlice reports whether t is of the form []~bytes. +// t is assumed to be an Underlying type (not Named or Alias). +func isByteSlice(t types.Type) bool { + if b, ok := t.(*types.Slice); ok { + e, _ := b.Elem().Underlying().(*types.Basic) + return e != nil && e.Kind() == types.Byte + } + return false +} + +// isRuneSlice reports whether t is of the form []~runes. +// t is assumed to be an Underlying type (not Named or Alias). +func isRuneSlice(t types.Type) bool { + if b, ok := t.(*types.Slice); ok { + e, _ := b.Elem().Underlying().(*types.Basic) + return e != nil && e.Kind() == types.Rune + } + return false +} + +// isBasicConvTypes returns true when a type set can be +// one side of a Convert operation. This is when: +// - All are basic, []byte, or []rune. +// - At least 1 is basic. +// - At most 1 is []byte or []rune. +func isBasicConvTypes(tset termList) bool { + basics := 0 + all := underIs(tset, func(t types.Type) bool { + if isBasic(t) { + basics++ + return true + } + return isByteSlice(t) || isRuneSlice(t) + }) + return all && basics >= 1 && tset.Len()-basics <= 1 +} + +// isPointer reports whether t's underlying type is a pointer. +func isPointer(t types.Type) bool { + return is[*types.Pointer](t.Underlying()) +} + +// isPointerCore reports whether t's core type is a pointer. +// +// (Most pointer manipulation is related to receivers, in which case +// isPointer is appropriate. Those callers can use isPointer(t).) +func isPointerCore(t types.Type) bool { + return is[*types.Pointer](typeparams.CoreType(t)) +} + +func is[T any](x any) bool { + _, ok := x.(T) + return ok +} + +// recvType returns the receiver type of method obj. +func recvType(obj *types.Func) types.Type { + return obj.Type().(*types.Signature).Recv().Type() +} + +// fieldOf returns the index'th field of the (core type of) a struct type; +// otherwise returns nil. +func fieldOf(typ types.Type, index int) *types.Var { + if st, ok := typeparams.CoreType(typ).(*types.Struct); ok { + if 0 <= index && index < st.NumFields() { + return st.Field(index) + } + } + return nil +} + +// isUntyped reports whether typ is the type of an untyped constant. +func isUntyped(typ types.Type) bool { + // No Underlying/Unalias: untyped constant types cannot be Named or Alias. + b, ok := typ.(*types.Basic) + return ok && b.Info()&types.IsUntyped != 0 +} + +// declaredWithin reports whether an object is declared within a function. +// +// obj must not be a method or a field. +func declaredWithin(obj types.Object, fn *types.Func) bool { + if obj.Pos() != token.NoPos { + return fn.Scope().Contains(obj.Pos()) // trust the positions if they exist. + } + if fn.Pkg() != obj.Pkg() { + return false // fast path for different packages + } + + // Traverse Parent() scopes for fn.Scope(). + for p := obj.Parent(); p != nil; p = p.Parent() { + if p == fn.Scope() { + return true + } + } + return false +} + +// logStack prints the formatted "start" message to stderr and +// returns a closure that prints the corresponding "end" message. +// Call using 'defer logStack(...)()' to show builder stack on panic. +// Don't forget trailing parens! +func logStack(format string, args ...interface{}) func() { + msg := fmt.Sprintf(format, args...)
+ io.WriteString(os.Stderr, msg) + io.WriteString(os.Stderr, "\n") + return func() { + io.WriteString(os.Stderr, msg) + io.WriteString(os.Stderr, " end\n") + } +} + +// newVar creates a 'var' for use in a types.Tuple. +func newVar(name string, typ types.Type) *types.Var { + return types.NewParam(token.NoPos, nil, name, typ) +} + +// anonVar creates an anonymous 'var' for use in a types.Tuple. +func anonVar(typ types.Type) *types.Var { + return newVar("", typ) +} + +var lenResults = types.NewTuple(anonVar(tInt)) + +// makeLen returns the len builtin specialized to type func(T)int. +func makeLen(T types.Type) *Builtin { + lenParams := types.NewTuple(anonVar(T)) + return &Builtin{ + name: "len", + sig: types.NewSignature(nil, lenParams, lenResults, false), + } +} + +// receiverTypeArgs returns the type arguments to a method's receiver. +// Returns an empty list if the receiver does not have type arguments. +func receiverTypeArgs(method *types.Func) []types.Type { + recv := method.Type().(*types.Signature).Recv() + _, named := typesinternal.ReceiverNamed(recv) + if named == nil { + return nil // recv is anonymous struct/interface + } + ts := named.TypeArgs() + if ts.Len() == 0 { + return nil + } + targs := make([]types.Type, ts.Len()) + for i := 0; i < ts.Len(); i++ { + targs[i] = ts.At(i) + } + return targs +} + +// recvAsFirstArg takes a method signature and returns a function +// signature with receiver as the first parameter. +func recvAsFirstArg(sig *types.Signature) *types.Signature { + params := make([]*types.Var, 0, 1+sig.Params().Len()) + params = append(params, sig.Recv()) + for i := 0; i < sig.Params().Len(); i++ { + params = append(params, sig.Params().At(i)) + } + return types.NewSignatureType(nil, nil, nil, types.NewTuple(params...), sig.Results(), sig.Variadic()) +} + +// instance returns whether an expression is a simple or qualified identifier +// that is a generic instantiation. +func instance(info *types.Info, expr ast.Expr) bool { + // Compare the logic here against go/types.instantiatedIdent, + // which also handles *IndexExpr and *IndexListExpr. + var id *ast.Ident + switch x := expr.(type) { + case *ast.Ident: + id = x + case *ast.SelectorExpr: + id = x.Sel + default: + return false + } + _, ok := info.Instances[id] + return ok +} + +// instanceArgs returns the Instance[id].TypeArgs as a slice. +func instanceArgs(info *types.Info, id *ast.Ident) []types.Type { + targList := info.Instances[id].TypeArgs + if targList == nil { + return nil + } + + targs := make([]types.Type, targList.Len()) + for i, n := 0, targList.Len(); i < n; i++ { + targs[i] = targList.At(i) + } + return targs +} + +// Mapping of a type T to a canonical instance C s.t. types.Identical(T, C). +// Thread-safe. +type canonizer struct { + mu sync.Mutex + types typeutil.Map // map from type to a canonical instance + lists typeListMap // map from a list of types to a canonical instance +} + +func newCanonizer() *canonizer { + c := &canonizer{} + h := typeutil.MakeHasher() + c.types.SetHasher(h) + c.lists.hasher = h + return c +} + +// List returns a canonical representative of a list of types. +// Representative of the empty list is nil. +func (c *canonizer) List(ts []types.Type) *typeList { + if len(ts) == 0 { + return nil + } + + unaliasAll := func(ts []types.Type) []types.Type { + // Is there some top level alias?
+ var found bool + for _, t := range ts { + if _, ok := t.(*aliases.Alias); ok { + found = true + break + } + } + if !found { + return ts // no top level alias + } + + cp := make([]types.Type, len(ts)) // copy with top level aliases removed. + for i, t := range ts { + cp[i] = aliases.Unalias(t) + } + return cp + } + l := unaliasAll(ts) + + c.mu.Lock() + defer c.mu.Unlock() + return c.lists.rep(l) +} + +// Type returns a canonical representative of type T. +// Removes top-level aliases. +// +// For performance, reasons the canonical instance is order-dependent, +// and may contain deeply nested aliases. +func (c *canonizer) Type(T types.Type) types.Type { + T = aliases.Unalias(T) // remove the top level alias. + + c.mu.Lock() + defer c.mu.Unlock() + + if r := c.types.At(T); r != nil { + return r.(types.Type) + } + c.types.Set(T, T) + return T +} + +// A type for representing a canonized list of types. +type typeList []types.Type + +func (l *typeList) identical(ts []types.Type) bool { + if l == nil { + return len(ts) == 0 + } + n := len(*l) + if len(ts) != n { + return false + } + for i, left := range *l { + right := ts[i] + if !types.Identical(left, right) { + return false + } + } + return true +} + +type typeListMap struct { + hasher typeutil.Hasher + buckets map[uint32][]*typeList +} + +// rep returns a canonical representative of a slice of types. +func (m *typeListMap) rep(ts []types.Type) *typeList { + if m == nil || len(ts) == 0 { + return nil + } + + if m.buckets == nil { + m.buckets = make(map[uint32][]*typeList) + } + + h := m.hash(ts) + bucket := m.buckets[h] + for _, l := range bucket { + if l.identical(ts) { + return l + } + } + + // not present. create a representative. + cp := make(typeList, len(ts)) + copy(cp, ts) + rep := &cp + + m.buckets[h] = append(bucket, rep) + return rep +} + +func (m *typeListMap) hash(ts []types.Type) uint32 { + if m == nil { + return 0 + } + // Some smallish prime far away from typeutil.Hash. + n := len(ts) + h := uint32(13619) + 2*uint32(n) + for i := 0; i < n; i++ { + h += 3 * m.hasher.Hash(ts[i]) + } + return h +} + +// instantiateMethod instantiates m with targs and returns a canonical representative for this method. +func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctxt *types.Context) *types.Func { + recv := recvType(m) + if p, ok := aliases.Unalias(recv).(*types.Pointer); ok { + recv = p.Elem() + } + named := aliases.Unalias(recv).(*types.Named) + inst, err := types.Instantiate(ctxt, named.Origin(), targs, false) + if err != nil { + panic(err) + } + rep := canon.Type(inst) + obj, _, _ := types.LookupFieldOrMethod(rep, true, m.Pkg(), m.Name()) + return obj.(*types.Func) +} + +// Exposed to ssautil using the linkname hack. +func isSyntactic(pkg *Package) bool { return pkg.syntax } + +// mapValues returns a new unordered array of map values. +func mapValues[K comparable, V any](m map[K]V) []V { + vals := make([]V, 0, len(m)) + for _, fn := range m { + vals = append(vals, fn) + } + return vals + +} diff --git a/vendor/golang.org/x/tools/go/ssa/util_go120.go b/vendor/golang.org/x/tools/go/ssa/util_go120.go new file mode 100644 index 0000000..9e8ea87 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/util_go120.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.20 +// +build go1.20 + +package ssa + +import ( + "go/ast" + "go/token" +) + +func init() { + rangePosition = func(rng *ast.RangeStmt) token.Pos { return rng.Range } +} diff --git a/vendor/golang.org/x/tools/go/ssa/wrappers.go b/vendor/golang.org/x/tools/go/ssa/wrappers.go new file mode 100644 index 0000000..d09b4f2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ssa/wrappers.go @@ -0,0 +1,348 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssa + +// This file defines synthesis of Functions that delegate to declared +// methods; they come in three kinds: +// +// (1) wrappers: methods that wrap declared methods, performing +// implicit pointer indirections and embedded field selections. +// +// (2) thunks: funcs that wrap declared methods. Like wrappers, +// thunks perform indirections and field selections. The thunk's +// first parameter is used as the receiver for the method call. +// +// (3) bounds: funcs that wrap declared methods. The bound's sole +// free variable, supplied by a closure, is used as the receiver +// for the method call. No indirections or field selections are +// performed since they can be done before the call. + +import ( + "fmt" + + "go/token" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// -- wrappers ----------------------------------------------------------- + +// createWrapper returns a synthetic method that delegates to the +// declared method denoted by meth.Obj(), first performing any +// necessary pointer indirections or field selections implied by meth. +// +// The resulting method's receiver type is meth.Recv(). +// +// This function is versatile but quite subtle! Consider the +// following axes of variation when making changes: +// - optional receiver indirection +// - optional implicit field selections +// - meth.Obj() may denote a concrete or an interface method +// - the result may be a thunk or a wrapper. +func createWrapper(prog *Program, sel *selection) *Function { + obj := sel.obj.(*types.Func) // the declared function + sig := sel.typ.(*types.Signature) // type of this wrapper + + var recv *types.Var // wrapper's receiver or thunk's params[0] + name := obj.Name() + var description string + if sel.kind == types.MethodExpr { + name += "$thunk" + description = "thunk" + recv = sig.Params().At(0) + } else { + description = "wrapper" + recv = sig.Recv() + } + + description = fmt.Sprintf("%s for %s", description, sel.obj) + if prog.mode&LogSource != 0 { + defer logStack("create %s to (%s)", description, recv.Type())() + } + /* method wrapper */ + return &Function{ + name: name, + method: sel, + object: obj, + Signature: sig, + Synthetic: description, + Prog: prog, + pos: obj.Pos(), + // wrappers have no syntax + build: (*builder).buildWrapper, + syntax: nil, + info: nil, + goversion: "", + } +} + +// buildWrapper builds fn.Body for a method wrapper. 
+func (b *builder) buildWrapper(fn *Function) { + var recv *types.Var // wrapper's receiver or thunk's params[0] + var start int // first regular param + if fn.method.kind == types.MethodExpr { + recv = fn.Signature.Params().At(0) + start = 1 + } else { + recv = fn.Signature.Recv() + } + + fn.startBody() + fn.addSpilledParam(recv) + createParams(fn, start) + + indices := fn.method.index + + var v Value = fn.Locals[0] // spilled receiver + if isPointer(fn.method.recv) { + v = emitLoad(fn, v) + + // For simple indirection wrappers, perform an informative nil-check: + // "value method (T).f called using nil *T pointer" + if len(indices) == 1 && !isPointer(recvType(fn.object)) { + var c Call + c.Call.Value = &Builtin{ + name: "ssa:wrapnilchk", + sig: types.NewSignature(nil, + types.NewTuple(anonVar(fn.method.recv), anonVar(tString), anonVar(tString)), + types.NewTuple(anonVar(fn.method.recv)), false), + } + c.Call.Args = []Value{ + v, + stringConst(typeparams.MustDeref(fn.method.recv).String()), + stringConst(fn.method.obj.Name()), + } + c.setType(v.Type()) + v = fn.emit(&c) + } + } + + // Invariant: v is a pointer, either + // value of *A receiver param, or + // address of A spilled receiver. + + // We use pointer arithmetic (FieldAddr possibly followed by + // Load) in preference to value extraction (Field possibly + // preceded by Load). + + v = emitImplicitSelections(fn, v, indices[:len(indices)-1], token.NoPos) + + // Invariant: v is a pointer, either + // value of implicit *C field, or + // address of implicit C field. + + var c Call + if r := recvType(fn.object); !types.IsInterface(r) { // concrete method + if !isPointer(r) { + v = emitLoad(fn, v) + } + c.Call.Value = fn.Prog.objectMethod(fn.object, b) + c.Call.Args = append(c.Call.Args, v) + } else { + c.Call.Method = fn.object + c.Call.Value = emitLoad(fn, v) // interface (possibly a typeparam) + } + for _, arg := range fn.Params[1:] { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c) + fn.finishBody() +} + +// createParams creates parameters for wrapper method fn based on its +// Signature.Params, which do not include the receiver. +// start is the index of the first regular parameter to use. +func createParams(fn *Function, start int) { + tparams := fn.Signature.Params() + for i, n := start, tparams.Len(); i < n; i++ { + fn.addParamVar(tparams.At(i)) + } +} + +// -- bounds ----------------------------------------------------------- + +// createBound returns a bound method wrapper (or "bound"), a synthetic +// function that delegates to a concrete or interface method denoted +// by obj. The resulting function has no receiver, but has one free +// variable which will be used as the method's receiver in the +// tail-call. +// +// Use MakeClosure with such a wrapper to construct a bound method +// closure. e.g.: +// +// type T int or: type T interface { meth() } +// func (t T) meth() +// var t T +// f := t.meth +// f() // calls t.meth() +// +// f is a closure of a synthetic wrapper defined as if by: +// +// f := func() { return t.meth() } +// +// Unlike createWrapper, createBound need perform no indirection or field +// selections because that can be done before the closure is +// constructed. 
+func createBound(prog *Program, obj *types.Func) *Function { + description := fmt.Sprintf("bound method wrapper for %s", obj) + if prog.mode&LogSource != 0 { + defer logStack("%s", description)() + } + /* bound method wrapper */ + fn := &Function{ + name: obj.Name() + "$bound", + object: obj, + Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver + Synthetic: description, + Prog: prog, + pos: obj.Pos(), + // wrappers have no syntax + build: (*builder).buildBound, + syntax: nil, + info: nil, + goversion: "", + } + fn.FreeVars = []*FreeVar{{name: "recv", typ: recvType(obj), parent: fn}} // (cyclic) + return fn +} + +// buildBound builds fn.Body for a bound method closure. +func (b *builder) buildBound(fn *Function) { + fn.startBody() + createParams(fn, 0) + var c Call + + recv := fn.FreeVars[0] + if !types.IsInterface(recvType(fn.object)) { // concrete + c.Call.Value = fn.Prog.objectMethod(fn.object, b) + c.Call.Args = []Value{recv} + } else { + c.Call.Method = fn.object + c.Call.Value = recv // interface (possibly a typeparam) + } + for _, arg := range fn.Params { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c) + fn.finishBody() +} + +// -- thunks ----------------------------------------------------------- + +// createThunk returns a thunk, a synthetic function that delegates to a +// concrete or interface method denoted by sel.obj. The resulting +// function has no receiver, but has an additional (first) regular +// parameter. +// +// Precondition: sel.kind == types.MethodExpr. +// +// type T int or: type T interface { meth() } +// func (t T) meth() +// f := T.meth +// var t T +// f(t) // calls t.meth() +// +// f is a synthetic wrapper defined as if by: +// +// f := func(t T) { return t.meth() } +func createThunk(prog *Program, sel *selection) *Function { + if sel.kind != types.MethodExpr { + panic(sel) + } + + fn := createWrapper(prog, sel) + if fn.Signature.Recv() != nil { + panic(fn) // unexpected receiver + } + + return fn +} + +func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { + return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) +} + +// A local version of *types.Selection. +// Needed for some additional control, such as creating a MethodExpr for an instantiation. +type selection struct { + kind types.SelectionKind + recv types.Type + typ types.Type + obj types.Object + index []int + indirect bool +} + +func toSelection(sel *types.Selection) *selection { + return &selection{ + kind: sel.Kind(), + recv: sel.Recv(), + typ: sel.Type(), + obj: sel.Obj(), + index: sel.Index(), + indirect: sel.Indirect(), + } +} + +// -- instantiations -------------------------------------------------- + +// buildInstantiationWrapper builds the body of an instantiation +// wrapper fn. The body calls the original generic function, +// bracketed by ChangeType conversions on its arguments and results. +func (b *builder) buildInstantiationWrapper(fn *Function) { + orig := fn.topLevelOrigin + sig := fn.Signature + + fn.startBody() + if sig.Recv() != nil { + fn.addParamVar(sig.Recv()) + } + createParams(fn, 0) + + // Create body. Add a call to origin generic function + // and make type changes between argument and parameters, + // as well as return values. + var c Call + c.Call.Value = orig + if res := orig.Signature.Results(); res.Len() == 1 { + c.typ = res.At(0).Type() + } else { + c.typ = res + } + + // parameter of instance becomes an argument to the call + // to the original generic function. 
+ argOffset := 0 + for i, arg := range fn.Params { + var typ types.Type + if i == 0 && sig.Recv() != nil { + typ = orig.Signature.Recv().Type() + argOffset = 1 + } else { + typ = orig.Signature.Params().At(i - argOffset).Type() + } + c.Call.Args = append(c.Call.Args, emitTypeCoercion(fn, arg, typ)) + } + + results := fn.emit(&c) + var ret Return + switch res := sig.Results(); res.Len() { + case 0: + // no results, do nothing. + case 1: + ret.Results = []Value{emitTypeCoercion(fn, results, res.At(0).Type())} + default: + for i := 0; i < sig.Results().Len(); i++ { + v := emitExtract(fn, results, i) + ret.Results = append(ret.Results, emitTypeCoercion(fn, v, res.At(i).Type())) + } + } + + fn.emit(&ret) + fn.currentBlock = nil + + fn.finishBody() +} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 0000000..9ada177 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,788 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "go/types" + "strconv" + "strings" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" +) + +// TODO(adonovan): think about generic aliases. + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. 
+// - The TT operators are encoded as [EKPRUTrCa]; +// two of these ({,Recv}TypeParams) require an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects +} + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? 
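+//
+// Illustrative sketch (an editorial addition, not part of the upstream
+// source): from a client package, a path produced here is intended to
+// round-trip through Object. Assuming pkg is the *types.Package that
+// defines obj:
+//
+//	path, err := objectpath.For(obj)
+//	if err == nil {
+//		obj2, _ := objectpath.Object(pkg, path)
+//		// obj2 denotes the same entity as obj within pkg.
+//	}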
+// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func (enc *Encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. 
+ empty := make([]byte, 0, 48) // initial space + objs := enc.scopeObjects(scope) + for _, o := range objs { + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, o.Name()...) + path = append(path, opType) + + T := o.Type() + if alias, ok := T.(*aliases.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { + return Path(r), nil + } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil { + return Path(r), nil + } + + } else if tname.IsAlias() { + // legacy alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + + } else if named, ok := T.(*types.Named); ok { + // defined (named) type + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, o := range objs { + path := append(empty, o.Name()...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + path = append(path, opType) + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. 
Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if meth.Origin() != meth { + return "", false + } + + _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + if named == nil { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *aliases.Alias: + return find(obj, aliases.Unalias(T), path, seen) + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. 
+ return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil { + return r + } + if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, op, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + pathstr := string(p) + if pathstr == "" { + return nil, fmt.Errorf("empty path") + } + + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *types.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. 
+ var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFMTr] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam, opRecvTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + t = aliases.Unalias(t) + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opRhs: + if alias, ok := t.(*aliases.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. 
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opRecvTypeParam: + sig, ok := t.(*types.Signature) // Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + rtparams := sig.RecvTypeParams() + if n := rtparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = rtparams.At(index) + + case opConstraint: + tparam, ok := t.(*types.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) + + default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj == nil { + panic(p) // path does not end in an object-valued operator + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m + } + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs + } + return objs +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 0000000..90dc541 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,69 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/typeparams" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + fun := astutil.Unparen(call.Fun) + + // Look through type instantiation if necessary. + isInstance := false + switch fun.(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + // When extracting the callee from an *IndexExpr, we need to check that + // it is a *types.Func and not a *types.Var. + // Example: Don't match a slice m within the expression `m[0]()`. + isInstance = true + fun, _, _, _ = typeparams.UnpackIndexExpr(fun) + } + + var obj types.Object + switch fun := fun.(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + // A Func is required to match instantiations. + if _, ok := obj.(*types.Func); isInstance && !ok { + return nil // Was not a Func. + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 0000000..b81ce0c --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 0000000..a92f80d --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,518 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to any values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" + +import ( + "bytes" + "fmt" + "go/types" + "reflect" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary any values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. +type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is require around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. 
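+//
+// A minimal illustrative sketch (an editorial addition, not part of the
+// upstream source) of typical client use:
+//
+//	var m typeutil.Map                    // the zero value is ready to use
+//	m.Set(types.Typ[types.Int], "an int") // returns nil: no previous entry
+//	_ = m.At(types.Typ[types.Int])        // yields "an int"
+//	_ = m.Len()                           // yields 1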
+func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 + + // ptrMap records pointer identity. + ptrMap map[any]uint32 + + // sigTParams holds type parameters from the signature being hashed. + // Signatures are considered identical modulo renaming of type parameters, so + // within the scope of a signature type the identity of the signature's type + // parameters is just their index. + // + // Since the language does not currently support referring to uninstantiated + // generic types or functions, and instantiated signatures do not have type + // parameter lists, we should never encounter a second non-empty type + // parameter list when hashing a generic signature. + sigTParams *types.TypeParamList +} + +// MakeHasher returns a new Hasher instance. 
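+//
+// Illustrative sketch (an editorial addition, not part of the upstream
+// source): as described at SetHasher, one Hasher may be shared by several
+// Maps with overlapping keys to amortize hashing (callees and results
+// below are arbitrary example variables):
+//
+//	h := typeutil.MakeHasher()
+//	var callees, results typeutil.Map
+//	callees.SetHasher(h)
+//	results.SetHasher(h)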
+func MakeHasher() Hasher { + return Hasher{ + memo: make(map[types.Type]uint32), + ptrMap: make(map[any]uint32), + sigTParams: nil, + } +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *aliases.Alias: + return h.Hash(aliases.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + // Use a separate hasher for types inside of the signature, where type + // parameter identity is modified to be (index, constraint). We must use a + // new memo for this hasher as type identity may be affected by this + // masking. For example, in func[T any](*T), the identity of *T depends on + // whether we are mapping the argument in isolation, or recursively as part + // of hashing the signature. + // + // We should never encounter a generic signature while hashing another + // generic signature, but defensively set sigTParams only if h.mask is + // unset. + tparams := t.TypeParams() + if h.sigTParams == nil && tparams.Len() != 0 { + h = Hasher{ + // There may be something more efficient than discarding the existing + // memo, but it would require detecting whether types are 'tainted' by + // references to type parameters. + memo: make(map[types.Type]uint32), + // Re-using ptrMap ensures that pointer identity is preserved in this + // hasher. + ptrMap: h.ptrMap, + sigTParams: tparams, + } + } + + for i := 0; i < tparams.Len(); i++ { + tparam := tparams.At(i) + hash += 7 * h.Hash(tparam.Constraint()) + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. 
+ if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + hash := h.hashPtr(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.Hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} + +func (h Hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. + terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. + if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h Hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.Hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns a hash of the type parameter t, with a hash value +// depending on whether t is contained in h.sigTParams. +// +// If h.sigTParams is set and contains t, then we are in the process of hashing +// a signature, and the hash value of t must depend only on t's index and +// constraint: signatures are considered identical modulo type parameter +// renaming. To avoid infinite recursion, we only hash the type parameter +// index, and rely on types.Identical to handle signatures where constraints +// are not identical. +// +// Otherwise the hash of t depends only on t's pointer identity. +func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { + if h.sigTParams != nil { + i := t.Index() + if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { + return 9173 + 3*uint32(i) + } + } + return h.hashPtr(t.Obj()) +} + +// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that +// pointers values are not dependent on the GC. +func (h Hasher) hashPtr(ptr any) uint32 { + if hash, ok := h.ptrMap[ptr]; ok { + return hash + } + hash := uint32(reflect.ValueOf(ptr).Pointer()) + h.ptrMap[ptr] = hash + return hash +} + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. +// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h Hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. 
+ switch t := t.(type) { + case *aliases.Alias: + return h.shallowHash(aliases.Unalias(t)) + + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow. + return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashPtr(t.Obj()) + + case *types.TypeParam: + return h.hashPtr(t.Obj()) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 0000000..bd71aaf --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,73 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" + + "golang.org/x/tools/internal/aliases" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := aliases.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := aliases.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. 
+ msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 0000000..a0c1a60 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := aliases.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} -- cgit v1.2.3