Source file src/cmd/compile/internal/inline/inl.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  //
     5  // The inlining facility makes 2 passes: first CanInline determines which
     6  // functions are suitable for inlining, and for those that are it
     7  // saves a copy of the body. Then InlineCalls walks each function body to
     8  // expand calls to inlinable functions.
     9  //
    10  // The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
     11  // making 1 the default and -l disables it. Additional levels (beyond -l) may be buggy and
    12  // are not supported.
    13  //      0: disabled
     14  //      1: 80-node leaf functions, one-liners, panic, lazy typechecking (default)
    15  //      2: (unassigned)
    16  //      3: (unassigned)
    17  //      4: allow non-leaf functions
    18  //
    19  // At some point this may get another default and become switch-offable with -N.
    20  //
     21  // The -d typecheckinl flag enables early typechecking of all imported bodies,
    22  // which is useful to flush out bugs.
    23  //
     24  // The Debug.m flag enables diagnostic output. A single -m is useful for verifying
     25  // which calls get inlined or not; higher levels are for debugging and may go away at any point.
    26  
    27  package inline
    28  
    29  import (
    30  	"fmt"
    31  	"go/constant"
    32  	"internal/buildcfg"
    33  	"strconv"
    34  	"strings"
    35  
    36  	"cmd/compile/internal/base"
    37  	"cmd/compile/internal/inline/inlheur"
    38  	"cmd/compile/internal/ir"
    39  	"cmd/compile/internal/logopt"
    40  	"cmd/compile/internal/pgoir"
    41  	"cmd/compile/internal/typecheck"
    42  	"cmd/compile/internal/types"
    43  	"cmd/internal/obj"
    44  	"cmd/internal/pgo"
    45  	"cmd/internal/src"
    46  )
    47  
    48  // Inlining budget parameters, gathered in one place
    49  const (
    50  	inlineMaxBudget       = 80
    51  	inlineExtraAppendCost = 0
    52  	// default is to inline if there's at most one call. -l=4 overrides this by using 1 instead.
     53  	inlineExtraCallCost  = 57              // 57 was benchmarked to provide the most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742
    54  	inlineParamCallCost  = 17              // calling a parameter only costs this much extra (inlining might expose a constant function)
    55  	inlineExtraPanicCost = 1               // do not penalize inlining panics.
    56  	inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.
    57  
    58  	inlineBigFunctionNodes      = 5000                 // Functions with this many nodes are considered "big".
    59  	inlineBigFunctionMaxCost    = 20                   // Max cost of inlinee when inlining into a "big" function.
    60  	inlineClosureCalledOnceCost = 10 * inlineMaxBudget // if a closure is just called once, inline it.
    61  )
    62  
    63  var (
    64  	// List of all hot callee nodes.
    65  	// TODO(prattmic): Make this non-global.
    66  	candHotCalleeMap = make(map[*pgoir.IRNode]struct{})
    67  
    68  	// Set of functions that contain hot call sites.
    69  	hasHotCall = make(map[*ir.Func]struct{})
    70  
    71  	// List of all hot call sites. CallSiteInfo.Callee is always nil.
    72  	// TODO(prattmic): Make this non-global.
    73  	candHotEdgeMap = make(map[pgoir.CallSiteInfo]struct{})
    74  
    75  	// Threshold in percentage for hot callsite inlining.
    76  	inlineHotCallSiteThresholdPercent float64
    77  
    78  	// Threshold in CDF percentage for hot callsite inlining,
    79  	// that is, for a threshold of X the hottest callsites that
    80  	// make up the top X% of total edge weight will be
    81  	// considered hot for inlining candidates.
    82  	inlineCDFHotCallSiteThresholdPercent = float64(99)
    83  
    84  	// Budget increased due to hotness.
    85  	inlineHotMaxBudget int32 = 2000
    86  )
    87  
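         // IsPgoHotFunc reports whether fn was recorded as a hot callee in the
         // PGO profile during PGOInlinePrologue.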
    88  func IsPgoHotFunc(fn *ir.Func, profile *pgoir.Profile) bool {
    89  	if profile == nil {
    90  		return false
    91  	}
    92  	if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
    93  		_, ok := candHotCalleeMap[n]
    94  		return ok
    95  	}
    96  	return false
    97  }
    98  
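         // HasPgoHotInline reports whether a PGO-hot call site has been inlined
         // into fn (see mkinlcall).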
    99  func HasPgoHotInline(fn *ir.Func) bool {
   100  	_, has := hasHotCall[fn]
   101  	return has
   102  }
   103  
    104  // PGOInlinePrologue records the hot callsites from the IR graph.
   105  func PGOInlinePrologue(p *pgoir.Profile) {
   106  	if base.Debug.PGOInlineCDFThreshold != "" {
   107  		if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 {
   108  			inlineCDFHotCallSiteThresholdPercent = s
   109  		} else {
   110  			base.Fatalf("invalid PGOInlineCDFThreshold, must be between 0 and 100")
   111  		}
   112  	}
   113  	var hotCallsites []pgo.NamedCallEdge
   114  	inlineHotCallSiteThresholdPercent, hotCallsites = hotNodesFromCDF(p)
   115  	if base.Debug.PGODebug > 0 {
   116  		fmt.Printf("hot-callsite-thres-from-CDF=%v\n", inlineHotCallSiteThresholdPercent)
   117  	}
   118  
   119  	if x := base.Debug.PGOInlineBudget; x != 0 {
   120  		inlineHotMaxBudget = int32(x)
   121  	}
   122  
   123  	for _, n := range hotCallsites {
   124  		// mark inlineable callees from hot edges
   125  		if callee := p.WeightedCG.IRNodes[n.CalleeName]; callee != nil {
   126  			candHotCalleeMap[callee] = struct{}{}
   127  		}
   128  		// mark hot call sites
   129  		if caller := p.WeightedCG.IRNodes[n.CallerName]; caller != nil && caller.AST != nil {
   130  			csi := pgoir.CallSiteInfo{LineOffset: n.CallSiteOffset, Caller: caller.AST}
   131  			candHotEdgeMap[csi] = struct{}{}
   132  		}
   133  	}
   134  
   135  	if base.Debug.PGODebug >= 3 {
   136  		fmt.Printf("hot-cg before inline in dot format:")
   137  		p.PrintWeightedCallGraphDOT(inlineHotCallSiteThresholdPercent)
   138  	}
   139  }
   140  
   141  // hotNodesFromCDF computes an edge weight threshold and the list of hot
   142  // nodes that make up the given percentage of the CDF. The threshold, as
   143  // a percent, is the lower bound of weight for nodes to be considered hot
    144  // (currently only used in debug prints); in case of equal weights,
    145  // comparing with the threshold may not accurately reflect which nodes are
    146  // considered hot.
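         // For example, with total weight 100, edges weighted 60, 30, and 10 (sorted
         // by weight), and a CDF threshold of 50%, the hottest edge alone already
         // exceeds 50%, so the returned threshold is 60% and only that edge is hot.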
   147  func hotNodesFromCDF(p *pgoir.Profile) (float64, []pgo.NamedCallEdge) {
   148  	cum := int64(0)
   149  	for i, n := range p.NamedEdgeMap.ByWeight {
   150  		w := p.NamedEdgeMap.Weight[n]
   151  		cum += w
   152  		if pgo.WeightInPercentage(cum, p.TotalWeight) > inlineCDFHotCallSiteThresholdPercent {
    153  			// nodes[:i+1] to include the very last node that makes the cumulative weight go over the threshold.
   154  			// (Say, if the CDF threshold is 50% and one hot node takes 60% of weight, we want to
   155  			// include that node instead of excluding it.)
   156  			return pgo.WeightInPercentage(w, p.TotalWeight), p.NamedEdgeMap.ByWeight[:i+1]
   157  		}
   158  	}
   159  	return 0, p.NamedEdgeMap.ByWeight
   160  }
   161  
   162  // CanInlineFuncs computes whether a batch of functions are inlinable.
   163  func CanInlineFuncs(funcs []*ir.Func, profile *pgoir.Profile) {
   164  	if profile != nil {
   165  		PGOInlinePrologue(profile)
   166  	}
   167  
   168  	if base.Flag.LowerL == 0 {
   169  		return
   170  	}
   171  
   172  	ir.VisitFuncsBottomUp(funcs, func(funcs []*ir.Func, recursive bool) {
   173  		for _, fn := range funcs {
   174  			CanInline(fn, profile)
   175  			if inlheur.Enabled() {
   176  				analyzeFuncProps(fn, profile)
   177  			}
   178  		}
   179  	})
   180  }
   181  
   182  // inlineBudget determines the max budget for function 'fn' prior to
   183  // analyzing the hairiness of the body of 'fn'. We pass in the pgo
    184  // profile if available (which can change the budget), along with a
   185  // 'relaxed' flag, which expands the budget slightly to allow for the
   186  // possibility that a call to the function might have its score
   187  // adjusted downwards. If 'verbose' is set, then print a remark where
   188  // we boost the budget due to PGO.
   189  func inlineBudget(fn *ir.Func, profile *pgoir.Profile, relaxed bool, verbose bool) int32 {
   190  	// Update the budget for profile-guided inlining.
   191  	budget := int32(inlineMaxBudget)
   192  	if IsPgoHotFunc(fn, profile) {
   193  		budget = inlineHotMaxBudget
   194  		if verbose {
   195  			fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
   196  		}
   197  	}
   198  	if relaxed {
   199  		budget += inlheur.BudgetExpansion(inlineMaxBudget)
   200  	}
   201  	if fn.ClosureParent != nil {
    202  		// Be very liberal here: if the closure is only called once, the budget is large.
   203  		budget = max(budget, inlineClosureCalledOnceCost)
   204  	}
   205  	return budget
   206  }
   207  
   208  // CanInline determines whether fn is inlineable.
   209  // If so, CanInline saves copies of fn.Body and fn.Dcl in fn.Inl.
   210  // fn and fn.Body will already have been typechecked.
   211  func CanInline(fn *ir.Func, profile *pgoir.Profile) {
   212  	if fn.Nname == nil {
   213  		base.Fatalf("CanInline no nname %+v", fn)
   214  	}
   215  
   216  	var reason string // reason, if any, that the function was not inlined
   217  	if base.Flag.LowerM > 1 || logopt.Enabled() {
   218  		defer func() {
   219  			if reason != "" {
   220  				if base.Flag.LowerM > 1 {
   221  					fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason)
   222  				}
   223  				if logopt.Enabled() {
   224  					logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
   225  				}
   226  			}
   227  		}()
   228  	}
   229  
   230  	reason = InlineImpossible(fn)
   231  	if reason != "" {
   232  		return
   233  	}
   234  	if fn.Typecheck() == 0 {
   235  		base.Fatalf("CanInline on non-typechecked function %v", fn)
   236  	}
   237  
   238  	n := fn.Nname
   239  	if n.Func.InlinabilityChecked() {
   240  		return
   241  	}
   242  	defer n.Func.SetInlinabilityChecked(true)
   243  
   244  	cc := int32(inlineExtraCallCost)
   245  	if base.Flag.LowerL == 4 {
   246  		cc = 1 // this appears to yield better performance than 0.
   247  	}
   248  
    249  	// Use a "relaxed" inline budget if the new inliner is enabled.
   250  	relaxed := inlheur.Enabled()
   251  
   252  	// Compute the inline budget for this func.
   253  	budget := inlineBudget(fn, profile, relaxed, base.Debug.PGODebug > 0)
   254  
   255  	// At this point in the game the function we're looking at may
   256  	// have "stale" autos, vars that still appear in the Dcl list, but
   257  	// which no longer have any uses in the function body (due to
   258  	// elimination by deadcode). We'd like to exclude these dead vars
   259  	// when creating the "Inline.Dcl" field below; to accomplish this,
   260  	// the hairyVisitor below builds up a map of used/referenced
   261  	// locals, and we use this map to produce a pruned Inline.Dcl
   262  	// list. See issue 25459 for more context.
   263  
   264  	visitor := hairyVisitor{
   265  		curFunc:       fn,
   266  		isBigFunc:     IsBigFunc(fn),
   267  		budget:        budget,
   268  		maxBudget:     budget,
   269  		extraCallCost: cc,
   270  		profile:       profile,
   271  	}
   272  	if visitor.tooHairy(fn) {
   273  		reason = visitor.reason
   274  		return
   275  	}
   276  
   277  	n.Func.Inl = &ir.Inline{
   278  		Cost:            budget - visitor.budget,
   279  		Dcl:             pruneUnusedAutos(n.Func.Dcl, &visitor),
   280  		HaveDcl:         true,
   281  		CanDelayResults: canDelayResults(fn),
   282  	}
   283  	if base.Flag.LowerM != 0 || logopt.Enabled() {
   284  		noteInlinableFunc(n, fn, budget-visitor.budget)
   285  	}
   286  }
   287  
   288  // noteInlinableFunc issues a message to the user that the specified
   289  // function is inlinable.
   290  func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) {
   291  	if base.Flag.LowerM > 1 {
   292  		fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body))
   293  	} else if base.Flag.LowerM != 0 {
   294  		fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
   295  	}
   296  	// JSON optimization log output.
   297  	if logopt.Enabled() {
   298  		logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", cost))
   299  	}
   300  }
   301  
   302  // InlineImpossible returns a non-empty reason string if fn is impossible to
   303  // inline regardless of cost or contents.
   304  func InlineImpossible(fn *ir.Func) string {
   305  	var reason string // reason, if any, that the function can not be inlined.
   306  	if fn.Nname == nil {
   307  		reason = "no name"
   308  		return reason
   309  	}
   310  
   311  	// If marked "go:noinline", don't inline.
   312  	if fn.Pragma&ir.Noinline != 0 {
   313  		reason = "marked go:noinline"
   314  		return reason
   315  	}
   316  
   317  	// If marked "go:norace" and -race compilation, don't inline.
   318  	if base.Flag.Race && fn.Pragma&ir.Norace != 0 {
   319  		reason = "marked go:norace with -race compilation"
   320  		return reason
   321  	}
   322  
   323  	// If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
   324  	if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 {
   325  		reason = "marked go:nocheckptr"
   326  		return reason
   327  	}
   328  
   329  	// If marked "go:cgo_unsafe_args", don't inline, since the function
   330  	// makes assumptions about its argument frame layout.
   331  	if fn.Pragma&ir.CgoUnsafeArgs != 0 {
   332  		reason = "marked go:cgo_unsafe_args"
   333  		return reason
   334  	}
   335  
   336  	// If marked as "go:uintptrkeepalive", don't inline, since the keep
   337  	// alive information is lost during inlining.
   338  	//
   339  	// TODO(prattmic): This is handled on calls during escape analysis,
   340  	// which is after inlining. Move prior to inlining so the keep-alive is
   341  	// maintained after inlining.
   342  	if fn.Pragma&ir.UintptrKeepAlive != 0 {
   343  		reason = "marked as having a keep-alive uintptr argument"
   344  		return reason
   345  	}
   346  
   347  	// If marked as "go:uintptrescapes", don't inline, since the escape
   348  	// information is lost during inlining.
   349  	if fn.Pragma&ir.UintptrEscapes != 0 {
   350  		reason = "marked as having an escaping uintptr argument"
   351  		return reason
   352  	}
   353  
   354  	// The nowritebarrierrec checker currently works at function
   355  	// granularity, so inlining yeswritebarrierrec functions can confuse it
   356  	// (#22342). As a workaround, disallow inlining them for now.
   357  	if fn.Pragma&ir.Yeswritebarrierrec != 0 {
   358  		reason = "marked go:yeswritebarrierrec"
   359  		return reason
   360  	}
   361  
    362  	// If a local function has no fn.Body (it is defined outside of Go), it cannot be inlined.
    363  	// Imported functions don't have fn.Body but might have an inline body in fn.Inl.
   364  	if len(fn.Body) == 0 && !typecheck.HaveInlineBody(fn) {
   365  		reason = "no function body"
   366  		return reason
   367  	}
   368  
   369  	return ""
   370  }
   371  
   372  // canDelayResults reports whether inlined calls to fn can delay
   373  // declaring the result parameter until the "return" statement.
   374  func canDelayResults(fn *ir.Func) bool {
   375  	// We can delay declaring+initializing result parameters if:
   376  	// (1) there's exactly one "return" statement in the inlined function;
   377  	// (2) it's not an empty return statement (#44355); and
   378  	// (3) the result parameters aren't named.
   379  
   380  	nreturns := 0
   381  	ir.VisitList(fn.Body, func(n ir.Node) {
   382  		if n, ok := n.(*ir.ReturnStmt); ok {
   383  			nreturns++
   384  			if len(n.Results) == 0 {
   385  				nreturns++ // empty return statement (case 2)
   386  			}
   387  		}
   388  	})
   389  
   390  	if nreturns != 1 {
   391  		return false // not exactly one return statement (case 1)
   392  	}
   393  
   394  	// temporaries for return values.
   395  	for _, param := range fn.Type().Results() {
   396  		if sym := param.Sym; sym != nil && !sym.IsBlank() {
   397  			return false // found a named result parameter (case 3)
   398  		}
   399  	}
   400  
   401  	return true
   402  }
   403  
   404  // hairyVisitor visits a function body to determine its inlining
   405  // hairiness and whether or not it can be inlined.
   406  type hairyVisitor struct {
   407  	// This is needed to access the current caller in the doNode function.
   408  	curFunc       *ir.Func
   409  	isBigFunc     bool
   410  	budget        int32
   411  	maxBudget     int32
   412  	reason        string
   413  	extraCallCost int32
   414  	usedLocals    ir.NameSet
   415  	do            func(ir.Node) bool
   416  	profile       *pgoir.Profile
   417  }
   418  
   419  func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
   420  	v.do = v.doNode // cache closure
   421  	if ir.DoChildren(fn, v.do) {
   422  		return true
   423  	}
   424  	if v.budget < 0 {
   425  		v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", v.maxBudget-v.budget, v.maxBudget)
   426  		return true
   427  	}
   428  	return false
   429  }
   430  
   431  // doNode visits n and its children, updates the state in v, and returns true if
   432  // n makes the current function too hairy for inlining.
   433  func (v *hairyVisitor) doNode(n ir.Node) bool {
   434  	if n == nil {
   435  		return false
   436  	}
   437  opSwitch:
   438  	switch n.Op() {
   439  	// Call is okay if inlinable and we have the budget for the body.
   440  	case ir.OCALLFUNC:
   441  		n := n.(*ir.CallExpr)
   442  		var cheap bool
   443  		if n.Fun.Op() == ir.ONAME {
   444  			name := n.Fun.(*ir.Name)
   445  			if name.Class == ir.PFUNC {
   446  				s := name.Sym()
   447  				fn := s.Name
   448  				switch s.Pkg.Path {
   449  				case "internal/abi":
   450  					switch fn {
   451  					case "NoEscape":
   452  						// Special case for internal/abi.NoEscape. It does just type
   453  						// conversions to appease the escape analysis, and doesn't
   454  						// generate code.
   455  						cheap = true
   456  					}
   457  				case "internal/runtime/sys":
   458  					switch fn {
   459  					case "GetCallerPC", "GetCallerSP":
   460  						// Functions that call GetCallerPC/SP can not be inlined
   461  						// because users expect the PC/SP of the logical caller,
   462  						// but GetCallerPC/SP returns the physical caller.
   463  						v.reason = "call to " + fn
   464  						return true
   465  					}
    466  				case "runtime":
   467  					switch fn {
   468  					case "throw":
   469  						// runtime.throw is a "cheap call" like panic in normal code.
   470  						v.budget -= inlineExtraThrowCost
   471  						break opSwitch
   472  					case "panicrangestate":
   473  						cheap = true
   474  					}
   475  				case "hash/maphash":
   476  					if strings.HasPrefix(fn, "escapeForHash[") {
   477  						// hash/maphash.escapeForHash[T] is a compiler intrinsic
   478  						// implemented in the escape analysis phase.
   479  						cheap = true
   480  					}
   481  				}
   482  			}
   483  			// Special case for coverage counter updates; although
   484  			// these correspond to real operations, we treat them as
   485  			// zero cost for the moment. This is due to the existence
   486  			// of tests that are sensitive to inlining-- if the
   487  			// insertion of coverage instrumentation happens to tip a
   488  			// given function over the threshold and move it from
   489  			// "inlinable" to "not-inlinable", this can cause changes
   490  			// in allocation behavior, which can then result in test
   491  			// failures (a good example is the TestAllocations in
   492  			// crypto/ed25519).
   493  			if isAtomicCoverageCounterUpdate(n) {
   494  				return false
   495  			}
   496  		}
   497  		if n.Fun.Op() == ir.OMETHEXPR {
   498  			if meth := ir.MethodExprName(n.Fun); meth != nil {
   499  				if fn := meth.Func; fn != nil {
   500  					s := fn.Sym()
   501  					if types.RuntimeSymName(s) == "heapBits.nextArena" {
   502  						// Special case: explicitly allow mid-stack inlining of
   503  						// runtime.heapBits.next even though it calls slow-path
   504  						// runtime.heapBits.nextArena.
   505  						cheap = true
   506  					}
   507  					// Special case: on architectures that can do unaligned loads,
   508  					// explicitly mark encoding/binary methods as cheap,
   509  					// because in practice they are, even though our inlining
   510  					// budgeting system does not see that. See issue 42958.
   511  					if base.Ctxt.Arch.CanMergeLoads && s.Pkg.Path == "encoding/binary" {
   512  						switch s.Name {
   513  						case "littleEndian.Uint64", "littleEndian.Uint32", "littleEndian.Uint16",
   514  							"bigEndian.Uint64", "bigEndian.Uint32", "bigEndian.Uint16",
   515  							"littleEndian.PutUint64", "littleEndian.PutUint32", "littleEndian.PutUint16",
   516  							"bigEndian.PutUint64", "bigEndian.PutUint32", "bigEndian.PutUint16",
   517  							"littleEndian.AppendUint64", "littleEndian.AppendUint32", "littleEndian.AppendUint16",
   518  							"bigEndian.AppendUint64", "bigEndian.AppendUint32", "bigEndian.AppendUint16":
   519  							cheap = true
   520  						}
   521  					}
   522  				}
   523  			}
   524  		}
   525  
    526  		// A call to a parameter is optimistically a cheap call: if it's a constant function,
    527  		// perhaps it will inline, and it can also simplify escape analysis.
   528  		extraCost := v.extraCallCost
   529  
   530  		if n.Fun.Op() == ir.ONAME {
   531  			name := n.Fun.(*ir.Name)
   532  			if name.Class == ir.PFUNC {
   533  				// Special case: on architectures that can do unaligned loads,
   534  				// explicitly mark internal/byteorder methods as cheap,
   535  				// because in practice they are, even though our inlining
   536  				// budgeting system does not see that. See issue 42958.
   537  				if base.Ctxt.Arch.CanMergeLoads && name.Sym().Pkg.Path == "internal/byteorder" {
   538  					switch name.Sym().Name {
   539  					case "LEUint64", "LEUint32", "LEUint16",
   540  						"BEUint64", "BEUint32", "BEUint16",
   541  						"LEPutUint64", "LEPutUint32", "LEPutUint16",
   542  						"BEPutUint64", "BEPutUint32", "BEPutUint16",
   543  						"LEAppendUint64", "LEAppendUint32", "LEAppendUint16",
   544  						"BEAppendUint64", "BEAppendUint32", "BEAppendUint16":
   545  						cheap = true
   546  					}
   547  				}
   548  			}
   549  			if name.Class == ir.PPARAM || name.Class == ir.PAUTOHEAP && name.IsClosureVar() {
   550  				extraCost = min(extraCost, inlineParamCallCost)
   551  			}
   552  		}
   553  
   554  		if cheap {
   555  			break // treat like any other node, that is, cost of 1
   556  		}
   557  
   558  		if ir.IsIntrinsicCall(n) {
   559  			// Treat like any other node.
   560  			break
   561  		}
   562  
   563  		if callee := inlCallee(v.curFunc, n.Fun, v.profile, false); callee != nil && typecheck.HaveInlineBody(callee) {
   564  			// Check whether we'd actually inline this call. Set
   565  			// log == false since we aren't actually doing inlining
   566  			// yet.
   567  			if ok, _, _ := canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false, false); ok {
   568  				// mkinlcall would inline this call [1], so use
   569  				// the cost of the inline body as the cost of
   570  				// the call, as that is what will actually
   571  				// appear in the code.
   572  				//
   573  				// [1] This is almost a perfect match to the
   574  				// mkinlcall logic, except that
   575  				// canInlineCallExpr considers inlining cycles
   576  				// by looking at what has already been inlined.
   577  				// Since we haven't done any inlining yet we
   578  				// will miss those.
   579  				//
   580  				// TODO: in the case of a single-call closure, the inlining budget here is potentially much, much larger.
   581  				//
   582  				v.budget -= callee.Inl.Cost
   583  				break
   584  			}
   585  		}
   586  
   587  		// Call cost for non-leaf inlining.
   588  		v.budget -= extraCost
   589  
   590  	case ir.OCALLMETH:
   591  		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
   592  
   593  	// Things that are too hairy, irrespective of the budget
   594  	case ir.OCALL, ir.OCALLINTER:
   595  		// Call cost for non-leaf inlining.
   596  		v.budget -= v.extraCallCost
   597  
   598  	case ir.OPANIC:
   599  		n := n.(*ir.UnaryExpr)
   600  		if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() {
   601  			// Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining.
   602  			// Before CL 284412, these conversions were introduced later in the
   603  			// compiler, so they didn't count against inlining budget.
   604  			v.budget++
   605  		}
   606  		v.budget -= inlineExtraPanicCost
   607  
   608  	case ir.ORECOVER:
   609  		base.FatalfAt(n.Pos(), "ORECOVER missed typecheck")
   610  	case ir.ORECOVERFP:
   611  		// recover matches the argument frame pointer to find
   612  		// the right panic value, so it needs an argument frame.
   613  		v.reason = "call to recover"
   614  		return true
   615  
   616  	case ir.OCLOSURE:
   617  		if base.Debug.InlFuncsWithClosures == 0 {
   618  			v.reason = "not inlining functions with closures"
   619  			return true
   620  		}
   621  
   622  		// TODO(danscales): Maybe make budget proportional to number of closure
   623  		// variables, e.g.:
   624  		//v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
   625  		// TODO(austin): However, if we're able to inline this closure into
   626  		// v.curFunc, then we actually pay nothing for the closure captures. We
   627  		// should try to account for that if we're going to account for captures.
   628  		v.budget -= 15
   629  
   630  	case ir.OGO, ir.ODEFER, ir.OTAILCALL:
   631  		v.reason = "unhandled op " + n.Op().String()
   632  		return true
   633  
   634  	case ir.OAPPEND:
   635  		v.budget -= inlineExtraAppendCost
   636  
   637  	case ir.OADDR:
   638  		n := n.(*ir.AddrExpr)
   639  		// Make "&s.f" cost 0 when f's offset is zero.
   640  		if dot, ok := n.X.(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOT || dot.Op() == ir.ODOTPTR) {
   641  			if _, ok := dot.X.(*ir.Name); ok && dot.Selection.Offset == 0 {
   642  				v.budget += 2 // undo ir.OADDR+ir.ODOT/ir.ODOTPTR
   643  			}
   644  		}
   645  
   646  	case ir.ODEREF:
   647  		// *(*X)(unsafe.Pointer(&x)) is low-cost
   648  		n := n.(*ir.StarExpr)
   649  
   650  		ptr := n.X
   651  		for ptr.Op() == ir.OCONVNOP {
   652  			ptr = ptr.(*ir.ConvExpr).X
   653  		}
   654  		if ptr.Op() == ir.OADDR {
   655  			v.budget += 1 // undo half of default cost of ir.ODEREF+ir.OADDR
   656  		}
   657  
   658  	case ir.OCONVNOP:
   659  		// This doesn't produce code, but the children might.
   660  		v.budget++ // undo default cost
   661  
   662  	case ir.OFALL, ir.OTYPE:
   663  		// These nodes don't produce code; omit from inlining budget.
   664  		return false
   665  
   666  	case ir.OIF:
   667  		n := n.(*ir.IfStmt)
   668  		if ir.IsConst(n.Cond, constant.Bool) {
   669  			// This if and the condition cost nothing.
   670  			if doList(n.Init(), v.do) {
   671  				return true
   672  			}
   673  			if ir.BoolVal(n.Cond) {
   674  				return doList(n.Body, v.do)
   675  			} else {
   676  				return doList(n.Else, v.do)
   677  			}
   678  		}
   679  
   680  	case ir.ONAME:
   681  		n := n.(*ir.Name)
   682  		if n.Class == ir.PAUTO {
   683  			v.usedLocals.Add(n)
   684  		}
   685  
   686  	case ir.OBLOCK:
   687  		// The only OBLOCK we should see at this point is an empty one.
    688  		// In any event, let the walk of the children below take care of the statements,
   689  		// and don't charge for the OBLOCK itself. The ++ undoes the -- below.
   690  		v.budget++
   691  
   692  	case ir.OMETHVALUE, ir.OSLICELIT:
   693  		v.budget-- // Hack for toolstash -cmp.
   694  
   695  	case ir.OMETHEXPR:
   696  		v.budget++ // Hack for toolstash -cmp.
   697  
   698  	case ir.OAS2:
   699  		n := n.(*ir.AssignListStmt)
   700  
   701  		// Unified IR unconditionally rewrites:
   702  		//
   703  		//	a, b = f()
   704  		//
   705  		// into:
   706  		//
   707  		//	DCL tmp1
   708  		//	DCL tmp2
   709  		//	tmp1, tmp2 = f()
   710  		//	a, b = tmp1, tmp2
   711  		//
   712  		// so that it can insert implicit conversions as necessary. To
   713  		// minimize impact to the existing inlining heuristics (in
   714  		// particular, to avoid breaking the existing inlinability regress
   715  		// tests), we need to compensate for this here.
   716  		//
   717  		// See also identical logic in IsBigFunc.
   718  		if len(n.Rhs) > 0 {
   719  			if init := n.Rhs[0].Init(); len(init) == 1 {
   720  				if _, ok := init[0].(*ir.AssignListStmt); ok {
   721  					// 4 for each value, because each temporary variable now
   722  					// appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
   723  					//
   724  					// 1 for the extra "tmp1, tmp2 = f()" assignment statement.
   725  					v.budget += 4*int32(len(n.Lhs)) + 1
   726  				}
   727  			}
   728  		}
   729  
   730  	case ir.OAS:
   731  		// Special case for coverage counter updates and coverage
   732  		// function registrations. Although these correspond to real
   733  		// operations, we treat them as zero cost for the moment. This
   734  		// is primarily due to the existence of tests that are
   735  		// sensitive to inlining-- if the insertion of coverage
   736  		// instrumentation happens to tip a given function over the
   737  		// threshold and move it from "inlinable" to "not-inlinable",
   738  		// this can cause changes in allocation behavior, which can
   739  		// then result in test failures (a good example is the
   740  		// TestAllocations in crypto/ed25519).
   741  		n := n.(*ir.AssignStmt)
   742  		if n.X.Op() == ir.OINDEX && isIndexingCoverageCounter(n.X) {
   743  			return false
   744  		}
   745  	}
   746  
   747  	v.budget--
   748  
   749  	// When debugging, don't stop early, to get full cost of inlining this function
   750  	if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
   751  		v.reason = "too expensive"
   752  		return true
   753  	}
   754  
   755  	return ir.DoChildren(n, v.do)
   756  }
   757  
   758  // IsBigFunc reports whether fn is a "big" function.
   759  //
   760  // Note: The criteria for "big" is heuristic and subject to change.
   761  func IsBigFunc(fn *ir.Func) bool {
   762  	budget := inlineBigFunctionNodes
   763  	return ir.Any(fn, func(n ir.Node) bool {
   764  		// See logic in hairyVisitor.doNode, explaining unified IR's
   765  		// handling of "a, b = f()" assignments.
   766  		if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 && len(n.Rhs) > 0 {
   767  			if init := n.Rhs[0].Init(); len(init) == 1 {
   768  				if _, ok := init[0].(*ir.AssignListStmt); ok {
   769  					budget += 4*len(n.Lhs) + 1
   770  				}
   771  			}
   772  		}
   773  
   774  		budget--
   775  		return budget <= 0
   776  	})
   777  }
   778  
   779  // inlineCallCheck returns whether a call will never be inlineable
   780  // for basic reasons, and whether the call is an intrinisic call.
   781  // The intrinsic result singles out intrinsic calls for debug logging.
   782  func inlineCallCheck(callerfn *ir.Func, call *ir.CallExpr) (bool, bool) {
   783  	if base.Flag.LowerL == 0 {
   784  		return false, false
   785  	}
   786  	if call.Op() != ir.OCALLFUNC {
   787  		return false, false
   788  	}
   789  	if call.GoDefer {
   790  		return false, false
   791  	}
   792  
   793  	// Prevent inlining some reflect.Value methods when using checkptr,
   794  	// even when package reflect was compiled without it (#35073).
   795  	if base.Debug.Checkptr != 0 && call.Fun.Op() == ir.OMETHEXPR {
   796  		if method := ir.MethodExprName(call.Fun); method != nil {
   797  			switch types.ReflectSymName(method.Sym()) {
   798  			case "Value.UnsafeAddr", "Value.Pointer":
   799  				return false, false
   800  			}
   801  		}
   802  	}
   803  
   804  	// hash/maphash.escapeForHash[T] is a compiler intrinsic implemented
   805  	// in the escape analysis phase.
   806  	if fn := ir.StaticCalleeName(call.Fun); fn != nil && fn.Sym().Pkg.Path == "hash/maphash" &&
   807  		strings.HasPrefix(fn.Sym().Name, "escapeForHash[") {
   808  		return false, true
   809  	}
   810  
   811  	if ir.IsIntrinsicCall(call) {
   812  		return false, true
   813  	}
   814  	return true, false
   815  }
   816  
   817  // InlineCallTarget returns the resolved-for-inlining target of a call.
    818  // It does not guarantee that the target can be inlined, though
   819  // obvious exclusions are applied.
   820  func InlineCallTarget(callerfn *ir.Func, call *ir.CallExpr, profile *pgoir.Profile) *ir.Func {
   821  	if mightInline, _ := inlineCallCheck(callerfn, call); !mightInline {
   822  		return nil
   823  	}
   824  	return inlCallee(callerfn, call.Fun, profile, true)
   825  }
   826  
   827  // TryInlineCall returns an inlined call expression for call, or nil
   828  // if inlining is not possible.
   829  func TryInlineCall(callerfn *ir.Func, call *ir.CallExpr, bigCaller bool, profile *pgoir.Profile, closureCalledOnce bool) *ir.InlinedCallExpr {
   830  	mightInline, isIntrinsic := inlineCallCheck(callerfn, call)
   831  
   832  	// Preserve old logging behavior
   833  	if (mightInline || isIntrinsic) && base.Flag.LowerM > 3 {
   834  		fmt.Printf("%v:call to func %+v\n", ir.Line(call), call.Fun)
   835  	}
   836  	if !mightInline {
   837  		return nil
   838  	}
   839  
   840  	if fn := inlCallee(callerfn, call.Fun, profile, false); fn != nil && typecheck.HaveInlineBody(fn) {
   841  		return mkinlcall(callerfn, call, fn, bigCaller, closureCalledOnce)
   842  	}
   843  	return nil
   844  }
   845  
    846  // inlCallee takes a function-typed expression and returns the underlying function
   847  // that it refers to if statically known. Otherwise, it returns nil.
   848  // resolveOnly skips cost-based inlineability checks for closures; the result may not actually be inlineable.
   849  func inlCallee(caller *ir.Func, fn ir.Node, profile *pgoir.Profile, resolveOnly bool) (res *ir.Func) {
   850  	fn = ir.StaticValue(fn)
   851  	switch fn.Op() {
   852  	case ir.OMETHEXPR:
   853  		fn := fn.(*ir.SelectorExpr)
   854  		n := ir.MethodExprName(fn)
   855  		// Check that receiver type matches fn.X.
   856  		// TODO(mdempsky): Handle implicit dereference
   857  		// of pointer receiver argument?
   858  		if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) {
   859  			return nil
   860  		}
   861  		return n.Func
   862  	case ir.ONAME:
   863  		fn := fn.(*ir.Name)
   864  		if fn.Class == ir.PFUNC {
   865  			return fn.Func
   866  		}
   867  	case ir.OCLOSURE:
   868  		fn := fn.(*ir.ClosureExpr)
   869  		c := fn.Func
   870  		if len(c.ClosureVars) != 0 && c.ClosureVars[0].Outer.Curfn != caller {
   871  			return nil // inliner doesn't support inlining across closure frames
   872  		}
   873  		if !resolveOnly {
   874  			CanInline(c, profile)
   875  		}
   876  		return c
   877  	}
   878  	return nil
   879  }
   880  
   881  var inlgen int
   882  
   883  // SSADumpInline gives the SSA back end a chance to dump the function
   884  // when producing output for debugging the compiler itself.
   885  var SSADumpInline = func(*ir.Func) {}
   886  
   887  // InlineCall allows the inliner implementation to be overridden.
   888  // If it returns nil, the function will not be inlined.
   889  var InlineCall = func(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
   890  	base.Fatalf("inline.InlineCall not overridden")
   891  	panic("unreachable")
   892  }
   893  
   894  // inlineCostOK returns true if call n from caller to callee is cheap enough to
   895  // inline. bigCaller indicates that caller is a big function.
   896  //
   897  // In addition to the "cost OK" boolean, it also returns
   898  //   - the "max cost" limit used to make the decision (which may differ depending on func size)
   899  //   - the score assigned to this specific callsite
   900  //   - whether the inlined function is "hot" according to PGO.
   901  func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller, closureCalledOnce bool) (bool, int32, int32, bool) {
   902  	maxCost := int32(inlineMaxBudget)
   903  
   904  	if bigCaller {
   905  		// We use this to restrict inlining into very big functions.
   906  		// See issue 26546 and 17566.
   907  		maxCost = inlineBigFunctionMaxCost
   908  	}
   909  
   910  	if callee.ClosureParent != nil {
   911  		maxCost *= 2           // favor inlining closures
   912  		if closureCalledOnce { // really favor inlining the one call to this closure
   913  			maxCost = max(maxCost, inlineClosureCalledOnceCost)
   914  		}
   915  	}
   916  
   917  	metric := callee.Inl.Cost
   918  	if inlheur.Enabled() {
   919  		score, ok := inlheur.GetCallSiteScore(caller, n)
   920  		if ok {
   921  			metric = int32(score)
   922  		}
   923  	}
   924  
   925  	lineOffset := pgoir.NodeLineOffset(n, caller)
   926  	csi := pgoir.CallSiteInfo{LineOffset: lineOffset, Caller: caller}
   927  	_, hot := candHotEdgeMap[csi]
   928  
   929  	if metric <= maxCost {
   930  		// Simple case. Function is already cheap enough.
   931  		return true, 0, metric, hot
   932  	}
   933  
   934  	// We'll also allow inlining of hot functions below inlineHotMaxBudget,
   935  	// but only in small functions.
   936  
   937  	if !hot {
   938  		// Cold
   939  		return false, maxCost, metric, false
   940  	}
   941  
   942  	// Hot
   943  
   944  	if bigCaller {
   945  		if base.Debug.PGODebug > 0 {
   946  			fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
   947  		}
   948  		return false, maxCost, metric, false
   949  	}
   950  
   951  	if metric > inlineHotMaxBudget {
   952  		return false, inlineHotMaxBudget, metric, false
   953  	}
   954  
   955  	if !base.PGOHash.MatchPosWithInfo(n.Pos(), "inline", nil) {
   956  		// De-selected by PGO Hash.
   957  		return false, maxCost, metric, false
   958  	}
   959  
   960  	if base.Debug.PGODebug > 0 {
   961  		fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
   962  	}
   963  
   964  	return true, 0, metric, hot
   965  }
   966  
   967  // parsePos returns all the inlining positions and the innermost position.
   968  func parsePos(pos src.XPos, posTmp []src.Pos) ([]src.Pos, src.Pos) {
   969  	ctxt := base.Ctxt
   970  	ctxt.AllPos(pos, func(p src.Pos) {
   971  		posTmp = append(posTmp, p)
   972  	})
   973  	l := len(posTmp) - 1
   974  	return posTmp[:l], posTmp[l]
   975  }
   976  
   977  // canInlineCallExpr returns true if the call n from caller to callee
   978  // can be inlined, plus the score computed for the call expr in question,
   979  // and whether the callee is hot according to PGO.
   980  // bigCaller indicates that caller is a big function. log
   981  // indicates that the 'cannot inline' reason should be logged.
   982  //
   983  // Preconditions: CanInline(callee) has already been called.
   984  func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller, closureCalledOnce bool, log bool) (bool, int32, bool) {
   985  	if callee.Inl == nil {
   986  		// callee is never inlinable.
   987  		if log && logopt.Enabled() {
   988  			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
   989  				fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(callee)))
   990  		}
   991  		return false, 0, false
   992  	}
   993  
   994  	ok, maxCost, callSiteScore, hot := inlineCostOK(n, callerfn, callee, bigCaller, closureCalledOnce)
   995  	if !ok {
   996  		// callee cost too high for this call site.
   997  		if log && logopt.Enabled() {
   998  			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
   999  				fmt.Sprintf("cost %d of %s exceeds max caller cost %d", callee.Inl.Cost, ir.PkgFuncName(callee), maxCost))
  1000  		}
  1001  		return false, 0, false
  1002  	}
  1003  
  1004  	callees, calleeInner := parsePos(n.Pos(), make([]src.Pos, 0, 10))
  1005  
  1006  	for _, p := range callees {
  1007  		if p.Line() == calleeInner.Line() && p.Col() == calleeInner.Col() && p.AbsFilename() == calleeInner.AbsFilename() {
  1008  			if log && logopt.Enabled() {
  1009  				logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(callerfn)))
  1010  			}
  1011  			return false, 0, false
  1012  		}
  1013  	}
  1014  
  1015  	if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(callee.Sym().Pkg) {
  1016  		// Runtime package must not be instrumented.
  1017  		// Instrument skips runtime package. However, some runtime code can be
  1018  		// inlined into other packages and instrumented there. To avoid this,
  1019  		// we disable inlining of runtime functions when instrumenting.
  1020  		// The example that we observed is inlining of LockOSThread,
   1021  		// which led to false race reports on m contents.
  1022  		if log && logopt.Enabled() {
  1023  			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
  1024  				fmt.Sprintf("call to runtime function %s in instrumented build", ir.PkgFuncName(callee)))
  1025  		}
  1026  		return false, 0, false
  1027  	}
  1028  
  1029  	if base.Flag.Race && types.IsNoRacePkg(callee.Sym().Pkg) {
  1030  		if log && logopt.Enabled() {
  1031  			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
   1032  				fmt.Sprintf(`call to "no-race" package function %s in race build`, ir.PkgFuncName(callee)))
  1033  		}
  1034  		return false, 0, false
  1035  	}
  1036  
  1037  	if base.Debug.Checkptr != 0 && types.IsRuntimePkg(callee.Sym().Pkg) {
  1038  		// We don't instrument runtime packages for checkptr (see base/flag.go).
  1039  		if log && logopt.Enabled() {
  1040  			logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
   1041  				fmt.Sprintf(`call to runtime package function %s in -d=checkptr build`, ir.PkgFuncName(callee)))
  1042  		}
  1043  		return false, 0, false
  1044  	}
  1045  
  1046  	// Check if we've already inlined this function at this particular
  1047  	// call site, in order to stop inlining when we reach the beginning
  1048  	// of a recursion cycle again. We don't inline immediately recursive
  1049  	// functions, but allow inlining if there is a recursion cycle of
  1050  	// many functions. Most likely, the inlining will stop before we
  1051  	// even hit the beginning of the cycle again, but this catches the
  1052  	// unusual case.
  1053  	parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
  1054  	sym := callee.Linksym()
  1055  	for inlIndex := parent; inlIndex >= 0; inlIndex = base.Ctxt.InlTree.Parent(inlIndex) {
  1056  		if base.Ctxt.InlTree.InlinedFunction(inlIndex) == sym {
  1057  			if log {
  1058  				if base.Flag.LowerM > 1 {
  1059  					fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), callee, ir.FuncName(callerfn))
  1060  				}
  1061  				if logopt.Enabled() {
  1062  					logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
  1063  						fmt.Sprintf("repeated recursive cycle to %s", ir.PkgFuncName(callee)))
  1064  				}
  1065  			}
  1066  			return false, 0, false
  1067  		}
  1068  	}
  1069  
  1070  	return true, callSiteScore, hot
  1071  }
  1072  
  1073  // mkinlcall returns an OINLCALL node that can replace OCALLFUNC n, or
  1074  // nil if it cannot be inlined. callerfn is the function that contains
  1075  // n, and fn is the function being called.
  1076  //
  1077  // The result of mkinlcall MUST be assigned back to n, e.g.
  1078  //
  1079  //	n.Left = mkinlcall(n.Left, fn, isddd)
  1080  func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller, closureCalledOnce bool) *ir.InlinedCallExpr {
  1081  	ok, score, hot := canInlineCallExpr(callerfn, n, fn, bigCaller, closureCalledOnce, true)
  1082  	if !ok {
  1083  		return nil
  1084  	}
  1085  	if hot {
  1086  		hasHotCall[callerfn] = struct{}{}
  1087  	}
  1088  	typecheck.AssertFixedCall(n)
  1089  
  1090  	parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
  1091  	sym := fn.Linksym()
  1092  	inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym, ir.FuncName(fn))
  1093  
  1094  	closureInitLSym := func(n *ir.CallExpr, fn *ir.Func) {
  1095  		// The linker needs FuncInfo metadata for all inlined
  1096  		// functions. This is typically handled by gc.enqueueFunc
  1097  		// calling ir.InitLSym for all function declarations in
  1098  		// typecheck.Target.Decls (ir.UseClosure adds all closures to
  1099  		// Decls).
  1100  		//
  1101  		// However, closures in Decls are ignored, and are
  1102  		// instead enqueued when walk of the calling function
  1103  		// discovers them.
  1104  		//
  1105  		// This presents a problem for direct calls to closures.
  1106  		// Inlining will replace the entire closure definition with its
  1107  		// body, which hides the closure from walk and thus suppresses
  1108  		// symbol creation.
  1109  		//
  1110  		// Explicitly create a symbol early in this edge case to ensure
  1111  		// we keep this metadata.
  1112  		//
  1113  		// TODO: Refactor to keep a reference so this can all be done
  1114  		// by enqueueFunc.
  1115  
  1116  		if n.Op() != ir.OCALLFUNC {
  1117  			// Not a standard call.
  1118  			return
  1119  		}
  1120  		if n.Fun.Op() != ir.OCLOSURE {
  1121  			// Not a direct closure call.
  1122  			return
  1123  		}
  1124  
  1125  		clo := n.Fun.(*ir.ClosureExpr)
  1126  		if !clo.Func.IsClosure() {
   1127  			// enqueueFunc will handle non-closures anyway.
  1128  			return
  1129  		}
  1130  
  1131  		ir.InitLSym(fn, true)
  1132  	}
  1133  
  1134  	closureInitLSym(n, fn)
  1135  
  1136  	if base.Flag.GenDwarfInl > 0 {
  1137  		if !sym.WasInlined() {
  1138  			base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
  1139  			sym.Set(obj.AttrWasInlined, true)
  1140  		}
  1141  	}
  1142  
  1143  	if base.Flag.LowerM != 0 {
  1144  		if buildcfg.Experiment.NewInliner {
  1145  			fmt.Printf("%v: inlining call to %v with score %d\n",
  1146  				ir.Line(n), fn, score)
  1147  		} else {
  1148  			fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
  1149  		}
  1150  	}
  1151  	if base.Flag.LowerM > 2 {
  1152  		fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
  1153  	}
  1154  
  1155  	res := InlineCall(callerfn, n, fn, inlIndex)
  1156  
  1157  	if res == nil {
  1158  		base.FatalfAt(n.Pos(), "inlining call to %v failed", fn)
  1159  	}
  1160  
  1161  	if base.Flag.LowerM > 2 {
  1162  		fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
  1163  	}
  1164  
  1165  	if inlheur.Enabled() {
  1166  		inlheur.UpdateCallsiteTable(callerfn, n, res)
  1167  	}
  1168  
  1169  	return res
  1170  }
  1171  
  1172  // CalleeEffects appends any side effects from evaluating callee to init.
  1173  func CalleeEffects(init *ir.Nodes, callee ir.Node) {
  1174  	for {
  1175  		init.Append(ir.TakeInit(callee)...)
  1176  
  1177  		switch callee.Op() {
  1178  		case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
  1179  			return // done
  1180  
  1181  		case ir.OCONVNOP:
  1182  			conv := callee.(*ir.ConvExpr)
  1183  			callee = conv.X
  1184  
  1185  		case ir.OINLCALL:
  1186  			ic := callee.(*ir.InlinedCallExpr)
  1187  			init.Append(ic.Body.Take()...)
  1188  			callee = ic.SingleResult()
  1189  
  1190  		default:
  1191  			base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee)
  1192  		}
  1193  	}
  1194  }
  1195  
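         // pruneUnusedAutos returns the declaration list ll, checking that every
         // local (PAUTO) variable in it was referenced by the hairyVisitor; an
         // unreferenced auto is reported as an internal compiler error.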
  1196  func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
  1197  	s := make([]*ir.Name, 0, len(ll))
  1198  	for _, n := range ll {
  1199  		if n.Class == ir.PAUTO {
  1200  			if !vis.usedLocals.Has(n) {
  1201  				// TODO(mdempsky): Simplify code after confident that this
  1202  				// never happens anymore.
  1203  				base.FatalfAt(n.Pos(), "unused auto: %v", n)
  1204  				continue
  1205  			}
  1206  		}
  1207  		s = append(s, n)
  1208  	}
  1209  	return s
  1210  }
  1211  
  1212  // numNonClosures returns the number of functions in list which are not closures.
  1213  func numNonClosures(list []*ir.Func) int {
  1214  	count := 0
  1215  	for _, fn := range list {
  1216  		if fn.OClosure == nil {
  1217  			count++
  1218  		}
  1219  	}
  1220  	return count
  1221  }
  1222  
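         // doList applies do to each non-nil node in list and reports whether any
         // invocation returned true, stopping at the first that does.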
  1223  func doList(list []ir.Node, do func(ir.Node) bool) bool {
  1224  	for _, x := range list {
  1225  		if x != nil {
  1226  			if do(x) {
  1227  				return true
  1228  			}
  1229  		}
  1230  	}
  1231  	return false
  1232  }
  1233  
  1234  // isIndexingCoverageCounter returns true if the specified node 'n' is indexing
  1235  // into a coverage counter array.
  1236  func isIndexingCoverageCounter(n ir.Node) bool {
  1237  	if n.Op() != ir.OINDEX {
  1238  		return false
  1239  	}
  1240  	ixn := n.(*ir.IndexExpr)
  1241  	if ixn.X.Op() != ir.ONAME || !ixn.X.Type().IsArray() {
  1242  		return false
  1243  	}
  1244  	nn := ixn.X.(*ir.Name)
  1245  	// CoverageAuxVar implies either a coverage counter or a package
  1246  	// ID; since the cover tool never emits code to index into ID vars
  1247  	// this is effectively testing whether nn is a coverage counter.
  1248  	return nn.CoverageAuxVar()
  1249  }
  1250  
  1251  // isAtomicCoverageCounterUpdate examines the specified node to
   1252  // determine whether it represents a call to sync/atomic.AddUint32 or
   1253  // StoreUint32 that updates a coverage counter.
  1254  func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool {
  1255  	if cn.Fun.Op() != ir.ONAME {
  1256  		return false
  1257  	}
  1258  	name := cn.Fun.(*ir.Name)
  1259  	if name.Class != ir.PFUNC {
  1260  		return false
  1261  	}
  1262  	fn := name.Sym().Name
  1263  	if name.Sym().Pkg.Path != "sync/atomic" ||
  1264  		(fn != "AddUint32" && fn != "StoreUint32") {
  1265  		return false
  1266  	}
  1267  	if len(cn.Args) != 2 || cn.Args[0].Op() != ir.OADDR {
  1268  		return false
  1269  	}
  1270  	adn := cn.Args[0].(*ir.AddrExpr)
  1271  	v := isIndexingCoverageCounter(adn.X)
  1272  	return v
  1273  }
  1274  
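         // PostProcessCallSites dumps call-site scores computed by the inlining
         // heuristics when the DumpInlCallSiteScores debug flag is set.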
  1275  func PostProcessCallSites(profile *pgoir.Profile) {
  1276  	if base.Debug.DumpInlCallSiteScores != 0 {
  1277  		budgetCallback := func(fn *ir.Func, prof *pgoir.Profile) (int32, bool) {
  1278  			v := inlineBudget(fn, prof, false, false)
  1279  			return v, v == inlineHotMaxBudget
  1280  		}
  1281  		inlheur.DumpInlCallSiteScores(profile, budgetCallback)
  1282  	}
  1283  }
  1284  
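         // analyzeFuncProps runs the new inliner's heuristics analysis (inlheur) on
         // fn, passing CanInline and inlineBudget as callbacks.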
  1285  func analyzeFuncProps(fn *ir.Func, p *pgoir.Profile) {
  1286  	canInline := func(fn *ir.Func) { CanInline(fn, p) }
  1287  	budgetForFunc := func(fn *ir.Func) int32 {
  1288  		return inlineBudget(fn, p, true, false)
  1289  	}
  1290  	inlheur.AnalyzeFunc(fn, canInline, budgetForFunc, inlineMaxBudget)
  1291  }
  1292  
