Source file src/testing/benchmark.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package testing
     6  
     7  import (
     8  	"context"
     9  	"flag"
    10  	"fmt"
    11  	"internal/sysinfo"
    12  	"io"
    13  	"math"
    14  	"os"
    15  	"runtime"
    16  	"slices"
    17  	"strconv"
    18  	"strings"
    19  	"sync"
    20  	"sync/atomic"
    21  	"time"
    22  	"unicode"
    23  )
    24  
// initBenchmarkFlags registers the benchmark-related command-line flags
// (-test.bench, -test.benchmem, -test.benchtime) into the package-level
// variables declared below. It must run before flag.Parse.
func initBenchmarkFlags() {
	matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
	benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
	// benchTime accepts either a duration ("2s") or a fixed count ("100x");
	// see durationOrCountFlag.Set.
	flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d` or N times if `d` is of the form Nx")
}
    30  
var (
	matchBenchmarks *string // -test.bench pattern; empty means benchmarks are skipped
	benchmarkMemory *bool   // -test.benchmem: report allocation stats for all benchmarks

	benchTime = durationOrCountFlag{d: 1 * time.Second} // changed during test of testing package
)
    37  
    38  type durationOrCountFlag struct {
    39  	d         time.Duration
    40  	n         int
    41  	allowZero bool
    42  }
    43  
    44  func (f *durationOrCountFlag) String() string {
    45  	if f.n > 0 {
    46  		return fmt.Sprintf("%dx", f.n)
    47  	}
    48  	return f.d.String()
    49  }
    50  
    51  func (f *durationOrCountFlag) Set(s string) error {
    52  	if strings.HasSuffix(s, "x") {
    53  		n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
    54  		if err != nil || n < 0 || (!f.allowZero && n == 0) {
    55  			return fmt.Errorf("invalid count")
    56  		}
    57  		*f = durationOrCountFlag{n: int(n)}
    58  		return nil
    59  	}
    60  	d, err := time.ParseDuration(s)
    61  	if err != nil || d < 0 || (!f.allowZero && d == 0) {
    62  		return fmt.Errorf("invalid duration")
    63  	}
    64  	*f = durationOrCountFlag{d: d}
    65  	return nil
    66  }
    67  
// Global lock to ensure only one benchmark runs at a time, so concurrent
// benchmark work cannot perturb measurements.
var benchmarkLock sync.Mutex

// Used for every benchmark for measuring memory. Access happens while
// benchmarkLock is held (see runN), so a single shared value suffices.
var memStats runtime.MemStats
    73  
// InternalBenchmark is an internal type but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
type InternalBenchmark struct {
	Name string     // name of the benchmark function
	F    func(b *B) // the benchmark function itself
}
    80  
// B is a type passed to [Benchmark] functions to manage benchmark
// timing and control the number of iterations.
//
// A benchmark ends when its Benchmark function returns or calls any of the methods
// [B.FailNow], [B.Fatal], [B.Fatalf], [B.SkipNow], [B.Skip], or [B.Skipf].
// Those methods must be called only from the goroutine running the Benchmark function.
// The other reporting methods, such as the variations of [B.Log] and [B.Error],
// may be called simultaneously from multiple goroutines.
//
// Like in tests, benchmark logs are accumulated during execution
// and dumped to standard output when done. Unlike in tests, benchmark logs
// are always printed, so as not to hide output whose existence may be
// affecting benchmark results.
type B struct {
	common
	importPath       string              // import path of the package containing the benchmark
	bstate           *benchState         // shared printing/matching state; nil when run via func Benchmark
	N                int                 // number of iterations to run (set by runN)
	previousN        int                 // number of iterations in the previous run
	previousDuration time.Duration       // total duration of the previous run
	benchFunc        func(b *B)          // the benchmark function to measure
	benchTime        durationOrCountFlag // requested run length: duration or fixed count
	bytes            int64               // per-iteration byte count set via SetBytes; enables MB/s
	missingBytes     bool                // one of the subbenchmarks does not have bytes set.
	timerOn          bool                // whether the benchmark timer is currently running
	showAllocResult  bool                // report alloc stats even without -test.benchmem (ReportAllocs)
	result           BenchmarkResult     // result of the most recent measurement
	parallelism      int                 // RunParallel creates parallelism*GOMAXPROCS goroutines
	// The initial states of memStats.Mallocs and memStats.TotalAlloc.
	startAllocs uint64
	startBytes  uint64
	// The net total of this test after being run.
	netAllocs uint64
	netBytes  uint64
	// Extra metrics collected by ReportMetric.
	extra map[string]float64

	// loop tracks the state of B.Loop
	loop struct {
		// n is the target number of iterations. It gets bumped up as we go.
		// When the benchmark loop is done, we commit this to b.N so users can
		// do reporting based on it, but we avoid exposing it until then.
		n uint64
		// i is the current Loop iteration. It's strictly monotonically
		// increasing toward n.
		//
		// The high bit is used to poison the Loop fast path and fall back to
		// the slow path.
		i uint64

		done bool // set when B.Loop return false
	}
}
   134  
   135  // StartTimer starts timing a test. This function is called automatically
   136  // before a benchmark starts, but it can also be used to resume timing after
   137  // a call to [B.StopTimer].
   138  func (b *B) StartTimer() {
   139  	if !b.timerOn {
   140  		runtime.ReadMemStats(&memStats)
   141  		b.startAllocs = memStats.Mallocs
   142  		b.startBytes = memStats.TotalAlloc
   143  		b.start = highPrecisionTimeNow()
   144  		b.timerOn = true
   145  		b.loop.i &^= loopPoisonTimer
   146  	}
   147  }
   148  
   149  // StopTimer stops timing a test. This can be used to pause the timer
   150  // while performing steps that you don't want to measure.
   151  func (b *B) StopTimer() {
   152  	if b.timerOn {
   153  		b.duration += highPrecisionTimeSince(b.start)
   154  		runtime.ReadMemStats(&memStats)
   155  		b.netAllocs += memStats.Mallocs - b.startAllocs
   156  		b.netBytes += memStats.TotalAlloc - b.startBytes
   157  		b.timerOn = false
   158  		// If we hit B.Loop with the timer stopped, fail.
   159  		b.loop.i |= loopPoisonTimer
   160  	}
   161  }
   162  
   163  // ResetTimer zeroes the elapsed benchmark time and memory allocation counters
   164  // and deletes user-reported metrics.
   165  // It does not affect whether the timer is running.
   166  func (b *B) ResetTimer() {
   167  	if b.extra == nil {
   168  		// Allocate the extra map before reading memory stats.
   169  		// Pre-size it to make more allocation unlikely.
   170  		b.extra = make(map[string]float64, 16)
   171  	} else {
   172  		clear(b.extra)
   173  	}
   174  	if b.timerOn {
   175  		runtime.ReadMemStats(&memStats)
   176  		b.startAllocs = memStats.Mallocs
   177  		b.startBytes = memStats.TotalAlloc
   178  		b.start = highPrecisionTimeNow()
   179  	}
   180  	b.duration = 0
   181  	b.netAllocs = 0
   182  	b.netBytes = 0
   183  }
   184  
   185  // SetBytes records the number of bytes processed in a single operation.
   186  // If this is called, the benchmark will report ns/op and MB/s.
   187  func (b *B) SetBytes(n int64) { b.bytes = n }
   188  
// ReportAllocs enables malloc statistics for this benchmark.
// It is equivalent to setting -test.benchmem, but it only affects the
// benchmark function that calls ReportAllocs.
func (b *B) ReportAllocs() {
	// Checked alongside *benchmarkMemory when results are printed.
	b.showAllocResult = true
}
   195  
// runN runs a single benchmark for the specified number of iterations.
// It resets all per-run state (timers, B.Loop bookkeeping, context) before
// invoking the benchmark function, and records the run so the caller can
// extrapolate the next iteration count from previousN/previousDuration.
func (b *B) runN(n int) {
	benchmarkLock.Lock()
	defer benchmarkLock.Unlock()
	ctx, cancelCtx := context.WithCancel(context.Background())
	defer func() {
		b.runCleanup(normalPanic)
		b.checkRaces()
	}()
	// Try to get a comparable environment for each run
	// by clearing garbage from previous runs.
	runtime.GC()
	b.resetRaces()
	b.N = n
	// Reset B.Loop state so a benchmark using b.Loop starts fresh this run.
	b.loop.n = 0
	b.loop.i = 0
	b.loop.done = false
	b.ctx = ctx
	b.cancelCtx = cancelCtx

	b.parallelism = 1
	b.ResetTimer()
	b.StartTimer()
	b.benchFunc(b)
	b.StopTimer()
	b.previousN = n
	b.previousDuration = b.duration

	// A benchmark that started b.Loop but never saw it return false exited
	// its loop early (break or return), invalidating the measurement.
	if b.loop.n > 0 && !b.loop.done && !b.failed {
		b.Error("benchmark function returned without B.Loop() == false (break or return in loop?)")
	}
}
   228  
// run1 runs the first iteration of benchFunc. It reports whether more
// iterations of this benchmarks should be run.
func (b *B) run1() bool {
	if bstate := b.bstate; bstate != nil {
		// Extend maxLen, if needed.
		if n := len(b.name) + bstate.extLen + 1; n > bstate.maxLen {
			bstate.maxLen = n + 8 // Add additional slack to avoid too many jumps in size.
		}
	}
	// Run the benchmark body on its own goroutine so FailNow's
	// runtime.Goexit terminates only the benchmark, not the caller.
	go func() {
		// Signal that we're done whether we return normally
		// or by FailNow's runtime.Goexit.
		defer func() {
			b.signal <- true
		}()

		b.runN(1)
	}()
	<-b.signal
	if b.failed {
		fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), b.name, b.output)
		return false
	}
	// Only print the output if we know we are not going to proceed.
	// Otherwise it is printed in processBench.
	b.mu.RLock()
	finished := b.finished
	b.mu.RUnlock()
	if b.hasSub.Load() || finished {
		// A benchmark with subbenchmarks, or one that skipped/finished
		// during its first iteration, is not measured further.
		tag := "BENCH"
		if b.skipped {
			tag = "SKIP"
		}
		if b.chatty != nil && (len(b.output) > 0 || finished) {
			b.trimOutput()
			fmt.Fprintf(b.w, "%s--- %s: %s\n%s", b.chatty.prefix(), tag, b.name, b.output)
		}
		return false
	}
	return true
}
   270  
   271  var labelsOnce sync.Once
   272  
   273  // run executes the benchmark in a separate goroutine, including all of its
   274  // subbenchmarks. b must not have subbenchmarks.
   275  func (b *B) run() {
   276  	labelsOnce.Do(func() {
   277  		fmt.Fprintf(b.w, "goos: %s\n", runtime.GOOS)
   278  		fmt.Fprintf(b.w, "goarch: %s\n", runtime.GOARCH)
   279  		if b.importPath != "" {
   280  			fmt.Fprintf(b.w, "pkg: %s\n", b.importPath)
   281  		}
   282  		if cpu := sysinfo.CPUName(); cpu != "" {
   283  			fmt.Fprintf(b.w, "cpu: %s\n", cpu)
   284  		}
   285  	})
   286  	if b.bstate != nil {
   287  		// Running go test --test.bench
   288  		b.bstate.processBench(b) // Must call doBench.
   289  	} else {
   290  		// Running func Benchmark.
   291  		b.doBench()
   292  	}
   293  }
   294  
// doBench runs the benchmark on a fresh goroutine (via launch) and blocks
// until it signals completion, then returns the accumulated result.
func (b *B) doBench() BenchmarkResult {
	go b.launch()
	<-b.signal
	return b.result
}
   300  
   301  // Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
   302  const maxBenchPredictIters = 1_000_000_000
   303  
   304  func predictN(goalns int64, prevIters int64, prevns int64, last int64) int {
   305  	if prevns == 0 {
   306  		// Round up to dodge divide by zero. See https://go.dev/issue/70709.
   307  		prevns = 1
   308  	}
   309  
   310  	// Order of operations matters.
   311  	// For very fast benchmarks, prevIters ~= prevns.
   312  	// If you divide first, you get 0 or 1,
   313  	// which can hide an order of magnitude in execution time.
   314  	// So multiply first, then divide.
   315  	n := goalns * prevIters / prevns
   316  	// Run more iterations than we think we'll need (1.2x).
   317  	n += n / 5
   318  	// Don't grow too fast in case we had timing errors previously.
   319  	n = min(n, 100*last)
   320  	// Be sure to run at least one more than last time.
   321  	n = max(n, last+1)
   322  	// Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
   323  	n = min(n, maxBenchPredictIters)
   324  	return int(n)
   325  }
   326  
// launch launches the benchmark function. It gradually increases the number
// of benchmark iterations until the benchmark runs for the requested benchtime.
// launch is run by the doBench function as a separate goroutine.
// run1 must have been called on b.
func (b *B) launch() {
	// Signal that we're done whether we return normally
	// or by FailNow's runtime.Goexit.
	defer func() {
		b.signal <- true
	}()

	// b.Loop does its own ramp-up logic so we just need to run it once.
	// If b.loop.n is non zero, it means b.Loop has already run.
	if b.loop.n == 0 {
		// Run the benchmark for at least the specified amount of time.
		if b.benchTime.n > 0 {
			// We already ran a single iteration in run1.
			// If -benchtime=1x was requested, use that result.
			// See https://golang.org/issue/32051.
			if b.benchTime.n > 1 {
				b.runN(b.benchTime.n)
			}
		} else {
			d := b.benchTime.d
			// Re-run with predicted iteration counts until the accumulated
			// duration reaches the target, the benchmark fails, or we hit
			// the 1e9 iteration ceiling.
			for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
				last := n
				// Predict required iterations.
				goalns := d.Nanoseconds()
				prevIters := int64(b.N)
				n = int64(predictN(goalns, prevIters, b.duration.Nanoseconds(), last))
				b.runN(int(n))
			}
		}
	}
	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
}
   363  
   364  // Elapsed returns the measured elapsed time of the benchmark.
   365  // The duration reported by Elapsed matches the one measured by
   366  // [B.StartTimer], [B.StopTimer], and [B.ResetTimer].
   367  func (b *B) Elapsed() time.Duration {
   368  	d := b.duration
   369  	if b.timerOn {
   370  		d += highPrecisionTimeSince(b.start)
   371  	}
   372  	return d
   373  }
   374  
   375  // ReportMetric adds "n unit" to the reported benchmark results.
   376  // If the metric is per-iteration, the caller should divide by b.N,
   377  // and by convention units should end in "/op".
   378  // ReportMetric overrides any previously reported value for the same unit.
   379  // ReportMetric panics if unit is the empty string or if unit contains
   380  // any whitespace.
   381  // If unit is a unit normally reported by the benchmark framework itself
   382  // (such as "allocs/op"), ReportMetric will override that metric.
   383  // Setting "ns/op" to 0 will suppress that built-in metric.
   384  func (b *B) ReportMetric(n float64, unit string) {
   385  	if unit == "" {
   386  		panic("metric unit must not be empty")
   387  	}
   388  	if strings.IndexFunc(unit, unicode.IsSpace) >= 0 {
   389  		panic("metric unit must not contain whitespace")
   390  	}
   391  	b.extra[unit] = n
   392  }
   393  
// stopOrScaleBLoop is called from loopSlowPath when a timed (-benchtime=Ns)
// B.Loop benchmark reaches its current iteration target. It either declares
// the benchmark done or raises b.loop.n, and reports whether the loop
// should keep running.
func (b *B) stopOrScaleBLoop() bool {
	t := b.Elapsed()
	if t >= b.benchTime.d {
		// We've reached the target
		return false
	}
	// Loop scaling
	goalns := b.benchTime.d.Nanoseconds()
	prevIters := int64(b.loop.n)
	b.loop.n = uint64(predictN(goalns, prevIters, t.Nanoseconds(), prevIters))
	// The new target must not collide with the poison bits used to force
	// B.Loop onto the slow path.
	if b.loop.n&loopPoisonMask != 0 {
		// The iteration count should never get this high, but if it did we'd be
		// in big trouble.
		panic("loop iteration target overflow")
	}
	// predictN may have capped the number of iterations; make sure to
	// terminate if we've already hit that cap.
	return uint64(prevIters) < b.loop.n
}
   413  
// loopSlowPath handles every B.Loop call that falls off the fast path: the
// first call in a benchmark, reaching the current iteration target, and the
// poisoned (timer stopped) state. It reports whether another iteration
// should run.
func (b *B) loopSlowPath() bool {
	// Consistency checks
	if !b.timerOn {
		b.Fatal("B.Loop called with timer stopped")
	}
	if b.loop.i&loopPoisonMask != 0 {
		panic(fmt.Sprintf("unknown loop stop condition: %#x", b.loop.i))
	}

	if b.loop.n == 0 {
		// It's the first call to b.Loop() in the benchmark function.
		if b.benchTime.n > 0 {
			// Fixed iteration count.
			b.loop.n = uint64(b.benchTime.n)
		} else {
			// Initialize target to 1 to kick start loop scaling.
			b.loop.n = 1
		}
		// Within a b.Loop loop, we don't use b.N (to avoid confusion).
		b.N = 0
		// Setup code before the loop shouldn't count toward the measurement.
		b.ResetTimer()

		// Start the next iteration.
		b.loop.i++
		return true
	}

	// Should we keep iterating?
	var more bool
	if b.benchTime.n > 0 {
		// The iteration count is fixed, so we should have run this many and now
		// be done.
		if b.loop.i != uint64(b.benchTime.n) {
			// We shouldn't be able to reach the slow path in this case.
			panic(fmt.Sprintf("iteration count %d < fixed target %d", b.loop.i, b.benchTime.n))
		}
		more = false
	} else {
		// Handle fixed time case
		more = b.stopOrScaleBLoop()
	}
	if !more {
		// Done: stop measuring and expose the final count through b.N.
		b.StopTimer()
		// Commit iteration count
		b.N = int(b.loop.n)
		b.loop.done = true
		return false
	}

	// Start the next iteration.
	b.loop.i++
	return true
}
   467  
// Loop returns true as long as the benchmark should continue running.
//
// A typical benchmark is structured like:
//
//	func Benchmark(b *testing.B) {
//		... setup ...
//		for b.Loop() {
//			... code to measure ...
//		}
//		... cleanup ...
//	}
//
// Loop resets the benchmark timer the first time it is called in a benchmark,
// so any setup performed prior to starting the benchmark loop does not count
// toward the benchmark measurement. Likewise, when it returns false, it stops
// the timer so cleanup code is not measured.
//
// Within the body of a "for b.Loop() { ... }" loop, arguments to and
// results from function calls within the loop are kept alive, preventing
// the compiler from fully optimizing away the loop body. Currently, this is
// implemented by disabling inlining of functions called in a b.Loop loop.
// This applies only to calls syntactically between the curly braces of the loop,
// and the loop condition must be written exactly as "b.Loop()". Optimizations
// are performed as usual in any functions called by the loop.
//
// After Loop returns false, b.N contains the total number of iterations that
// ran, so the benchmark may use b.N to compute other average metrics.
//
// Prior to the introduction of Loop, benchmarks were expected to contain an
// explicit loop from 0 to b.N. Benchmarks should either use Loop or contain a
// loop to b.N, but not both. Loop offers more automatic management of the
// benchmark timer, and runs each benchmark function only once per measurement,
// whereas b.N-based benchmarks must run the benchmark function (and any
// associated setup and cleanup) several times.
func (b *B) Loop() bool {
	// This is written such that the fast path is as fast as possible and can be
	// inlined.
	//
	// There are three cases where we'll fall out of the fast path:
	//
	// - On the first call, both i and n are 0.
	//
	// - If the loop reaches the n'th iteration, then i == n and we need
	//   to figure out the new target iteration count or if we're done.
	//
	// - If the timer is stopped, it poisons the top bit of i so the slow
	//   path can do consistency checks and fail.
	//
	// Note that the poison bits in i also make this comparison fail,
	// funneling poisoned states into loopSlowPath.
	if b.loop.i < b.loop.n {
		b.loop.i++
		return true
	}
	return b.loopSlowPath()
}
   521  
// The loopPoison constants can be OR'd into B.loop.i to cause it to fall back
// to the slow path.
const (
	// loopPoisonTimer is set by StopTimer and cleared by StartTimer; it makes
	// B.Loop take the slow path, which reports the timer-stopped misuse.
	loopPoisonTimer = uint64(1 << (63 - iota))
	// If necessary, add more poison bits here.

	// loopPoisonMask is the set of all loop poison bits. (iota-1) is the index
	// of the bit we just set, from which we recreate that bit mask. We subtract
	// 1 to set all of the bits below that bit, then complement the result to
	// get the mask. Sorry, not sorry.
	loopPoisonMask = ^uint64((1 << (63 - (iota - 1))) - 1)
)
   534  
   535  // BenchmarkResult contains the results of a benchmark run.
   536  type BenchmarkResult struct {
   537  	N         int           // The number of iterations.
   538  	T         time.Duration // The total time taken.
   539  	Bytes     int64         // Bytes processed in one iteration.
   540  	MemAllocs uint64        // The total number of memory allocations.
   541  	MemBytes  uint64        // The total number of bytes allocated.
   542  
   543  	// Extra records additional metrics reported by ReportMetric.
   544  	Extra map[string]float64
   545  }
   546  
   547  // NsPerOp returns the "ns/op" metric.
   548  func (r BenchmarkResult) NsPerOp() int64 {
   549  	if v, ok := r.Extra["ns/op"]; ok {
   550  		return int64(v)
   551  	}
   552  	if r.N <= 0 {
   553  		return 0
   554  	}
   555  	return r.T.Nanoseconds() / int64(r.N)
   556  }
   557  
   558  // mbPerSec returns the "MB/s" metric.
   559  func (r BenchmarkResult) mbPerSec() float64 {
   560  	if v, ok := r.Extra["MB/s"]; ok {
   561  		return v
   562  	}
   563  	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
   564  		return 0
   565  	}
   566  	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
   567  }
   568  
   569  // AllocsPerOp returns the "allocs/op" metric,
   570  // which is calculated as r.MemAllocs / r.N.
   571  func (r BenchmarkResult) AllocsPerOp() int64 {
   572  	if v, ok := r.Extra["allocs/op"]; ok {
   573  		return int64(v)
   574  	}
   575  	if r.N <= 0 {
   576  		return 0
   577  	}
   578  	return int64(r.MemAllocs) / int64(r.N)
   579  }
   580  
   581  // AllocedBytesPerOp returns the "B/op" metric,
   582  // which is calculated as r.MemBytes / r.N.
   583  func (r BenchmarkResult) AllocedBytesPerOp() int64 {
   584  	if v, ok := r.Extra["B/op"]; ok {
   585  		return int64(v)
   586  	}
   587  	if r.N <= 0 {
   588  		return 0
   589  	}
   590  	return int64(r.MemBytes) / int64(r.N)
   591  }
   592  
   593  // String returns a summary of the benchmark results.
   594  // It follows the benchmark result line format from
   595  // https://golang.org/design/14313-benchmark-format, not including the
   596  // benchmark name.
   597  // Extra metrics override built-in metrics of the same name.
   598  // String does not include allocs/op or B/op, since those are reported
   599  // by [BenchmarkResult.MemString].
   600  func (r BenchmarkResult) String() string {
   601  	buf := new(strings.Builder)
   602  	fmt.Fprintf(buf, "%8d", r.N)
   603  
   604  	// Get ns/op as a float.
   605  	ns, ok := r.Extra["ns/op"]
   606  	if !ok {
   607  		ns = float64(r.T.Nanoseconds()) / float64(r.N)
   608  	}
   609  	if ns != 0 {
   610  		buf.WriteByte('\t')
   611  		prettyPrint(buf, ns, "ns/op")
   612  	}
   613  
   614  	if mbs := r.mbPerSec(); mbs != 0 {
   615  		fmt.Fprintf(buf, "\t%7.2f MB/s", mbs)
   616  	}
   617  
   618  	// Print extra metrics that aren't represented in the standard
   619  	// metrics.
   620  	var extraKeys []string
   621  	for k := range r.Extra {
   622  		switch k {
   623  		case "ns/op", "MB/s", "B/op", "allocs/op":
   624  			// Built-in metrics reported elsewhere.
   625  			continue
   626  		}
   627  		extraKeys = append(extraKeys, k)
   628  	}
   629  	slices.Sort(extraKeys)
   630  	for _, k := range extraKeys {
   631  		buf.WriteByte('\t')
   632  		prettyPrint(buf, r.Extra[k], k)
   633  	}
   634  	return buf.String()
   635  }
   636  
   637  func prettyPrint(w io.Writer, x float64, unit string) {
   638  	// Print all numbers with 10 places before the decimal point
   639  	// and small numbers with four sig figs. Field widths are
   640  	// chosen to fit the whole part in 10 places while aligning
   641  	// the decimal point of all fractional formats.
   642  	var format string
   643  	switch y := math.Abs(x); {
   644  	case y == 0 || y >= 999.95:
   645  		format = "%10.0f %s"
   646  	case y >= 99.995:
   647  		format = "%12.1f %s"
   648  	case y >= 9.9995:
   649  		format = "%13.2f %s"
   650  	case y >= 0.99995:
   651  		format = "%14.3f %s"
   652  	case y >= 0.099995:
   653  		format = "%15.4f %s"
   654  	case y >= 0.0099995:
   655  		format = "%16.5f %s"
   656  	case y >= 0.00099995:
   657  		format = "%17.6f %s"
   658  	default:
   659  		format = "%18.7f %s"
   660  	}
   661  	fmt.Fprintf(w, format, x, unit)
   662  }
   663  
   664  // MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'.
   665  func (r BenchmarkResult) MemString() string {
   666  	return fmt.Sprintf("%8d B/op\t%8d allocs/op",
   667  		r.AllocedBytesPerOp(), r.AllocsPerOp())
   668  }
   669  
   670  // benchmarkName returns full name of benchmark including procs suffix.
   671  func benchmarkName(name string, n int) string {
   672  	if n != 1 {
   673  		return fmt.Sprintf("%s-%d", name, n)
   674  	}
   675  	return name
   676  }
   677  
// benchState holds cross-benchmark bookkeeping while a set of benchmarks
// runs: the name matcher and the column widths used to align printed output.
type benchState struct {
	match *matcher // filter built from -test.bench (and -test.skip)

	maxLen int // The largest recorded benchmark name.
	extLen int // Maximum extension length.
}
   684  
// RunBenchmarks is an internal function but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
// It runs the given benchmarks with no package import path in the header.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
	runBenchmarks("", matchString, benchmarks)
}
   690  
// runBenchmarks filters benchmarks against the -test.bench matcher and runs
// each match as a subbenchmark of a synthetic "Main" benchmark. It reports
// whether all benchmarks passed (vacuously true when -test.bench is unset).
func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool {
	// If no flag was specified, don't run benchmarks.
	if len(*matchBenchmarks) == 0 {
		return true
	}
	// Collect matching benchmarks and determine longest name.
	maxprocs := 1
	for _, procs := range cpuList {
		if procs > maxprocs {
			maxprocs = procs
		}
	}
	bstate := &benchState{
		match:  newMatcher(matchString, *matchBenchmarks, "-test.bench", *skip),
		extLen: len(benchmarkName("", maxprocs)),
	}
	var bs []InternalBenchmark
	for _, Benchmark := range benchmarks {
		if _, matched, _ := bstate.match.fullName(nil, Benchmark.Name); matched {
			bs = append(bs, Benchmark)
			benchName := benchmarkName(Benchmark.Name, maxprocs)
			// Track the widest name so the result columns line up.
			if l := len(benchName) + bstate.extLen + 1; l > bstate.maxLen {
				bstate.maxLen = l
			}
		}
	}
	// The synthetic parent runs each matched benchmark via b.Run.
	main := &B{
		common: common{
			name:  "Main",
			w:     os.Stdout,
			bench: true,
		},
		importPath: importPath,
		benchFunc: func(b *B) {
			for _, Benchmark := range bs {
				b.Run(Benchmark.Name, Benchmark.F)
			}
		},
		benchTime: benchTime,
		bstate:    bstate,
	}
	if Verbose() {
		main.chatty = newChattyPrinter(main.w)
	}
	main.runN(1)
	return !main.failed
}
   738  
// processBench runs bench b for the configured CPU counts and prints the results.
// For every GOMAXPROCS value in cpuList it runs the benchmark -test.count
// times, constructing a fresh B for each run after the first so state does
// not leak between measurements.
func (s *benchState) processBench(b *B) {
	for i, procs := range cpuList {
		for j := uint(0); j < *count; j++ {
			runtime.GOMAXPROCS(procs)
			benchName := benchmarkName(b.name, procs)

			// If it's chatty, we've already printed this information.
			if b.chatty == nil {
				fmt.Fprintf(b.w, "%-*s\t", s.maxLen, benchName)
			}
			// Recompute the running time for all but the first iteration.
			if i > 0 || j > 0 {
				// Note: b is rebound to a brand-new B here; the previous
				// B's result was already consumed by the last iteration.
				b = &B{
					common: common{
						signal: make(chan bool),
						name:   b.name,
						w:      b.w,
						chatty: b.chatty,
						bench:  true,
					},
					benchFunc: b.benchFunc,
					benchTime: b.benchTime,
				}
				b.setOutputWriter()
				b.run1()
			}
			r := b.doBench()
			if b.failed {
				// The output could be very long here, but probably isn't.
				// We print it all, regardless, because we don't want to trim the reason
				// the benchmark failed.
				fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), benchName, b.output)
				continue
			}
			results := r.String()
			if b.chatty != nil {
				fmt.Fprintf(b.w, "%-*s\t", s.maxLen, benchName)
			}
			if *benchmarkMemory || b.showAllocResult {
				results += "\t" + r.MemString()
			}
			fmt.Fprintln(b.w, results)
			// Unlike with tests, we ignore the -chatty flag and always print output for
			// benchmarks since the output generation time will skew the results.
			if len(b.output) > 0 {
				b.trimOutput()
				fmt.Fprintf(b.w, "%s--- BENCH: %s\n%s", b.chatty.prefix(), benchName, b.output)
			}
			// Warn when the benchmark itself changed GOMAXPROCS and left it set.
			if p := runtime.GOMAXPROCS(-1); p != procs {
				fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
			}
			if b.chatty != nil && b.chatty.json {
				b.chatty.Updatef("", "=== NAME  %s\n", "")
			}
		}
	}
}
   797  
// If hideStdoutForTesting is true, Run does not print the benchName.
// This avoids a spurious print during 'go test' on package testing itself,
// which invokes b.Run in its own tests (see sub_test.go).
// It is only ever set from within this package's own tests.
var hideStdoutForTesting = false
   802  
// Run benchmarks f as a subbenchmark with the given name. It reports
// whether there were any failures.
//
// A subbenchmark is like any other benchmark. A benchmark that calls Run at
// least once will not be measured itself and will be called once with N=1.
func (b *B) Run(name string, f func(b *B)) bool {
	// Since b has subbenchmarks, we will no longer run it as a benchmark itself.
	// Release the lock and acquire it on exit to ensure locks stay paired.
	b.hasSub.Store(true)
	benchmarkLock.Unlock()
	defer benchmarkLock.Lock()

	// Resolve the subbenchmark's full name against the -test.bench matcher.
	benchName, ok, partial := b.name, true, false
	if b.bstate != nil {
		benchName, ok, partial = b.bstate.match.fullName(&b.common, name)
	}
	if !ok {
		return true
	}
	// Record the caller stack so failures can be attributed.
	var pc [maxStackLen]uintptr
	n := runtime.Callers(2, pc[:])
	sub := &B{
		common: common{
			signal:  make(chan bool),
			name:    benchName,
			parent:  &b.common,
			level:   b.level + 1,
			creator: pc[:n],
			w:       b.w,
			chatty:  b.chatty,
			bench:   true,
		},
		importPath: b.importPath,
		benchFunc:  f,
		benchTime:  b.benchTime,
		bstate:     b.bstate,
	}
	sub.setOutputWriter()
	if partial {
		// Partial name match, like -bench=X/Y matching BenchmarkX.
		// Only process sub-benchmarks, if any.
		sub.hasSub.Store(true)
	}

	if b.chatty != nil {
		// In verbose mode the environment header may not have been printed
		// yet (e.g. func Benchmark); print it at most once.
		labelsOnce.Do(func() {
			fmt.Printf("goos: %s\n", runtime.GOOS)
			fmt.Printf("goarch: %s\n", runtime.GOARCH)
			if b.importPath != "" {
				fmt.Printf("pkg: %s\n", b.importPath)
			}
			if cpu := sysinfo.CPUName(); cpu != "" {
				fmt.Printf("cpu: %s\n", cpu)
			}
		})

		if !hideStdoutForTesting {
			if b.chatty.json {
				b.chatty.Updatef(benchName, "=== RUN   %s\n", benchName)
			}
			fmt.Println(benchName)
		}
	}

	// Measure the subbenchmark only if its first iteration succeeds and it
	// has no subbenchmarks of its own.
	if sub.run1() {
		sub.run()
	}
	b.add(sub.result)
	return !sub.failed
}
   873  
   874  // add simulates running benchmarks in sequence in a single iteration. It is
   875  // used to give some meaningful results in case func Benchmark is used in
   876  // combination with Run.
   877  func (b *B) add(other BenchmarkResult) {
   878  	r := &b.result
   879  	// The aggregated BenchmarkResults resemble running all subbenchmarks as
   880  	// in sequence in a single benchmark.
   881  	r.N = 1
   882  	r.T += time.Duration(other.NsPerOp())
   883  	if other.Bytes == 0 {
   884  		// Summing Bytes is meaningless in aggregate if not all subbenchmarks
   885  		// set it.
   886  		b.missingBytes = true
   887  		r.Bytes = 0
   888  	}
   889  	if !b.missingBytes {
   890  		r.Bytes += other.Bytes
   891  	}
   892  	r.MemAllocs += uint64(other.AllocsPerOp())
   893  	r.MemBytes += uint64(other.AllocedBytesPerOp())
   894  }
   895  
   896  // trimOutput shortens the output from a benchmark, which can be very long.
   897  func (b *B) trimOutput() {
   898  	// The output is likely to appear multiple times because the benchmark
   899  	// is run multiple times, but at least it will be seen. This is not a big deal
   900  	// because benchmarks rarely print, but just in case, we trim it if it's too long.
   901  	const maxNewlines = 10
   902  	for nlCount, j := 0, 0; j < len(b.output); j++ {
   903  		if b.output[j] == '\n' {
   904  			nlCount++
   905  			if nlCount >= maxNewlines {
   906  				b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
   907  				break
   908  			}
   909  		}
   910  	}
   911  }
   912  
// A PB is used by RunParallel for running parallel benchmarks.
type PB struct {
	globalN *atomic.Uint64 // shared between all worker goroutines iteration counter
	grain   uint64         // acquire that many iterations from globalN at once
	cache   uint64         // local cache of acquired iterations; decremented by Next
	bN      uint64         // total number of iterations to execute (b.N)
}
   920  
   921  // Next reports whether there are more iterations to execute.
   922  func (pb *PB) Next() bool {
   923  	if pb.cache == 0 {
   924  		n := pb.globalN.Add(pb.grain)
   925  		if n <= pb.bN {
   926  			pb.cache = pb.grain
   927  		} else if n < pb.bN+pb.grain {
   928  			pb.cache = pb.bN + pb.grain - n
   929  		} else {
   930  			return false
   931  		}
   932  	}
   933  	pb.cache--
   934  	return true
   935  }
   936  
// RunParallel runs a benchmark in parallel.
// It creates multiple goroutines and distributes b.N iterations among them.
// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for
// non-CPU-bound benchmarks, call [B.SetParallelism] before RunParallel.
// RunParallel is usually used with the go test -cpu flag.
//
// The body function will be run in each goroutine. It should set up any
// goroutine-local state and then iterate until pb.Next returns false.
// It should not use the [B.StartTimer], [B.StopTimer], or [B.ResetTimer] functions,
// because they have global effect. It should also not call [B.Run].
//
// RunParallel reports ns/op values as wall time for the benchmark as a whole,
// not the sum of wall time or CPU time over each parallel goroutine.
func (b *B) RunParallel(body func(*PB)) {
	if b.N == 0 {
		return // Nothing to do when probing.
	}
	// Calculate grain size as number of iterations that take ~100µs.
	// 100µs is enough to amortize the overhead and provide sufficient
	// dynamic load balancing.
	grain := uint64(0)
	if b.previousN > 0 && b.previousDuration > 0 {
		// iterations per 100µs, estimated from the previous run.
		grain = 1e5 * uint64(b.previousN) / uint64(b.previousDuration)
	}
	if grain < 1 {
		grain = 1
	}
	// We expect the inner loop and function call to take at least 10ns,
	// so do not do more than 100µs/10ns=1e4 iterations.
	if grain > 1e4 {
		grain = 1e4
	}

	// n is the shared iteration counter; each worker's PB claims grain-sized
	// batches from it via PB.Next.
	var n atomic.Uint64
	numProcs := b.parallelism * runtime.GOMAXPROCS(0)
	var wg sync.WaitGroup
	wg.Add(numProcs)
	for p := 0; p < numProcs; p++ {
		go func() {
			defer wg.Done()
			pb := &PB{
				globalN: &n,
				grain:   grain,
				bN:      uint64(b.N),
			}
			body(pb)
		}()
	}
	wg.Wait()
	// PB.Next returns false only after the shared counter has been pushed
	// past b.N. If no worker ever saw false, n never exceeded b.N, meaning
	// every body returned early without draining its iterations.
	if n.Load() <= uint64(b.N) && !b.Failed() {
		b.Fatal("RunParallel: body exited without pb.Next() == false")
	}
}
   990  
   991  // SetParallelism sets the number of goroutines used by [B.RunParallel] to p*GOMAXPROCS.
   992  // There is usually no need to call SetParallelism for CPU-bound benchmarks.
   993  // If p is less than 1, this call will have no effect.
   994  func (b *B) SetParallelism(p int) {
   995  	if p >= 1 {
   996  		b.parallelism = p
   997  	}
   998  }
   999  
  1000  // Benchmark benchmarks a single function. It is useful for creating
  1001  // custom benchmarks that do not use the "go test" command.
  1002  //
  1003  // If f depends on testing flags, then [Init] must be used to register
  1004  // those flags before calling Benchmark and before calling [flag.Parse].
  1005  //
  1006  // If f calls Run, the result will be an estimate of running all its
  1007  // subbenchmarks that don't call Run in sequence in a single benchmark.
  1008  func Benchmark(f func(b *B)) BenchmarkResult {
  1009  	b := &B{
  1010  		common: common{
  1011  			signal: make(chan bool),
  1012  			w:      discard{},
  1013  		},
  1014  		benchFunc: f,
  1015  		benchTime: benchTime,
  1016  	}
  1017  	b.setOutputWriter()
  1018  	if b.run1() {
  1019  		b.run()
  1020  	}
  1021  	return b.result
  1022  }
  1023  
  1024  type discard struct{}
  1025  
  1026  func (discard) Write(b []byte) (n int, err error) { return len(b), nil }
  1027  

View as plain text