...

Source file src/testing/benchmark.go

Documentation: testing

		 1  // Copyright 2009 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  package testing
		 6  
		 7  import (
		 8  	"flag"
		 9  	"fmt"
		10  	"internal/race"
		11  	"internal/sysinfo"
		12  	"io"
		13  	"math"
		14  	"os"
		15  	"runtime"
		16  	"sort"
		17  	"strconv"
		18  	"strings"
		19  	"sync"
		20  	"sync/atomic"
		21  	"time"
		22  	"unicode"
		23  )
		24  
// initBenchmarkFlags registers the benchmark-related command-line flags
// (-test.bench, -test.benchmem, -test.benchtime) on the default flag set.
// It must run before flag.Parse for the flags to take effect; registration
// is deferred to this function rather than done in an init so that merely
// importing the testing package does not add test flags to non-test binaries.
func initBenchmarkFlags() {
	matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
	benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
	// benchTime implements flag.Value so it can accept either a duration
	// ("2s") or an iteration count ("100x").
	flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`")
}
		30  
var (
	matchBenchmarks *string // -test.bench regexp; nil until initBenchmarkFlags runs
	benchmarkMemory *bool   // -test.benchmem: report allocation stats for every benchmark

	benchTime = benchTimeFlag{d: 1 * time.Second} // changed during test of testing package
)
		37  
		38  type benchTimeFlag struct {
		39  	d time.Duration
		40  	n int
		41  }
		42  
		43  func (f *benchTimeFlag) String() string {
		44  	if f.n > 0 {
		45  		return fmt.Sprintf("%dx", f.n)
		46  	}
		47  	return time.Duration(f.d).String()
		48  }
		49  
		50  func (f *benchTimeFlag) Set(s string) error {
		51  	if strings.HasSuffix(s, "x") {
		52  		n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
		53  		if err != nil || n <= 0 {
		54  			return fmt.Errorf("invalid count")
		55  		}
		56  		*f = benchTimeFlag{n: int(n)}
		57  		return nil
		58  	}
		59  	d, err := time.ParseDuration(s)
		60  	if err != nil || d <= 0 {
		61  		return fmt.Errorf("invalid duration")
		62  	}
		63  	*f = benchTimeFlag{d: d}
		64  	return nil
		65  }
		66  
// Global lock to ensure only one benchmark runs at a time.
// runN holds it for the duration of a run; B.Run releases and reacquires
// it around sub-benchmark execution.
var benchmarkLock sync.Mutex

// Used for every benchmark for measuring memory.
// Safe to share because benchmarkLock serializes all benchmark runs.
var memStats runtime.MemStats
		72  
// InternalBenchmark is an internal type but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
type InternalBenchmark struct {
	Name string     // benchmark name as reported and matched by -test.bench
	F    func(b *B) // the benchmark function itself
}
		79  
// B is a type passed to Benchmark functions to manage benchmark
// timing and to specify the number of iterations to run.
//
// A benchmark ends when its Benchmark function returns or calls any of the methods
// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be called
// only from the goroutine running the Benchmark function.
// The other reporting methods, such as the variations of Log and Error,
// may be called simultaneously from multiple goroutines.
//
// Like in tests, benchmark logs are accumulated during execution
// and dumped to standard output when done. Unlike in tests, benchmark logs
// are always printed, so as not to hide output whose existence may be
// affecting benchmark results.
type B struct {
	common
	importPath       string // import path of the package containing the benchmark
	context          *benchContext
	N                int           // number of iterations the benchmark function must perform
	previousN        int           // number of iterations in the previous run
	previousDuration time.Duration // total duration of the previous run
	benchFunc        func(b *B)    // function being benchmarked
	benchTime        benchTimeFlag // per-B copy of the -test.benchtime setting
	bytes            int64         // set by SetBytes; enables the MB/s metric
	missingBytes     bool          // one of the subbenchmarks does not have bytes set.
	timerOn          bool          // whether the benchmark timer is currently running
	showAllocResult  bool          // set by ReportAllocs; forces MemString output
	result           BenchmarkResult
	parallelism      int // RunParallel creates parallelism*GOMAXPROCS goroutines
	// The initial states of memStats.Mallocs and memStats.TotalAlloc.
	startAllocs uint64
	startBytes  uint64
	// The net total of this test after being run.
	netAllocs uint64
	netBytes  uint64
	// Extra metrics collected by ReportMetric.
	extra map[string]float64
}
	 117  
	 118  // StartTimer starts timing a test. This function is called automatically
	 119  // before a benchmark starts, but it can also be used to resume timing after
	 120  // a call to StopTimer.
	 121  func (b *B) StartTimer() {
	 122  	if !b.timerOn {
	 123  		runtime.ReadMemStats(&memStats)
	 124  		b.startAllocs = memStats.Mallocs
	 125  		b.startBytes = memStats.TotalAlloc
	 126  		b.start = time.Now()
	 127  		b.timerOn = true
	 128  	}
	 129  }
	 130  
	 131  // StopTimer stops timing a test. This can be used to pause the timer
	 132  // while performing complex initialization that you don't
	 133  // want to measure.
	 134  func (b *B) StopTimer() {
	 135  	if b.timerOn {
	 136  		b.duration += time.Since(b.start)
	 137  		runtime.ReadMemStats(&memStats)
	 138  		b.netAllocs += memStats.Mallocs - b.startAllocs
	 139  		b.netBytes += memStats.TotalAlloc - b.startBytes
	 140  		b.timerOn = false
	 141  	}
	 142  }
	 143  
// ResetTimer zeroes the elapsed benchmark time and memory allocation counters
// and deletes user-reported metrics.
// It does not affect whether the timer is running.
func (b *B) ResetTimer() {
	if b.extra == nil {
		// Allocate the extra map before reading memory stats.
		// Pre-size it to make more allocation unlikely.
		b.extra = make(map[string]float64, 16)
	} else {
		// Reuse the existing map (clearing it key by key) so the reset
		// itself does not allocate while counters may be live.
		for k := range b.extra {
			delete(b.extra, k)
		}
	}
	if b.timerOn {
		// The timer keeps running: re-baseline the allocation counters and
		// start time so work done before the reset is not counted.
		runtime.ReadMemStats(&memStats)
		b.startAllocs = memStats.Mallocs
		b.startBytes = memStats.TotalAlloc
		b.start = time.Now()
	}
	b.duration = 0
	b.netAllocs = 0
	b.netBytes = 0
}
	 167  
// SetBytes records the number of bytes processed in a single operation.
// If this is called, the benchmark will report ns/op and MB/s.
// The value is read when the final BenchmarkResult is assembled.
func (b *B) SetBytes(n int64) { b.bytes = n }
	 171  
// ReportAllocs enables malloc statistics for this benchmark.
// It is equivalent to setting -test.benchmem, but it only affects the
// benchmark function that calls ReportAllocs.
func (b *B) ReportAllocs() {
	// Checked alongside *benchmarkMemory when results are printed.
	b.showAllocResult = true
}
	 178  
// runN runs a single benchmark for the specified number of iterations.
// It holds benchmarkLock for the whole run so that only one benchmark
// executes at a time.
func (b *B) runN(n int) {
	benchmarkLock.Lock()
	defer benchmarkLock.Unlock()
	defer b.runCleanup(normalPanic)
	// Try to get a comparable environment for each run
	// by clearing garbage from previous runs.
	runtime.GC()
	// Record the race-detector error count as a negative baseline so that
	// after the run, b.raceErrors holds only the races from this run.
	b.raceErrors = -race.Errors()
	b.N = n
	b.parallelism = 1
	b.ResetTimer()
	b.StartTimer()
	b.benchFunc(b)
	b.StopTimer()
	// Remember this run's size and duration: launch uses them to predict
	// the next iteration count, RunParallel to size its work grain.
	b.previousN = n
	b.previousDuration = b.duration
	b.raceErrors += race.Errors()
	if b.raceErrors > 0 {
		b.Errorf("race detected during execution of benchmark")
	}
}
	 201  
	 202  func min(x, y int64) int64 {
	 203  	if x > y {
	 204  		return y
	 205  	}
	 206  	return x
	 207  }
	 208  
	 209  func max(x, y int64) int64 {
	 210  	if x < y {
	 211  		return y
	 212  	}
	 213  	return x
	 214  }
	 215  
// run1 runs the first iteration of benchFunc. It reports whether more
// iterations of this benchmark should be run.
func (b *B) run1() bool {
	if ctx := b.context; ctx != nil {
		// Extend maxLen, if needed.
		if n := len(b.name) + ctx.extLen + 1; n > ctx.maxLen {
			ctx.maxLen = n + 8 // Add additional slack to avoid too many jumps in size.
		}
	}
	// Run the single iteration in its own goroutine so that FailNow's
	// runtime.Goexit terminates only the benchmark, not the caller.
	go func() {
		// Signal that we're done whether we return normally
		// or by FailNow's runtime.Goexit.
		defer func() {
			b.signal <- true
		}()

		b.runN(1)
	}()
	<-b.signal
	if b.failed {
		fmt.Fprintf(b.w, "--- FAIL: %s\n%s", b.name, b.output)
		return false
	}
	// Only print the output if we know we are not going to proceed.
	// Otherwise it is printed in processBench.
	b.mu.RLock()
	finished := b.finished
	b.mu.RUnlock()
	// A benchmark with sub-benchmarks, or one that already finished
	// (e.g. skipped), is not measured further.
	if atomic.LoadInt32(&b.hasSub) != 0 || finished {
		tag := "BENCH"
		if b.skipped {
			tag = "SKIP"
		}
		if b.chatty != nil && (len(b.output) > 0 || finished) {
			b.trimOutput()
			fmt.Fprintf(b.w, "--- %s: %s\n%s", tag, b.name, b.output)
		}
		return false
	}
	return true
}
	 257  
	 258  var labelsOnce sync.Once
	 259  
// run executes the benchmark in a separate goroutine, including all of its
// subbenchmarks. b must not have subbenchmarks.
func (b *B) run() {
	// Print the environment labels exactly once per process.
	labelsOnce.Do(func() {
		fmt.Fprintf(b.w, "goos: %s\n", runtime.GOOS)
		fmt.Fprintf(b.w, "goarch: %s\n", runtime.GOARCH)
		if b.importPath != "" {
			fmt.Fprintf(b.w, "pkg: %s\n", b.importPath)
		}
		if cpu := sysinfo.CPU.Name(); cpu != "" {
			fmt.Fprintf(b.w, "cpu: %s\n", cpu)
		}
	})
	if b.context != nil {
		// Running go test --test.bench
		b.context.processBench(b) // Must call doBench.
	} else {
		// Running func Benchmark.
		b.doBench()
	}
}
	 281  
// doBench runs launch in a fresh goroutine, waits for it to signal
// completion, and returns the collected result.
func (b *B) doBench() BenchmarkResult {
	go b.launch()
	<-b.signal // launch signals on b.signal whether it returns or Goexits
	return b.result
}
	 287  
// launch launches the benchmark function. It gradually increases the number
// of benchmark iterations until the benchmark runs for the requested benchtime.
// launch is run by the doBench function as a separate goroutine.
// run1 must have been called on b.
func (b *B) launch() {
	// Signal that we're done whether we return normally
	// or by FailNow's runtime.Goexit.
	defer func() {
		b.signal <- true
	}()

	// Run the benchmark for at least the specified amount of time.
	if b.benchTime.n > 0 {
		// -benchtime=NNNx: run exactly the requested iteration count.
		b.runN(b.benchTime.n)
	} else {
		d := b.benchTime.d
		for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
			last := n
			// Predict required iterations.
			goalns := d.Nanoseconds()
			prevIters := int64(b.N)
			prevns := b.duration.Nanoseconds()
			if prevns <= 0 {
				// Round up, to avoid div by zero.
				prevns = 1
			}
			// Order of operations matters.
			// For very fast benchmarks, prevIters ~= prevns.
			// If you divide first, you get 0 or 1,
			// which can hide an order of magnitude in execution time.
			// So multiply first, then divide.
			n = goalns * prevIters / prevns
			// Run more iterations than we think we'll need (1.2x).
			n += n / 5
			// Don't grow too fast in case we had timing errors previously.
			n = min(n, 100*last)
			// Be sure to run at least one more than last time.
			n = max(n, last+1)
			// Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
			n = min(n, 1e9)
			b.runN(int(n))
		}
	}
	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
}
	 333  
	 334  // ReportMetric adds "n unit" to the reported benchmark results.
	 335  // If the metric is per-iteration, the caller should divide by b.N,
	 336  // and by convention units should end in "/op".
	 337  // ReportMetric overrides any previously reported value for the same unit.
	 338  // ReportMetric panics if unit is the empty string or if unit contains
	 339  // any whitespace.
	 340  // If unit is a unit normally reported by the benchmark framework itself
	 341  // (such as "allocs/op"), ReportMetric will override that metric.
	 342  // Setting "ns/op" to 0 will suppress that built-in metric.
	 343  func (b *B) ReportMetric(n float64, unit string) {
	 344  	if unit == "" {
	 345  		panic("metric unit must not be empty")
	 346  	}
	 347  	if strings.IndexFunc(unit, unicode.IsSpace) >= 0 {
	 348  		panic("metric unit must not contain whitespace")
	 349  	}
	 350  	b.extra[unit] = n
	 351  }
	 352  
	 353  // BenchmarkResult contains the results of a benchmark run.
	 354  type BenchmarkResult struct {
	 355  	N				 int					 // The number of iterations.
	 356  	T				 time.Duration // The total time taken.
	 357  	Bytes		 int64				 // Bytes processed in one iteration.
	 358  	MemAllocs uint64				// The total number of memory allocations.
	 359  	MemBytes	uint64				// The total number of bytes allocated.
	 360  
	 361  	// Extra records additional metrics reported by ReportMetric.
	 362  	Extra map[string]float64
	 363  }
	 364  
	 365  // NsPerOp returns the "ns/op" metric.
	 366  func (r BenchmarkResult) NsPerOp() int64 {
	 367  	if v, ok := r.Extra["ns/op"]; ok {
	 368  		return int64(v)
	 369  	}
	 370  	if r.N <= 0 {
	 371  		return 0
	 372  	}
	 373  	return r.T.Nanoseconds() / int64(r.N)
	 374  }
	 375  
	 376  // mbPerSec returns the "MB/s" metric.
	 377  func (r BenchmarkResult) mbPerSec() float64 {
	 378  	if v, ok := r.Extra["MB/s"]; ok {
	 379  		return v
	 380  	}
	 381  	if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
	 382  		return 0
	 383  	}
	 384  	return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
	 385  }
	 386  
	 387  // AllocsPerOp returns the "allocs/op" metric,
	 388  // which is calculated as r.MemAllocs / r.N.
	 389  func (r BenchmarkResult) AllocsPerOp() int64 {
	 390  	if v, ok := r.Extra["allocs/op"]; ok {
	 391  		return int64(v)
	 392  	}
	 393  	if r.N <= 0 {
	 394  		return 0
	 395  	}
	 396  	return int64(r.MemAllocs) / int64(r.N)
	 397  }
	 398  
	 399  // AllocedBytesPerOp returns the "B/op" metric,
	 400  // which is calculated as r.MemBytes / r.N.
	 401  func (r BenchmarkResult) AllocedBytesPerOp() int64 {
	 402  	if v, ok := r.Extra["B/op"]; ok {
	 403  		return int64(v)
	 404  	}
	 405  	if r.N <= 0 {
	 406  		return 0
	 407  	}
	 408  	return int64(r.MemBytes) / int64(r.N)
	 409  }
	 410  
	 411  // String returns a summary of the benchmark results.
	 412  // It follows the benchmark result line format from
	 413  // https://golang.org/design/14313-benchmark-format, not including the
	 414  // benchmark name.
	 415  // Extra metrics override built-in metrics of the same name.
	 416  // String does not include allocs/op or B/op, since those are reported
	 417  // by MemString.
	 418  func (r BenchmarkResult) String() string {
	 419  	buf := new(strings.Builder)
	 420  	fmt.Fprintf(buf, "%8d", r.N)
	 421  
	 422  	// Get ns/op as a float.
	 423  	ns, ok := r.Extra["ns/op"]
	 424  	if !ok {
	 425  		ns = float64(r.T.Nanoseconds()) / float64(r.N)
	 426  	}
	 427  	if ns != 0 {
	 428  		buf.WriteByte('\t')
	 429  		prettyPrint(buf, ns, "ns/op")
	 430  	}
	 431  
	 432  	if mbs := r.mbPerSec(); mbs != 0 {
	 433  		fmt.Fprintf(buf, "\t%7.2f MB/s", mbs)
	 434  	}
	 435  
	 436  	// Print extra metrics that aren't represented in the standard
	 437  	// metrics.
	 438  	var extraKeys []string
	 439  	for k := range r.Extra {
	 440  		switch k {
	 441  		case "ns/op", "MB/s", "B/op", "allocs/op":
	 442  			// Built-in metrics reported elsewhere.
	 443  			continue
	 444  		}
	 445  		extraKeys = append(extraKeys, k)
	 446  	}
	 447  	sort.Strings(extraKeys)
	 448  	for _, k := range extraKeys {
	 449  		buf.WriteByte('\t')
	 450  		prettyPrint(buf, r.Extra[k], k)
	 451  	}
	 452  	return buf.String()
	 453  }
	 454  
	 455  func prettyPrint(w io.Writer, x float64, unit string) {
	 456  	// Print all numbers with 10 places before the decimal point
	 457  	// and small numbers with four sig figs. Field widths are
	 458  	// chosen to fit the whole part in 10 places while aligning
	 459  	// the decimal point of all fractional formats.
	 460  	var format string
	 461  	switch y := math.Abs(x); {
	 462  	case y == 0 || y >= 999.95:
	 463  		format = "%10.0f %s"
	 464  	case y >= 99.995:
	 465  		format = "%12.1f %s"
	 466  	case y >= 9.9995:
	 467  		format = "%13.2f %s"
	 468  	case y >= 0.99995:
	 469  		format = "%14.3f %s"
	 470  	case y >= 0.099995:
	 471  		format = "%15.4f %s"
	 472  	case y >= 0.0099995:
	 473  		format = "%16.5f %s"
	 474  	case y >= 0.00099995:
	 475  		format = "%17.6f %s"
	 476  	default:
	 477  		format = "%18.7f %s"
	 478  	}
	 479  	fmt.Fprintf(w, format, x, unit)
	 480  }
	 481  
// MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'.
// It is appended to String's output when -test.benchmem or ReportAllocs is in effect.
func (r BenchmarkResult) MemString() string {
	return fmt.Sprintf("%8d B/op\t%8d allocs/op",
		r.AllocedBytesPerOp(), r.AllocsPerOp())
}
	 487  
	 488  // benchmarkName returns full name of benchmark including procs suffix.
	 489  func benchmarkName(name string, n int) string {
	 490  	if n != 1 {
	 491  		return fmt.Sprintf("%s-%d", name, n)
	 492  	}
	 493  	return name
	 494  }
	 495  
// benchContext holds state shared by all top-level benchmarks in a
// "go test" run: the -test.bench matcher and name-column layout widths.
type benchContext struct {
	match *matcher // compiled -test.bench pattern matcher

	maxLen int // The largest recorded benchmark name.
	extLen int // Maximum extension length.
}
	 502  
// RunBenchmarks is an internal function but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
// It delegates to runBenchmarks with an empty import path.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
	runBenchmarks("", matchString, benchmarks)
}
	 508  
// runBenchmarks filters benchmarks against -test.bench, wires up the shared
// benchContext, and drives all matching benchmarks through a synthetic
// top-level "Main" benchmark. It reports whether all benchmarks passed.
func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool {
	// If no flag was specified, don't run benchmarks.
	if len(*matchBenchmarks) == 0 {
		return true
	}
	// Collect matching benchmarks and determine longest name.
	maxprocs := 1
	for _, procs := range cpuList {
		if procs > maxprocs {
			maxprocs = procs
		}
	}
	ctx := &benchContext{
		match: newMatcher(matchString, *matchBenchmarks, "-test.bench"),
		// extLen is the width of the widest possible "-procs" suffix.
		extLen: len(benchmarkName("", maxprocs)),
	}
	var bs []InternalBenchmark
	for _, Benchmark := range benchmarks {
		if _, matched, _ := ctx.match.fullName(nil, Benchmark.Name); matched {
			bs = append(bs, Benchmark)
			benchName := benchmarkName(Benchmark.Name, maxprocs)
			if l := len(benchName) + ctx.extLen + 1; l > ctx.maxLen {
				ctx.maxLen = l
			}
		}
	}
	// "Main" is a synthetic parent whose function runs each matching
	// benchmark as a sub-benchmark via B.Run.
	main := &B{
		common: common{
			name:  "Main",
			w:     os.Stdout,
			bench: true,
		},
		importPath: importPath,
		benchFunc: func(b *B) {
			for _, Benchmark := range bs {
				b.Run(Benchmark.Name, Benchmark.F)
			}
		},
		benchTime: benchTime,
		context:   ctx,
	}
	if Verbose() {
		main.chatty = newChattyPrinter(main.w)
	}
	main.runN(1)
	return !main.failed
}
	 556  
// processBench runs bench b for the configured CPU counts (-cpu) and repeat
// count (-count) and prints the results.
func (ctx *benchContext) processBench(b *B) {
	for i, procs := range cpuList {
		for j := uint(0); j < *count; j++ {
			runtime.GOMAXPROCS(procs)
			benchName := benchmarkName(b.name, procs)

			// If it's chatty, we've already printed this information.
			if b.chatty == nil {
				fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
			}
			// Recompute the running time for all but the first iteration.
			// A fresh B is needed so previous timing/allocation state does
			// not leak into this repetition.
			if i > 0 || j > 0 {
				b = &B{
					common: common{
						signal: make(chan bool),
						name:   b.name,
						w:      b.w,
						chatty: b.chatty,
						bench:  true,
					},
					benchFunc: b.benchFunc,
					benchTime: b.benchTime,
				}
				b.run1()
			}
			r := b.doBench()
			if b.failed {
				// The output could be very long here, but probably isn't.
				// We print it all, regardless, because we don't want to trim the reason
				// the benchmark failed.
				fmt.Fprintf(b.w, "--- FAIL: %s\n%s", benchName, b.output)
				continue
			}
			results := r.String()
			if b.chatty != nil {
				fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
			}
			if *benchmarkMemory || b.showAllocResult {
				results += "\t" + r.MemString()
			}
			fmt.Fprintln(b.w, results)
			// Unlike with tests, we ignore the -chatty flag and always print output for
			// benchmarks since the output generation time will skew the results.
			if len(b.output) > 0 {
				b.trimOutput()
				fmt.Fprintf(b.w, "--- BENCH: %s\n%s", benchName, b.output)
			}
			// Warn when the benchmark itself changed GOMAXPROCS and left
			// it set, since that would skew subsequent runs.
			if p := runtime.GOMAXPROCS(-1); p != procs {
				fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
			}
		}
	}
}
	 611  
// Run benchmarks f as a subbenchmark with the given name. It reports
// whether there were any failures.
//
// A subbenchmark is like any other benchmark. A benchmark that calls Run at
// least once will not be measured itself and will be called once with N=1.
func (b *B) Run(name string, f func(b *B)) bool {
	// Since b has subbenchmarks, we will no longer run it as a benchmark itself.
	// Release the lock and acquire it on exit to ensure locks stay paired.
	// (runN acquired benchmarkLock before calling benchFunc, which led here.)
	atomic.StoreInt32(&b.hasSub, 1)
	benchmarkLock.Unlock()
	defer benchmarkLock.Lock()

	benchName, ok, partial := b.name, true, false
	if b.context != nil {
		benchName, ok, partial = b.context.match.fullName(&b.common, name)
	}
	if !ok {
		// Name filtered out by -test.bench; nothing to run, not a failure.
		return true
	}
	// Capture the caller's stack so failures can be attributed.
	var pc [maxStackLen]uintptr
	n := runtime.Callers(2, pc[:])
	sub := &B{
		common: common{
			signal:  make(chan bool),
			name:    benchName,
			parent:  &b.common,
			level:   b.level + 1,
			creator: pc[:n],
			w:       b.w,
			chatty:  b.chatty,
			bench:   true,
		},
		importPath: b.importPath,
		benchFunc:  f,
		benchTime:  b.benchTime,
		context:    b.context,
	}
	if partial {
		// Partial name match, like -bench=X/Y matching BenchmarkX.
		// Only process sub-benchmarks, if any.
		atomic.StoreInt32(&sub.hasSub, 1)
	}

	if b.chatty != nil {
		// Print the environment labels once per process (also done in run,
		// for the non-chatty path).
		labelsOnce.Do(func() {
			fmt.Printf("goos: %s\n", runtime.GOOS)
			fmt.Printf("goarch: %s\n", runtime.GOARCH)
			if b.importPath != "" {
				fmt.Printf("pkg: %s\n", b.importPath)
			}
			if cpu := sysinfo.CPU.Name(); cpu != "" {
				fmt.Printf("cpu: %s\n", cpu)
			}
		})

		fmt.Println(benchName)
	}

	// First probe iteration; only continue measuring if it succeeded and
	// the sub-benchmark has no sub-benchmarks of its own.
	if sub.run1() {
		sub.run()
	}
	b.add(sub.result)
	return !sub.failed
}
	 676  
// add simulates running benchmarks in sequence in a single iteration. It is
// used to give some meaningful results in case func Benchmark is used in
// combination with Run.
func (b *B) add(other BenchmarkResult) {
	r := &b.result
	// The aggregated BenchmarkResults resemble running all subbenchmarks as
	// in sequence in a single benchmark.
	r.N = 1
	// Accumulate per-op time rather than total time, since N is forced to 1.
	r.T += time.Duration(other.NsPerOp())
	if other.Bytes == 0 {
		// Summing Bytes is meaningless in aggregate if not all subbenchmarks
		// set it.
		b.missingBytes = true
		r.Bytes = 0
	}
	if !b.missingBytes {
		r.Bytes += other.Bytes
	}
	r.MemAllocs += uint64(other.AllocsPerOp())
	r.MemBytes += uint64(other.AllocedBytesPerOp())
}
	 698  
	 699  // trimOutput shortens the output from a benchmark, which can be very long.
	 700  func (b *B) trimOutput() {
	 701  	// The output is likely to appear multiple times because the benchmark
	 702  	// is run multiple times, but at least it will be seen. This is not a big deal
	 703  	// because benchmarks rarely print, but just in case, we trim it if it's too long.
	 704  	const maxNewlines = 10
	 705  	for nlCount, j := 0, 0; j < len(b.output); j++ {
	 706  		if b.output[j] == '\n' {
	 707  			nlCount++
	 708  			if nlCount >= maxNewlines {
	 709  				b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
	 710  				break
	 711  			}
	 712  		}
	 713  	}
	 714  }
	 715  
	 716  // A PB is used by RunParallel for running parallel benchmarks.
	 717  type PB struct {
	 718  	globalN *uint64 // shared between all worker goroutines iteration counter
	 719  	grain	 uint64	// acquire that many iterations from globalN at once
	 720  	cache	 uint64	// local cache of acquired iterations
	 721  	bN			uint64	// total number of iterations to execute (b.N)
	 722  }
	 723  
	 724  // Next reports whether there are more iterations to execute.
	 725  func (pb *PB) Next() bool {
	 726  	if pb.cache == 0 {
	 727  		n := atomic.AddUint64(pb.globalN, pb.grain)
	 728  		if n <= pb.bN {
	 729  			pb.cache = pb.grain
	 730  		} else if n < pb.bN+pb.grain {
	 731  			pb.cache = pb.bN + pb.grain - n
	 732  		} else {
	 733  			return false
	 734  		}
	 735  	}
	 736  	pb.cache--
	 737  	return true
	 738  }
	 739  
// RunParallel runs a benchmark in parallel.
// It creates multiple goroutines and distributes b.N iterations among them.
// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for
// non-CPU-bound benchmarks, call SetParallelism before RunParallel.
// RunParallel is usually used with the go test -cpu flag.
//
// The body function will be run in each goroutine. It should set up any
// goroutine-local state and then iterate until pb.Next returns false.
// It should not use the StartTimer, StopTimer, or ResetTimer functions,
// because they have global effect. It should also not call Run.
func (b *B) RunParallel(body func(*PB)) {
	if b.N == 0 {
		return // Nothing to do when probing.
	}
	// Calculate grain size as number of iterations that take ~100µs.
	// 100µs is enough to amortize the overhead and provide sufficient
	// dynamic load balancing.
	grain := uint64(0)
	if b.previousN > 0 && b.previousDuration > 0 {
		// previousDuration is in nanoseconds, so 1e5 ns = 100µs worth.
		grain = 1e5 * uint64(b.previousN) / uint64(b.previousDuration)
	}
	if grain < 1 {
		grain = 1
	}
	// We expect the inner loop and function call to take at least 10ns,
	// so do not do more than 100µs/10ns=1e4 iterations.
	if grain > 1e4 {
		grain = 1e4
	}

	// n is the shared iteration counter all workers claim grains from.
	n := uint64(0)
	numProcs := b.parallelism * runtime.GOMAXPROCS(0)
	var wg sync.WaitGroup
	wg.Add(numProcs)
	for p := 0; p < numProcs; p++ {
		go func() {
			defer wg.Done()
			// Each worker gets its own PB with a private cache but a
			// shared counter.
			pb := &PB{
				globalN: &n,
				grain:   grain,
				bN:      uint64(b.N),
			}
			body(pb)
		}()
	}
	wg.Wait()
	// If the counter never exceeded b.N, some body returned before
	// draining its PB — i.e. without seeing pb.Next() == false.
	if n <= uint64(b.N) && !b.Failed() {
		b.Fatal("RunParallel: body exited without pb.Next() == false")
	}
}
	 790  
// SetParallelism sets the number of goroutines used by RunParallel to p*GOMAXPROCS.
// There is usually no need to call SetParallelism for CPU-bound benchmarks.
// If p is less than 1, this call will have no effect.
func (b *B) SetParallelism(p int) {
	// Values below 1 are ignored; runN resets parallelism to 1 per run.
	if p >= 1 {
		b.parallelism = p
	}
}
	 799  
// Benchmark benchmarks a single function. It is useful for creating
// custom benchmarks that do not use the "go test" command.
//
// If f depends on testing flags, then Init must be used to register
// those flags before calling Benchmark and before calling flag.Parse.
//
// If f calls Run, the result will be an estimate of running all its
// subbenchmarks that don't call Run in sequence in a single benchmark.
func Benchmark(f func(b *B)) BenchmarkResult {
	b := &B{
		common: common{
			signal: make(chan bool),
			// Logs are discarded: there is no test harness to print to.
			w: discard{},
		},
		benchFunc: f,
		benchTime: benchTime,
	}
	// Probe with one iteration; measure fully only if the probe succeeded
	// and f has no sub-benchmarks of its own.
	if b.run1() {
		b.run()
	}
	return b.result
}
	 822  
	 823  type discard struct{}
	 824  
	 825  func (discard) Write(b []byte) (n int, err error) { return len(b), nil }
	 826  

View as plain text