...

Source file src/runtime/pprof/pprof_test.go

Documentation: runtime/pprof

		 1  // Copyright 2011 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  //go:build !js
		 6  // +build !js
		 7  
		 8  package pprof
		 9  
		10  import (
		11  	"bytes"
		12  	"context"
		13  	"fmt"
		14  	"internal/profile"
		15  	"internal/testenv"
		16  	"io"
		17  	"math"
		18  	"math/big"
		19  	"os"
		20  	"os/exec"
		21  	"regexp"
		22  	"runtime"
		23  	"strings"
		24  	"sync"
		25  	"sync/atomic"
		26  	"testing"
		27  	"time"
		28  	_ "unsafe"
		29  )
		30  
		31  func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
		32  	// We only need to get one 100 Hz clock tick, so we've got
		33  	// a large safety buffer.
		34  	// But do at least 500 iterations (which should take about 100ms),
		35  	// otherwise TestCPUProfileMultithreaded can fail if only one
		36  	// thread is scheduled during the testing period.
		37  	t0 := time.Now()
		38  	accum := *y
		39  	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
		40  		accum = f(accum)
		41  	}
		42  	*y = accum
		43  }
		44  
// salt1 and salt2 are package-level sinks for the CPU-hog results so
// the hog loops have an observable effect outside the loop body.
var (
	salt1 = 0
	salt2 = 0
)
		49  
// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
// cpuHog1 runs 1e5 iterations of the shared kernel cpuHog0.
func cpuHog1(x int) int {
	return cpuHog0(x, 1e5)
}
		56  
		57  func cpuHog0(x, n int) int {
		58  	foo := x
		59  	for i := 0; i < n; i++ {
		60  		if foo > 0 {
		61  			foo *= foo
		62  		} else {
		63  			foo *= foo + 1
		64  		}
		65  	}
		66  	return foo
		67  }
		68  
		69  func cpuHog2(x int) int {
		70  	foo := x
		71  	for i := 0; i < 1e5; i++ {
		72  		if foo > 0 {
		73  			foo *= foo
		74  		} else {
		75  			foo *= foo + 2
		76  		}
		77  	}
		78  	return foo
		79  }
		80  
		81  // Return a list of functions that we don't want to ever appear in CPU
		82  // profiles. For gccgo, that list includes the sigprof handler itself.
		83  func avoidFunctions() []string {
		84  	if runtime.Compiler == "gccgo" {
		85  		return []string{"runtime.sigprof"}
		86  	}
		87  	return nil
		88  }
		89  
// TestCPUProfile checks that samples attributed to cpuHog1 appear in a
// CPU profile collected while it spins.
func TestCPUProfile(t *testing.T) {
	testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) {
		cpuHogger(cpuHog1, &salt1, dur)
	})
}
		95  
// TestCPUProfileMultithreaded runs two distinct CPU hogs concurrently
// (with GOMAXPROCS=2) and expects both to show up in the profile.
func TestCPUProfileMultithreaded(t *testing.T) {
	// Restore the previous GOMAXPROCS when the test finishes.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, &salt1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, &salt2, dur)
		<-c
	})
}
	 108  
// containsInlinedCall reports whether the function body for the function f is
// known to contain an inlined function call within the first maxBytes bytes.
func containsInlinedCall(f interface{}, maxBytes int) bool {
	_, found := findInlinedCall(f, maxBytes)
	return found
}
	 115  
// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f if any. It scans the first
// maxBytes bytes of f's code one address at a time, asking the runtime
// which function each PC belongs to.
func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
	fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
	if fFunc == nil || fFunc.Entry() == 0 {
		panic("failed to locate function entry")
	}

	for offset := 0; offset < maxBytes; offset++ {
		innerPC := fFunc.Entry() + uintptr(offset)
		inner := runtime.FuncForPC(innerPC)
		if inner == nil {
			// No function known for this PC value.
			// It might simply be misaligned, so keep searching.
			continue
		}
		if inner.Entry() != fFunc.Entry() {
			// Scanned past f and didn't find any inlined functions.
			break
		}
		if inner.Name() != fFunc.Name() {
			// This PC has f as its entry-point, but is not f. Therefore, it must be a
			// function inlined into f.
			return uint64(innerPC), true
		}
	}

	return 0, false
}
	 145  
// TestCPUProfileInlining checks that inlined frames are encoded
// correctly in CPU profiles: an inlined callee must share a Location
// with its caller rather than getting a separate entry.
func TestCPUProfileInlining(t *testing.T) {
	if !containsInlinedCall(inlinedCaller, 4<<10) {
		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
	}

	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})

	// Check if inlined function locations are encoded correctly. The inlinedCallee and inlinedCaller should be in one location.
	for _, loc := range p.Location {
		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
		for _, line := range loc.Line {
			if line.Function.Name == "runtime/pprof.inlinedCallee" {
				hasInlinedCallee = true
			}
			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
				hasInlinedCallerAfterInlinedCallee = true
			}
		}
		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
		}
	}
}
	 171  
	 172  func inlinedCaller(x int) int {
	 173  	x = inlinedCallee(x, 1e5)
	 174  	return x
	 175  }
	 176  
// inlinedCallee is expected to be inlined into inlinedCaller; it simply
// runs the shared CPU hog kernel.
func inlinedCallee(x, n int) int {
	return cpuHog0(x, n)
}
	 180  
	 181  //go:noinline
	 182  func dumpCallers(pcs []uintptr) {
	 183  	if pcs == nil {
	 184  		return
	 185  	}
	 186  
	 187  	skip := 2 // Callers and dumpCallers
	 188  	runtime.Callers(skip, pcs)
	 189  }
	 190  
// inlinedCallerDump captures a call stack that should contain
// inlinedCalleeDump inlined into this (noinline) frame.
//go:noinline
func inlinedCallerDump(pcs []uintptr) {
	inlinedCalleeDump(pcs)
}
	 195  
// inlinedCalleeDump is intended to be inlined into inlinedCallerDump;
// it records the current call stack into pcs.
func inlinedCalleeDump(pcs []uintptr) {
	dumpCallers(pcs)
}
	 199  
// TestCPUProfileRecursion checks that profiles of recursive code are
// encoded sanely: recursive frames must not be merged into a single
// Location.
func TestCPUProfileRecursion(t *testing.T) {
	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions(), func(dur time.Duration) {
		cpuHogger(recursionCaller, &salt1, dur)
	})

	// check the Location encoding was not confused by recursive calls.
	for i, loc := range p.Location {
		recursionFunc := 0
		for _, line := range loc.Line {
			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
				recursionFunc++
			}
		}
		if recursionFunc > 1 {
			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
		}
	}
}
	 218  
// recursionCaller is the profiled entry point for the recursion test.
func recursionCaller(x int) int {
	y := recursionCallee(3, x)
	return y
}
	 223  
// recursionCallee recurses n levels deep, running the (inlinable)
// inlinedCallee at each level so recursive and inlined frames mix in
// the profile.
func recursionCallee(n, x int) int {
	if n == 0 {
		return 1
	}
	y := inlinedCallee(x, 1e4)
	return y * recursionCallee(n-1, x)
}
	 231  
// recursionChainTop heads a mutually recursive chain
// (Top -> Middle -> Bottom -> Top) that terminates once x goes negative.
func recursionChainTop(x int, pcs []uintptr) {
	if x < 0 {
		return
	}
	recursionChainMiddle(x, pcs)
}
	 238  
// recursionChainMiddle forwards to recursionChainBottom; it exists only
// to deepen the mutually recursive chain.
func recursionChainMiddle(x int, pcs []uintptr) {
	recursionChainBottom(x, pcs)
}
	 242  
// recursionChainBottom records the current call stack and continues the
// chain with a smaller x.
func recursionChainBottom(x int, pcs []uintptr) {
	// This will be called each time, we only care about the last. We
	// can't make this conditional or this function won't be inlined.
	dumpCallers(pcs)

	recursionChainTop(x-1, pcs)
}
	 250  
// parseProfile parses the serialized pprof data in valBytes and invokes
// f once per sample with the sample's count (Value[0]), stack, and
// labels. It fails the test if the bytes do not parse, and returns the
// parsed profile for further inspection.
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
	p, err := profile.Parse(bytes.NewReader(valBytes))
	if err != nil {
		t.Fatal(err)
	}
	for _, sample := range p.Sample {
		count := uintptr(sample.Value[0])
		f(count, sample.Location, sample.Label)
	}
	return p
}
	 262  
	 263  func cpuProfilingBroken() bool {
	 264  	switch runtime.GOOS {
	 265  	case "plan9":
	 266  		// Profiling unimplemented.
	 267  		return true
	 268  	case "aix":
	 269  		// See https://golang.org/issue/45170.
	 270  		return true
	 271  	case "ios", "dragonfly", "netbsd", "illumos", "solaris":
	 272  		// See https://golang.org/issue/13841.
	 273  		return true
	 274  	case "openbsd":
	 275  		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
	 276  			// See https://golang.org/issue/13841.
	 277  			return true
	 278  		}
	 279  	}
	 280  
	 281  	return false
	 282  }
	 283  
// testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
// as interpreted by matches, and returns the parsed profile.
// Profiling on a loaded machine is flaky, so the workload is retried
// with doubled durations (up to a cap) before failing; known-broken
// platforms and QEMU-based builders are skipped rather than failed.
func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []string, f func(dur time.Duration)) *profile.Profile {
	switch runtime.GOOS {
	case "darwin":
		// Log the kernel version to help diagnose platform-specific flakes.
		out, err := exec.Command("uname", "-a").CombinedOutput()
		if err != nil {
			t.Fatal(err)
		}
		vers := string(out)
		t.Logf("uname -a: %v", vers)
	case "plan9":
		t.Skip("skipping on plan9")
	}

	broken := cpuProfilingBroken()

	maxDuration := 5 * time.Second
	if testing.Short() && broken {
		// If it's expected to be broken, no point waiting around.
		maxDuration /= 10
	}

	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 100 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for duration <= maxDuration {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if p, ok := profileOk(t, matches, need, avoid, prof, duration); ok {
			return p
		}

		duration *= 2
		if duration <= maxDuration {
			t.Logf("retrying with %s duration", duration)
		}
	}

	if broken {
		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
	}

	// Ignore the failure if the tests are running in a QEMU-based emulator,
	// QEMU is not perfect at emulating everything.
	// IN_QEMU environmental variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
	return nil
}
	 351  
	 352  func contains(slice []string, s string) bool {
	 353  	for i := range slice {
	 354  		if slice[i] == s {
	 355  			return true
	 356  		}
	 357  	}
	 358  	return false
	 359  }
	 360  
// stackContains matches if a function named spec appears anywhere in the stack trace.
// The match is a substring match, so "cpuHog1" also matches
// "runtime/pprof.cpuHog1". count and labels are unused; they exist so
// this function satisfies matchFunc.
func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	for _, loc := range stk {
		for _, line := range loc.Line {
			if strings.Contains(line.Function.Name, spec) {
				return true
			}
		}
	}
	return false
}
	 372  
	 373  type matchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
	 374  
// profileOk checks that prof is a well-formed profile for a run of the
// given duration: it must contain samples matching every entry of need
// (per matches) and no samples in any function named in avoid. It
// returns the parsed profile and whether all checks passed; a false
// result tells testCPUProfile to retry with a longer duration.
func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
	ok = true

	// Check that profile is well formed, contains 'need', and does not contain
	// anything from 'avoid'.
	have := make([]uintptr, len(need))
	avoidSamples := make([]uintptr, len(avoid))
	var samples uintptr
	var buf bytes.Buffer
	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		samples += count
		// Tally samples matching each needed spec.
		for i, spec := range need {
			if matches(spec, count, stk, labels) {
				have[i] += count
			}
		}
		// Tally samples landing in any avoided function (substring match).
		for i, name := range avoid {
			for _, loc := range stk {
				for _, line := range loc.Line {
					if strings.Contains(line.Function.Name, name) {
						avoidSamples[i] += count
					}
				}
			}
		}
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return p, false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	// Any sample in an avoided function is a failure.
	for i, name := range avoid {
		bad := avoidSamples[i]
		if bad != 0 {
			t.Logf("found %d samples in avoid-function %s\n", bad, name)
			ok = false
		}
	}

	if len(need) == 0 {
		return p, ok
	}

	var total uintptr
	for i, name := range need {
		total += have[i]
		t.Logf("%s: %d\n", name, have[i])
	}
	if total == 0 {
		t.Logf("no samples in expected functions")
		ok = false
	}
	// We'd like to check a reasonable minimum, like
	// total / len(have) / smallconstant, but this test is
	// pretty flaky (see bug 7095).	So we'll just test to
	// make sure we got at least one sample.
	min := uintptr(1)
	for i, name := range need {
		if have[i] < min {
			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
			ok = false
		}
	}
	return p, ok
}
	 458  
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
// The test makes fork expensive by touching a large heap, then
// repeatedly forks/execs the test binary while the CPU profiler runs.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
		// Use smaller heap for Windows/ARM to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	for i := 0; i < 10; i++ {
		// Output and error are deliberately ignored; we only care
		// that the fork/exec completes without hanging.
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}
	 498  
// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("not applicable for gccgo")
	}
	// How much to try. These defaults take about 1 seconds
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		// Force many goroutine context switches while profiling.
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for gogo with an attempt at a traceback.
		// "runtime.gogo" is OK, because that's the part of the context switch
		// before the actual switch begins. But we should not see "gogo",
		// aka "gogo<>(SB)", which does the actual switch and is marked SPWRITE.
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// An entry with just one frame is OK too:
			// it knew to stop at gogo.
			if len(stk) == 1 {
				return
			}

			// Otherwise, should not see gogo.
			// The place we'd see it would be the inner most frame.
			name := stk[0].Line[0].Function.Name
			if name == "gogo" {
				var buf bytes.Buffer
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for gogo:\n%s", buf.String())
			}
		})
	}
}
	 555  
	 556  func fprintStack(w io.Writer, stk []*profile.Location) {
	 557  	for _, loc := range stk {
	 558  		fmt.Fprintf(w, " %#x", loc.Address)
	 559  		fmt.Fprintf(w, " (")
	 560  		for i, line := range loc.Line {
	 561  			if i > 0 {
	 562  				fmt.Fprintf(w, " ")
	 563  			}
	 564  			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
	 565  		}
	 566  		fmt.Fprintf(w, ")")
	 567  	}
	 568  	fmt.Fprintf(w, "\n")
	 569  }
	 570  
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
// No functions are required or avoided: the test only checks that
// profiling while math/big division runs does not crash.
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, nil, nil, func(duration time.Duration) {
		t := time.After(duration)
		pi := new(big.Int)
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}
	 590  
	 591  // stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
	 592  func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	 593  	for _, f := range strings.Split(spec, ",") {
	 594  		if !stackContains(f, count, stk, labels) {
	 595  			return false
	 596  		}
	 597  	}
	 598  	return true
	 599  }
	 600  
// TestMorestack checks that stack-growth work (runtime.newstack) is
// attributed in profiles to the goroutine that triggered it
// (runtime/pprof.growstack).
func TestMorestack(t *testing.T) {
	testCPUProfile(t, stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions(), func(duration time.Duration) {
		t := time.After(duration)
		c := make(chan bool)
		for {
			// Each fresh goroutine starts with a small stack and must
			// grow it inside growstack1.
			go func() {
				growstack1()
				c <- true
			}()
			select {
			case <-t:
				return
			case <-c:
			}
		}
	})
}
	 618  
// growstack1 is a non-inlined wrapper that triggers a chain of stack
// growth via growstack.
//go:noinline
func growstack1() {
	growstack(10)
}
	 623  
// growstack recursively allocates a large (8<<18-byte) stack frame per
// call, forcing repeated stack growth.
//go:noinline
func growstack(n int) {
	var buf [8 << 18]byte
	use(buf)
	if n > 0 {
		growstack(n - 1)
	}
}
	 632  
// use exists only so growstack's buffer counts as used; noinline keeps
// the call (and thus the frame) from being optimized away.
//go:noinline
func use(x [8 << 18]byte) {}
	 635  
// TestBlockProfile triggers each kind of blocking event (channel ops,
// select, mutex, cond) and verifies that both the debug=1 text format
// and the proto format of the "block" profile report the expected
// stacks.
func TestBlockProfile(t *testing.T) {
	// TestCase pairs a blocking helper with the stack (for proto) and
	// regexp (for debug=1 text) its block events should produce.
	type TestCase struct {
		name string
		f    func()
		stk  []string
		re   string
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate block profile
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f()
	}

	// The debug=1 text format: check header, footer, and one regexp per case.
	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		for _, test := range tests {
			// Allow runs of tabs where the profile may widen columns.
			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	// The proto format: parse, validate, and check each expected stack.
	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})

}
	 793  
	 794  func stacks(p *profile.Profile) (res [][]string) {
	 795  	for _, s := range p.Sample {
	 796  		var stk []string
	 797  		for _, l := range s.Location {
	 798  			for _, line := range l.Line {
	 799  				stk = append(stk, line.Function.Name)
	 800  			}
	 801  		}
	 802  		res = append(res, stk)
	 803  	}
	 804  	return res
	 805  }
	 806  
	 807  func containsStack(got [][]string, want []string) bool {
	 808  	for _, stk := range got {
	 809  		if len(stk) < len(want) {
	 810  			continue
	 811  		}
	 812  		for i, f := range want {
	 813  			if f != stk[i] {
	 814  				break
	 815  			}
	 816  			if i == len(want)-1 {
	 817  				return true
	 818  			}
	 819  		}
	 820  	}
	 821  	return false
	 822  }
	 823  
	 824  const blockDelay = 10 * time.Millisecond
	 825  
	 826  func blockChanRecv() {
	 827  	c := make(chan bool)
	 828  	go func() {
	 829  		time.Sleep(blockDelay)
	 830  		c <- true
	 831  	}()
	 832  	<-c
	 833  }
	 834  
// blockChanSend blocks on an unbuffered channel send until a background
// goroutine receives after blockDelay, producing a chansend1 entry in
// the block profile.
func blockChanSend() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	c <- true
}
	 843  
// blockChanClose blocks on a channel receive that is released by the
// channel being closed (rather than a send) after blockDelay.
func blockChanClose() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		close(c)
	}()
	<-c
}
	 852  
// blockSelectRecvAsync blocks in a two-way select on buffered channels,
// repeated numTries times; each wait is released by a sender that
// sleeps blockDelay first, producing runtime.selectgo block events.
func blockSelectRecvAsync() {
	const numTries = 3
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		for i := 0; i < numTries; i++ {
			time.Sleep(blockDelay)
			c <- true
		}
	}()
	for i := 0; i < numTries; i++ {
		select {
		case <-c:
		case <-c2:
		}
	}
}
	 870  
// blockSelectSendSync blocks in a select offering sends on two
// unbuffered channels; a background goroutine receives from c after
// blockDelay, producing a runtime.selectgo block event.
func blockSelectSendSync() {
	c := make(chan bool)
	c2 := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	select {
	case c <- true:
	case c2 <- true:
	}
}
	 883  
// blockMutex blocks on a sync.Mutex that a background goroutine
// releases after blockDelay, producing Mutex.Lock contention events
// for both the block and mutex profiles.
func blockMutex() {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	mu.Lock()
}
	 897  
// blockCond blocks in sync.Cond.Wait until a background goroutine
// signals the condition after blockDelay.
func blockCond() {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}
	 911  
	 912  // See http://golang.org/cl/299991.
	 913  func TestBlockProfileBias(t *testing.T) {
	 914  	rate := int(1000) // arbitrary value
	 915  	runtime.SetBlockProfileRate(rate)
	 916  	defer runtime.SetBlockProfileRate(0)
	 917  
	 918  	// simulate blocking events
	 919  	blockFrequentShort(rate)
	 920  	blockInfrequentLong(rate)
	 921  
	 922  	var w bytes.Buffer
	 923  	Lookup("block").WriteTo(&w, 0)
	 924  	p, err := profile.Parse(&w)
	 925  	if err != nil {
	 926  		t.Fatalf("failed to parse profile: %v", err)
	 927  	}
	 928  	t.Logf("parsed proto: %s", p)
	 929  
	 930  	il := float64(-1) // blockInfrequentLong duration
	 931  	fs := float64(-1) // blockFrequentShort duration
	 932  	for _, s := range p.Sample {
	 933  		for _, l := range s.Location {
	 934  			for _, line := range l.Line {
	 935  				if len(s.Value) < 2 {
	 936  					t.Fatal("block profile has less than 2 sample types")
	 937  				}
	 938  
	 939  				if line.Function.Name == "runtime/pprof.blockInfrequentLong" {
	 940  					il = float64(s.Value[1])
	 941  				} else if line.Function.Name == "runtime/pprof.blockFrequentShort" {
	 942  					fs = float64(s.Value[1])
	 943  				}
	 944  			}
	 945  		}
	 946  	}
	 947  	if il == -1 || fs == -1 {
	 948  		t.Fatal("block profile is missing expected functions")
	 949  	}
	 950  
	 951  	// stddev of bias from 100 runs on local machine multiplied by 10x
	 952  	const threshold = 0.2
	 953  	if bias := (il - fs) / il; math.Abs(bias) > threshold {
	 954  		t.Fatalf("bias: abs(%f) > %f", bias, threshold)
	 955  	} else {
	 956  		t.Logf("bias: abs(%f) < %f", bias, threshold)
	 957  	}
	 958  }
	 959  
// blockFrequentShort produces 100000 block events with an average duration of
// rate / 10 cycles each (via the linknamed runtime blockevent).
func blockFrequentShort(rate int) {
	for i := 0; i < 100000; i++ {
		blockevent(int64(rate/10), 1)
	}
}
	 967  
// blockInfrequentLong produces 10000 block events with an average duration of
// rate cycles each. (The comment previously misnamed this function as
// blockFrequentShort.)
func blockInfrequentLong(rate int) {
	for i := 0; i < 10000; i++ {
		blockevent(int64(rate), 1)
	}
}
	 975  
// blockevent is linknamed to the runtime's internal blockevent so the
// test can record synthetic blocking events with a chosen duration in
// cycles. Used by TestBlockProfileBias.
//go:linkname blockevent runtime.blockevent
func blockevent(cycles int64, skip int)
	 979  
	 980  func TestMutexProfile(t *testing.T) {
	 981  	// Generate mutex profile
	 982  
	 983  	old := runtime.SetMutexProfileFraction(1)
	 984  	defer runtime.SetMutexProfileFraction(old)
	 985  	if old != 0 {
	 986  		t.Fatalf("need MutexProfileRate 0, got %d", old)
	 987  	}
	 988  
	 989  	blockMutex()
	 990  
	 991  	t.Run("debug=1", func(t *testing.T) {
	 992  		var w bytes.Buffer
	 993  		Lookup("mutex").WriteTo(&w, 1)
	 994  		prof := w.String()
	 995  		t.Logf("received profile: %v", prof)
	 996  
	 997  		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
	 998  			t.Errorf("Bad profile header:\n%v", prof)
	 999  		}
	1000  		prof = strings.Trim(prof, "\n")
	1001  		lines := strings.Split(prof, "\n")
	1002  		if len(lines) != 6 {
	1003  			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
	1004  		}
	1005  		if len(lines) < 6 {
	1006  			return
	1007  		}
	1008  		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
	1009  		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
	1010  		//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
	1011  		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
	1012  			t.Errorf("%q didn't match %q", lines[3], r2)
	1013  		}
	1014  		r3 := "^#.*runtime/pprof.blockMutex.*$"
	1015  		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
	1016  			t.Errorf("%q didn't match %q", lines[5], r3)
	1017  		}
	1018  		t.Logf(prof)
	1019  	})
	1020  	t.Run("proto", func(t *testing.T) {
	1021  		// proto format
	1022  		var w bytes.Buffer
	1023  		Lookup("mutex").WriteTo(&w, 0)
	1024  		p, err := profile.Parse(&w)
	1025  		if err != nil {
	1026  			t.Fatalf("failed to parse profile: %v", err)
	1027  		}
	1028  		t.Logf("parsed proto: %s", p)
	1029  		if err := p.CheckValid(); err != nil {
	1030  			t.Fatalf("invalid profile: %v", err)
	1031  		}
	1032  
	1033  		stks := stacks(p)
	1034  		for _, want := range [][]string{
	1035  			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
	1036  		} {
	1037  			if !containsStack(stks, want) {
	1038  				t.Errorf("No matching stack entry for %+v", want)
	1039  			}
	1040  		}
	1041  	})
	1042  }
	1043  
// func1 through func4 are identical bodies under distinct names: each blocks
// receiving on c. The distinct names give goroutine profiles distinguishable
// stacks to aggregate and count (used by TestGoroutineCounts).
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }
	1048  
	1049  func TestGoroutineCounts(t *testing.T) {
	1050  	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
	1051  	// desired blocking point.
	1052  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	1053  
	1054  	c := make(chan int)
	1055  	for i := 0; i < 100; i++ {
	1056  		switch {
	1057  		case i%10 == 0:
	1058  			go func1(c)
	1059  		case i%2 == 0:
	1060  			go func2(c)
	1061  		default:
	1062  			go func3(c)
	1063  		}
	1064  		// Let goroutines block on channel
	1065  		for j := 0; j < 5; j++ {
	1066  			runtime.Gosched()
	1067  		}
	1068  	}
	1069  	ctx := context.Background()
	1070  
	1071  	// ... and again, with labels this time (just with fewer iterations to keep
	1072  	// sorting deterministic).
	1073  	Do(ctx, Labels("label", "value"), func(context.Context) {
	1074  		for i := 0; i < 89; i++ {
	1075  			switch {
	1076  			case i%10 == 0:
	1077  				go func1(c)
	1078  			case i%2 == 0:
	1079  				go func2(c)
	1080  			default:
	1081  				go func3(c)
	1082  			}
	1083  			// Let goroutines block on channel
	1084  			for j := 0; j < 5; j++ {
	1085  				runtime.Gosched()
	1086  			}
	1087  		}
	1088  	})
	1089  
	1090  	var w bytes.Buffer
	1091  	goroutineProf := Lookup("goroutine")
	1092  
	1093  	// Check debug profile
	1094  	goroutineProf.WriteTo(&w, 1)
	1095  	prof := w.String()
	1096  
	1097  	labels := labelMap{"label": "value"}
	1098  	labelStr := "\n# labels: " + labels.String()
	1099  	if !containsInOrder(prof, "\n50 @ ", "\n44 @", labelStr,
	1100  		"\n40 @", "\n36 @", labelStr, "\n10 @", "\n9 @", labelStr, "\n1 @") {
	1101  		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
	1102  	}
	1103  
	1104  	// Check proto profile
	1105  	w.Reset()
	1106  	goroutineProf.WriteTo(&w, 0)
	1107  	p, err := profile.Parse(&w)
	1108  	if err != nil {
	1109  		t.Errorf("error parsing protobuf profile: %v", err)
	1110  	}
	1111  	if err := p.CheckValid(); err != nil {
	1112  		t.Errorf("protobuf profile is invalid: %v", err)
	1113  	}
	1114  	expectedLabels := map[int64]map[string]string{
	1115  		50: map[string]string{},
	1116  		44: map[string]string{"label": "value"},
	1117  		40: map[string]string{},
	1118  		36: map[string]string{"label": "value"},
	1119  		10: map[string]string{},
	1120  		9:	map[string]string{"label": "value"},
	1121  		1:	map[string]string{},
	1122  	}
	1123  	if !containsCountsLabels(p, expectedLabels) {
	1124  		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
	1125  			expectedLabels, p)
	1126  	}
	1127  
	1128  	close(c)
	1129  
	1130  	time.Sleep(10 * time.Millisecond) // let goroutines exit
	1131  }
	1132  
	1133  func containsInOrder(s string, all ...string) bool {
	1134  	for _, t := range all {
	1135  		i := strings.Index(s, t)
	1136  		if i < 0 {
	1137  			return false
	1138  		}
	1139  		s = s[i+len(t):]
	1140  	}
	1141  	return true
	1142  }
	1143  
	1144  func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
	1145  	m := make(map[int64]int)
	1146  	type nkey struct {
	1147  		count		int64
	1148  		key, val string
	1149  	}
	1150  	n := make(map[nkey]int)
	1151  	for c, kv := range countLabels {
	1152  		m[c]++
	1153  		for k, v := range kv {
	1154  			n[nkey{
	1155  				count: c,
	1156  				key:	 k,
	1157  				val:	 v,
	1158  			}]++
	1159  
	1160  		}
	1161  	}
	1162  	for _, s := range prof.Sample {
	1163  		// The count is the single value in the sample
	1164  		if len(s.Value) != 1 {
	1165  			return false
	1166  		}
	1167  		m[s.Value[0]]--
	1168  		for k, vs := range s.Label {
	1169  			for _, v := range vs {
	1170  				n[nkey{
	1171  					count: s.Value[0],
	1172  					key:	 k,
	1173  					val:	 v,
	1174  				}]--
	1175  			}
	1176  		}
	1177  	}
	1178  	for _, n := range m {
	1179  		if n > 0 {
	1180  			return false
	1181  		}
	1182  	}
	1183  	for _, ncnt := range n {
	1184  		if ncnt != 0 {
	1185  			return false
	1186  		}
	1187  	}
	1188  	return true
	1189  }
	1190  
	1191  var emptyCallStackTestRun int64
	1192  
	1193  // Issue 18836.
	1194  func TestEmptyCallStack(t *testing.T) {
	1195  	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
	1196  	emptyCallStackTestRun++
	1197  
	1198  	t.Parallel()
	1199  	var buf bytes.Buffer
	1200  	p := NewProfile(name)
	1201  
	1202  	p.Add("foo", 47674)
	1203  	p.WriteTo(&buf, 1)
	1204  	p.Remove("foo")
	1205  	got := buf.String()
	1206  	prefix := name + " profile: total 1\n"
	1207  	if !strings.HasPrefix(got, prefix) {
	1208  		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
	1209  	}
	1210  	lostevent := "lostProfileEvent"
	1211  	if !strings.Contains(got, lostevent) {
	1212  		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
	1213  	}
	1214  }
	1215  
	1216  // stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
	1217  // and value and has funcname somewhere in the stack.
	1218  func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
	1219  	semi := strings.Index(spec, ";")
	1220  	if semi == -1 {
	1221  		panic("no semicolon in key/value spec")
	1222  	}
	1223  	kv := strings.SplitN(spec[semi+1:], "=", 2)
	1224  	if len(kv) != 2 {
	1225  		panic("missing = in key/value spec")
	1226  	}
	1227  	if !contains(labels[kv[0]], kv[1]) {
	1228  		return false
	1229  	}
	1230  	return stackContains(spec[:semi], count, stk, labels)
	1231  }
	1232  
// TestCPUProfileLabel checks that a label set installed via Do is attached to
// CPU profile samples taken while cpuHogger runs inside the labeled region.
func TestCPUProfileLabel(t *testing.T) {
	testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(context.Context) {
			cpuHogger(cpuHog1, &salt1, dur)
		})
	})
}
	1240  
	1241  func TestLabelRace(t *testing.T) {
	1242  	// Test the race detector annotations for synchronization
	1243  	// between settings labels and consuming them from the
	1244  	// profile.
	1245  	testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil, func(dur time.Duration) {
	1246  		start := time.Now()
	1247  		var wg sync.WaitGroup
	1248  		for time.Since(start) < dur {
	1249  			var salts [10]int
	1250  			for i := 0; i < 10; i++ {
	1251  				wg.Add(1)
	1252  				go func(j int) {
	1253  					Do(context.Background(), Labels("key", "value"), func(context.Context) {
	1254  						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
	1255  					})
	1256  					wg.Done()
	1257  				}(i)
	1258  			}
	1259  			wg.Wait()
	1260  		}
	1261  	})
	1262  }
	1263  
	1264  // Check that there is no deadlock when the program receives SIGPROF while in
	1265  // 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
	1266  func TestAtomicLoadStore64(t *testing.T) {
	1267  	f, err := os.CreateTemp("", "profatomic")
	1268  	if err != nil {
	1269  		t.Fatalf("TempFile: %v", err)
	1270  	}
	1271  	defer os.Remove(f.Name())
	1272  	defer f.Close()
	1273  
	1274  	if err := StartCPUProfile(f); err != nil {
	1275  		t.Fatal(err)
	1276  	}
	1277  	defer StopCPUProfile()
	1278  
	1279  	var flag uint64
	1280  	done := make(chan bool, 1)
	1281  
	1282  	go func() {
	1283  		for atomic.LoadUint64(&flag) == 0 {
	1284  			runtime.Gosched()
	1285  		}
	1286  		done <- true
	1287  	}()
	1288  	time.Sleep(50 * time.Millisecond)
	1289  	atomic.StoreUint64(&flag, 1)
	1290  	<-done
	1291  }
	1292  
	1293  func TestTracebackAll(t *testing.T) {
	1294  	// With gccgo, if a profiling signal arrives at the wrong time
	1295  	// during traceback, it may crash or hang. See issue #29448.
	1296  	f, err := os.CreateTemp("", "proftraceback")
	1297  	if err != nil {
	1298  		t.Fatalf("TempFile: %v", err)
	1299  	}
	1300  	defer os.Remove(f.Name())
	1301  	defer f.Close()
	1302  
	1303  	if err := StartCPUProfile(f); err != nil {
	1304  		t.Fatal(err)
	1305  	}
	1306  	defer StopCPUProfile()
	1307  
	1308  	ch := make(chan int)
	1309  	defer close(ch)
	1310  
	1311  	count := 10
	1312  	for i := 0; i < count; i++ {
	1313  		go func() {
	1314  			<-ch // block
	1315  		}()
	1316  	}
	1317  
	1318  	N := 10000
	1319  	if testing.Short() {
	1320  		N = 500
	1321  	}
	1322  	buf := make([]byte, 10*1024)
	1323  	for i := 0; i < N; i++ {
	1324  		runtime.Stack(buf, true)
	1325  	}
	1326  }
	1327  
// TestTryAdd tests the cases that are hard to test with real program execution.
//
// For example, the current go compilers may not always inline functions
// involved in recursion but that may not be true in the future compilers. This
// tests such cases by using fake call sequences and forcing the profile build
// utilizing translateCPUProfile defined in proto_test.go
func TestTryAdd(t *testing.T) {
	if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
		t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
	}

	// inlinedCallerDump
	//   inlinedCalleeDump
	pcs := make([]uintptr, 2)
	inlinedCallerDump(pcs)
	inlinedCallerStack := make([]uint64, 2)
	for i := range pcs {
		inlinedCallerStack[i] = uint64(pcs[i])
	}

	if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
		t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
	}

	// recursionChainTop
	//   recursionChainMiddle
	//     recursionChainBottom
	//       recursionChainTop
	//         recursionChainMiddle
	//           recursionChainBottom
	pcs = make([]uintptr, 6)
	recursionChainTop(1, pcs)
	recursionStack := make([]uint64, len(pcs))
	for i := range pcs {
		recursionStack[i] = uint64(pcs[i])
	}

	// At hz = 500, each sample accounts for 1/500*1e9 nanoseconds.
	period := int64(2000 * 1000) // 1/500*1e9 nanosec.

	// Each input record below is: length, 0, count-or-hz, PCs... — the raw
	// encoding consumed by profileBuilder.addCPUData.
	testCases := []struct {
		name        string
		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
		wantLocs    [][]string        // ordered location entries with function names.
		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
	}{{
		// Sanity test for a normal, complete stack trace.
		name: "full_stack_trace",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		wantLocs: [][]string{
			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "bug35538",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			// Fake frame: tryAdd will have inlinedCallerDump
			// (stack[1]) on the deck when it encounters the next
			// inline function. It should accept this.
			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "bug38096",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			// count (data[2]) == 0 && len(stk) == 1 is an overflow
			// entry. The "stk" entry is actually the count.
			4, 0, 0, 4242,
		},
		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// If a function is directly called recursively then it must
		// not be inlined in the caller.
		//
		// N.B. We're generating an impossible profile here, with a
		// recursive inlineCalleeDump call. This is simulating a non-Go
		// function that looks like an inlined Go function other than
		// its recursive property. See pcDeck.tryAdd.
		name: "directly_recursive_func_is_not_inlined",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
			4, 0, 40, inlinedCallerStack[0],
		},
		// inlinedCallerDump shows up here because
		// runtime_expandFinalInlineFrame adds it to the stack frame.
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
		},
	}, {
		name: "recursion_chain_inline",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
		},
		wantLocs: [][]string{
			{"runtime/pprof.recursionChainBottom"},
			{
				"runtime/pprof.recursionChainMiddle",
				"runtime/pprof.recursionChainTop",
				"runtime/pprof.recursionChainBottom",
			},
			{
				"runtime/pprof.recursionChainMiddle",
				"runtime/pprof.recursionChainTop",
				"runtime/pprof.TestTryAdd", // inlined into the test.
			},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
		},
	}, {
		name: "truncated_stack_trace_later",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
			4, 0, 60, inlinedCallerStack[0],
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "truncated_stack_trace_first",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			4, 0, 70, inlinedCallerStack[0],
			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// We can recover the inlined caller from a truncated stack.
		name: "truncated_stack_trace_only",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			4, 0, 70, inlinedCallerStack[0],
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// The same location is used for duplicated stacks.
		name: "truncated_stack_trace_twice",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			4, 0, 70, inlinedCallerStack[0],
			// Fake frame: add a fake call to
			// inlinedCallerDump to prevent this sample
			// from getting merged into above.
			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
		},
		wantLocs: [][]string{
			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
			{"runtime/pprof.inlinedCallerDump"},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
		},
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			p, err := translateCPUProfile(tc.input)
			if err != nil {
				t.Fatalf("translating profile: %v", err)
			}
			t.Logf("Profile: %v\n", p)

			// One location entry with all inlined functions.
			var gotLoc [][]string
			for _, loc := range p.Location {
				var names []string
				for _, line := range loc.Line {
					names = append(names, line.Function.Name)
				}
				gotLoc = append(gotLoc, names)
			}
			// Compare via JSON so nested slices get a deep, readable diff.
			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
			}
			// All samples should point to one location.
			var gotSamples []*profile.Sample
			for _, sample := range p.Sample {
				var locs []*profile.Location
				for _, loc := range sample.Location {
					locs = append(locs, &profile.Location{ID: loc.ID})
				}
				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
			}
			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
			}
		})
	}
}
	1546  
	1547  func TestTimeVDSO(t *testing.T) {
	1548  	// Test that time functions have the right stack trace. In particular,
	1549  	// it shouldn't be recursive.
	1550  
	1551  	if runtime.GOOS == "android" {
	1552  		// Flaky on Android, issue 48655. VDSO may not be enabled.
	1553  		testenv.SkipFlaky(t, 48655)
	1554  	}
	1555  
	1556  	p := testCPUProfile(t, stackContains, []string{"time.now"}, avoidFunctions(), func(dur time.Duration) {
	1557  		t0 := time.Now()
	1558  		for {
	1559  			t := time.Now()
	1560  			if t.Sub(t0) >= dur {
	1561  				return
	1562  			}
	1563  		}
	1564  	})
	1565  
	1566  	// Check for recursive time.now sample.
	1567  	for _, sample := range p.Sample {
	1568  		var seenNow bool
	1569  		for _, loc := range sample.Location {
	1570  			for _, line := range loc.Line {
	1571  				if line.Function.Name == "time.now" {
	1572  					if seenNow {
	1573  						t.Fatalf("unexpected recursive time.now")
	1574  					}
	1575  					seenNow = true
	1576  				}
	1577  			}
	1578  		}
	1579  	}
	1580  }
	1581  

View as plain text