...

Source file src/runtime/stack_test.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"regexp"
	. "runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe" // for go:linkname
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	if *flagQuick {
		t.Skip("-quick")
	}

	if GOARCH == "wasm" {
		t.Skip("fails on wasm (too slow?)")
	}

	// Don't make this test parallel as this makes the 20 second
	// timeout unreliable on slow builders. (See issue #19381.)

	var wg sync.WaitGroup

	// in a normal goroutine
	var growDuration time.Duration // For debugging failures
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := make(chan bool)
		var startTime time.Time
		var started, progress uint32
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				startTime = time.Now()
				atomic.StoreUint32(&started, 1)
				growStack(&progress)
				done <- true
			})
			s = nil
			done <- true
		}()
		<-done
		GC()

		timeout := 20 * time.Second
		if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
			scale, err := strconv.Atoi(s)
			if err == nil {
				timeout *= time.Duration(scale)
			}
		}

		select {
		case <-done:
		case <-time.After(timeout):
			if atomic.LoadUint32(&started) == 0 {
				t.Log("finalizer did not start")
			} else {
				t.Logf("finalizer started %s ago and finished %d iterations", time.Since(startTime), atomic.LoadUint32(&progress))
			}
			t.Log("first growStack took", growDuration)
			t.Error("finalizer did not run")
			return
		}
	}()
	wg.Wait()
}

// ... and in init
//func init() {
//	growStack()
//}

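// growStack grows and shrinks the stack repeatedly by calling
// growStackIter with increasing depth, checking for corruption after
// each call. If progress is non-nil, it is updated on each iteration
// so a watchdog can observe forward progress.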
func growStack(progress *uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			atomic.StoreUint32(progress, uint32(i))
		}
	}
	GC()
}

// This function is not an anonymous func, so that the compiler can do
// escape analysis and place x on the stack (and stack growth can then
// update the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

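// growStackWithCallback calls cb at recursion depths 0 through 1<<10-1,
// so cb runs with many different amounts of stack in use.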
func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack(nil)
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

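// testDeferPtrsGoexit uses about i KB of stack before calling Goexit, so
// the deferred setBig call (with >4kB of args) runs near the current end
// of the stack. The value it stores in y is reported back on c.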
func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

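// setBig is like set, but its >4kB argument can force a stack grow when
// a deferred call to it runs.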
func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it uses panic instead
// of Goexit to run the defers. The two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}

//go:noinline
func testDeferLeafSigpanic1() {
	// Cause a sigpanic to be injected in this frame.
	//
	// This function has to be declared before
	// TestDeferLeafSigpanic so the runtime will crash if we think
	// this function's continuation PC is in
	// TestDeferLeafSigpanic.
	*(*int)(nil) = 0
}

// TestDeferLeafSigpanic tests defer matching around leaf functions
// that sigpanic. This is tricky because on LR machines the outer
// function and the inner function have the same SP, but it's critical
// that we match up the defer correctly to get the right liveness map.
// See issue #25499.
func TestDeferLeafSigpanic(t *testing.T) {
	// Push a defer that will walk the stack.
	defer func() {
		if err := recover(); err == nil {
			t.Fatal("expected panic from nil pointer")
		}
		GC()
	}()
	// Call a leaf function. We must set up the exact call stack:
	//
	//	deferring function -> leaf function -> sigpanic
	//
	// On LR machines, the leaf function will have the same SP as
	// the SP pushed for the defer frame.
	testDeferLeafSigpanic1()
}

// TestPanicUseStack checks that a chain of Panic structs on the stack is
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

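// makeTree builds a complete binary tree of depth d, giving the GC a
// nontrivial amount of heap to scan.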
func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// use about n KB of stack and call f
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

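// growing grows this goroutine's stack by the number of KB received on
// c, signaling on done after each growth. When c is closed it signals
// one final time and returns.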
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4   // rounds
		G = 200 // goroutines per round
		S = 5   // stack growth steps per goroutine (1<<s KB)
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopyPtr(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			i := 1000000
			countp(&i)
			c <- true
		}()
		<-c
	}
}

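// countp counts *n down to zero with one stack frame per decrement, so
// the deep stack is full of frames that hold the pointer n.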
func countp(n *int) {
	if *n == 0 {
		return
	}
	*n--
	countp(n)
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

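// count recurses n times, producing a stack about n frames deep.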
func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

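// count1 through count23 form a recursive cycle of distinct functions,
// so the deep stack contains many different frames rather than one
// frame repeated.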
func count1(n int) int {
	if n <= 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int  { return 1 + count3(n-1) }
func count3(n int) int  { return 1 + count4(n-1) }
func count4(n int) int  { return 1 + count5(n-1) }
func count5(n int) int  { return 1 + count6(n-1) }
func count6(n int) int  { return 1 + count7(n-1) }
func count7(n int) int  { return 1 + count8(n-1) }
func count8(n int) int  { return 1 + count9(n-1) }
func count9(n int) int  { return 1 + count10(n-1) }
func count10(n int) int { return 1 + count11(n-1) }
func count11(n int) int { return 1 + count12(n-1) }
func count12(n int) int { return 1 + count13(n-1) }
func count13(n int) int { return 1 + count14(n-1) }
func count14(n int) int { return 1 + count15(n-1) }
func count15(n int) int { return 1 + count16(n-1) }
func count16(n int) int { return 1 + count17(n-1) }
func count17(n int) int { return 1 + count18(n-1) }
func count18(n int) int { return 1 + count19(n-1) }
func count19(n int) int { return 1 + count20(n-1) }
func count20(n int) int { return 1 + count21(n-1) }
func count21(n int) int { return 1 + count22(n-1) }
func count22(n int) int { return 1 + count23(n-1) }
func count23(n int) int { return 1 + count1(n-1) }

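// structWithMethod has value methods whose compiler-generated pointer
// wrappers are exercised by the TestStackWrapper* tests below.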
type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

func (s structWithMethod) nop() {}

func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}

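// I's method expression I.M compiles to an autogenerated wrapper, used
// by the wrapper panic and CallersFrames tests below.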
type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	t.Run("sigpanic", func(t *testing.T) {
		// nil calls to interface methods cause a sigpanic.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Nil calls to value method wrappers call panicwrap.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}

func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// Test that the stack trace from a panicking wrapper includes
	// the wrapper, even though we elide these when they don't panic.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}

func TestCallersFromWrapper(t *testing.T) {
	// Test that invoking CallersFrames on a stack where the first
	// PC is an autogenerated wrapper keeps the wrapper in the
	// trace. Normally we elide these, assuming that the wrapper
	// calls the thing you actually wanted to see, but in this
	// case we need to keep it.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}

func TestTracebackSystemstack(t *testing.T) {
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Test that profiles correctly jump over systemstack,
	// including nested systemstack calls.
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]
	// Check that runtime.TracebackSystemstack appears five times
	// and that we see TestTracebackSystemstack.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}

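// TestTracebackAncestors checks the goroutine-ancestor tracebacks that
// the TracebackAncestors child program prints under varying
// GODEBUG=tracebackancestors depths. (runTestProg, the child-process
// helper, is defined elsewhere in this package's tests.)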
func TestTracebackAncestors(t *testing.T) {
	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
	for _, tracebackDepth := range []int{0, 1, 5, 50} {
		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))

		numGoroutines := 3
		numFrames := 2
		ancestorsExpected := numGoroutines
		if numGoroutines > tracebackDepth {
			ancestorsExpected = tracebackDepth
		}

		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
		if len(matches) != 2 {
			t.Fatalf("want 2 goroutines, got:\n%s", output)
		}

		// Check functions in the traceback.
		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
		for _, fn := range fns {
			if !strings.Contains(output, "\n"+fn+"(") {
				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
			}
		}

		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}
	}
}

// Test that the defer closure is correctly scanned when the stack is scanned.
func TestDeferLiveness(t *testing.T) {
	output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}

func TestDeferHeapAndStack(t *testing.T) {
	P := 4     // processors
	N := 10000 // iterations
	D := 200   // stack depth

	if testing.Short() {
		P /= 2
		N /= 10
		D /= 10
	}
	c := make(chan bool)
	for p := 0; p < P; p++ {
		go func() {
			for i := 0; i < N; i++ {
				if deferHeapAndStack(D) != 2*D {
					panic("bad result")
				}
			}
			c <- true
		}()
	}
	for p := 0; p < P; p++ {
		<-c
	}
}

// deferHeapAndStack(n) computes 2*n
func deferHeapAndStack(n int) (r int) {
	if n == 0 {
		return 0
	}
	if n%2 == 0 {
		// heap-allocated defers
		for i := 0; i < 2; i++ {
			defer func() {
				r++
			}()
		}
	} else {
		// stack-allocated defers
		defer func() {
			r++
		}()
		defer func() {
			r++
		}()
	}
	r = deferHeapAndStack(n - 1)
	escapeMe(new([1024]byte)) // force some GCs
	return
}

// Pass a value to escapeMe to force it to escape.
var escapeMe = func(x interface{}) {}

// Test that when F -> G is inlined and F is excluded from stack
// traces, G still appears.
func TestTracebackInlineExcluded(t *testing.T) {
	defer func() {
		recover()
		buf := make([]byte, 4<<10)
		stk := string(buf[:Stack(buf, false)])

		t.Log(stk)

		if not := "tracebackExcluded"; strings.Contains(stk, not) {
			t.Errorf("found but did not expect %q", not)
		}
		if want := "tracebackNotExcluded"; !strings.Contains(stk, want) {
			t.Errorf("expected %q in stack", want)
		}
	}()
	tracebackExcluded()
}

// tracebackExcluded should be excluded from tracebacks. There are
// various ways this could come up. Linking it to a "runtime." name is
// rather synthetic, but it's easy and reliable. See issue #42754 for
// one way this happened in real code.
//
//go:linkname tracebackExcluded runtime.tracebackExcluded
//go:noinline
func tracebackExcluded() {
	// Call an inlined function that should not itself be excluded
	// from tracebacks.
	tracebackNotExcluded()
}

// tracebackNotExcluded should be inlined into tracebackExcluded, but
// should not itself be excluded from the traceback.
func tracebackNotExcluded() {
	var x *int
	*x = 0
}