...

Source file src/runtime/export_test.go

		 1  // Copyright 2010 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  // Export guts for testing.
		 6  
		 7  package runtime
		 8  
		 9  import (
		10  	"runtime/internal/atomic"
		11  	"runtime/internal/sys"
		12  	"unsafe"
		13  )
		14  
		15  var Fadd64 = fadd64
		16  var Fsub64 = fsub64
		17  var Fmul64 = fmul64
		18  var Fdiv64 = fdiv64
		19  var F64to32 = f64to32
		20  var F32to64 = f32to64
		21  var Fcmp64 = fcmp64
		22  var Fintto64 = fintto64
		23  var F64toint = f64toint
		24  
		25  var Entersyscall = entersyscall
		26  var Exitsyscall = exitsyscall
		27  var LockedOSThread = lockedOSThread
		28  var Xadduintptr = atomic.Xadduintptr
		29  
		30  var FuncPC = funcPC
		31  
		32  var Fastlog2 = fastlog2
		33  
		34  var Atoi = atoi
		35  var Atoi32 = atoi32
		36  
		37  var Nanotime = nanotime
		38  var NetpollBreak = netpollBreak
		39  var Usleep = usleep
		40  
		41  var PhysPageSize = physPageSize
		42  var PhysHugePageSize = physHugePageSize
		43  
		44  var NetpollGenericInit = netpollGenericInit
		45  
		46  var Memmove = memmove
		47  var MemclrNoHeapPointers = memclrNoHeapPointers
		48  
		49  var LockPartialOrder = lockPartialOrder
		50  
		51  type LockRank lockRank
		52  
		53  func (l LockRank) String() string {
		54  	return lockRank(l).String()
		55  }
		56  
		57  const PreemptMSupported = preemptMSupported
		58  
		59  type LFNode struct {
		60  	Next    uint64
		61  	Pushcnt uintptr
		62  }
		63  
		64  func LFStackPush(head *uint64, node *LFNode) {
		65  	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
		66  }
		67  
		68  func LFStackPop(head *uint64) *LFNode {
		69  	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
		70  }
		71  
		72  func Netpoll(delta int64) {
		73  	systemstack(func() {
		74  		netpoll(delta)
		75  	})
		76  }
		77  
		78  func GCMask(x interface{}) (ret []byte) {
		79  	systemstack(func() {
		80  		ret = getgcmask(x)
		81  	})
		82  	return
		83  }
		84  
		85  func RunSchedLocalQueueTest() {
		86  	_p_ := new(p)
		87  	gs := make([]g, len(_p_.runq))
		88  	for i := 0; i < len(_p_.runq); i++ {
		89  		if g, _ := runqget(_p_); g != nil {
		90  			throw("runq is not empty initially")
		91  		}
		92  		for j := 0; j < i; j++ {
		93  			runqput(_p_, &gs[i], false)
		94  		}
		95  		for j := 0; j < i; j++ {
		96  			if g, _ := runqget(_p_); g != &gs[i] {
		97  				print("bad element at iter ", i, "/", j, "\n")
		98  				throw("bad element")
		99  			}
	 100  		}
	 101  		if g, _ := runqget(_p_); g != nil {
	 102  			throw("runq is not empty afterwards")
	 103  		}
	 104  	}
	 105  }
	 106  
	 107  func RunSchedLocalQueueStealTest() {
	 108  	p1 := new(p)
	 109  	p2 := new(p)
	 110  	gs := make([]g, len(p1.runq))
	 111  	for i := 0; i < len(p1.runq); i++ {
	 112  		for j := 0; j < i; j++ {
	 113  			gs[j].sig = 0
	 114  			runqput(p1, &gs[j], false)
	 115  		}
	 116  		gp := runqsteal(p2, p1, true)
	 117  		s := 0
	 118  		if gp != nil {
	 119  			s++
	 120  			gp.sig++
	 121  		}
	 122  		for {
	 123  			gp, _ = runqget(p2)
	 124  			if gp == nil {
	 125  				break
	 126  			}
	 127  			s++
	 128  			gp.sig++
	 129  		}
	 130  		for {
	 131  			gp, _ = runqget(p1)
	 132  			if gp == nil {
	 133  				break
	 134  			}
	 135  			gp.sig++
	 136  		}
	 137  		for j := 0; j < i; j++ {
	 138  			if gs[j].sig != 1 {
	 139  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
	 140  				throw("bad element")
	 141  			}
	 142  		}
	 143  		if s != i/2 && s != i/2+1 {
	 144  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
	 145  			throw("bad steal")
	 146  		}
	 147  	}
	 148  }
	 149  
	 150  // Temporary to enable register ABI bringup.
	 151  // TODO(register args): convert back to local variables in RunSchedLocalQueueEmptyTest that
	 152  // get passed to the "go" stmts there.
	 153  var RunSchedLocalQueueEmptyState struct {
	 154  	done  chan bool
	 155  	ready *uint32
	 156  	p     *p
	 157  }
	 158  
	 159  func RunSchedLocalQueueEmptyTest(iters int) {
	 160  	// Test that runq is not spuriously reported as empty.
	 161  	// Runq emptiness affects scheduling decisions and spurious emptiness
	 162  	// can lead to underutilization (both runnable Gs and idle Ps coexist
	 163  	// for an arbitrarily long time).
	 164  	done := make(chan bool, 1)
	 165  	RunSchedLocalQueueEmptyState.done = done
	 166  	p := new(p)
	 167  	RunSchedLocalQueueEmptyState.p = p
	 168  	gs := make([]g, 2)
	 169  	ready := new(uint32)
	 170  	RunSchedLocalQueueEmptyState.ready = ready
	 171  	for i := 0; i < iters; i++ {
	 172  		*ready = 0
	 173  		next0 := (i & 1) == 0
	 174  		next1 := (i & 2) == 0
	 175  		runqput(p, &gs[0], next0)
	 176  		go func() {
	 177  			for atomic.Xadd(RunSchedLocalQueueEmptyState.ready, 1); atomic.Load(RunSchedLocalQueueEmptyState.ready) != 2; {
	 178  			}
	 179  			if runqempty(RunSchedLocalQueueEmptyState.p) {
	 180  				//println("next:", next0, next1)
	 181  				throw("queue is empty")
	 182  			}
	 183  			RunSchedLocalQueueEmptyState.done <- true
	 184  		}()
	 185  		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
	 186  		}
	 187  		runqput(p, &gs[1], next1)
	 188  		runqget(p)
	 189  		<-done
	 190  		runqget(p)
	 191  	}
	 192  }
	 193  
	 194  var (
	 195  	StringHash = stringHash
	 196  	BytesHash  = bytesHash
	 197  	Int32Hash  = int32Hash
	 198  	Int64Hash  = int64Hash
	 199  	MemHash    = memhash
	 200  	MemHash32  = memhash32
	 201  	MemHash64  = memhash64
	 202  	EfaceHash  = efaceHash
	 203  	IfaceHash  = ifaceHash
	 204  )
	 205  
	 206  var UseAeshash = &useAeshash
	 207  
	 208  func MemclrBytes(b []byte) {
	 209  	s := (*slice)(unsafe.Pointer(&b))
	 210  	memclrNoHeapPointers(s.array, uintptr(s.len))
	 211  }
	 212  
	 213  var HashLoad = &hashLoad
	 214  
	 215  // entry point for testing
	 216  func GostringW(w []uint16) (s string) {
	 217  	systemstack(func() {
	 218  		s = gostringw(&w[0])
	 219  	})
	 220  	return
	 221  }
	 222  
	 223  var Open = open
	 224  var Close = closefd
	 225  var Read = read
	 226  var Write = write
	 227  
	 228  func Envs() []string		 { return envs }
	 229  func SetEnvs(e []string) { envs = e }
	 230  
	 231  var BigEndian = sys.BigEndian
	 232  
	 233  // For benchmarking.
	 234  
	 235  func BenchSetType(n int, x interface{}) {
	 236  	e := *efaceOf(&x)
	 237  	t := e._type
	 238  	var size uintptr
	 239  	var p unsafe.Pointer
	 240  	switch t.kind & kindMask {
	 241  	case kindPtr:
	 242  		t = (*ptrtype)(unsafe.Pointer(t)).elem
	 243  		size = t.size
	 244  		p = e.data
	 245  	case kindSlice:
	 246  		slice := *(*struct {
	 247  			ptr      unsafe.Pointer
	 248  			len, cap uintptr
	 249  		})(e.data)
	 250  		t = (*slicetype)(unsafe.Pointer(t)).elem
	 251  		size = t.size * slice.len
	 252  		p = slice.ptr
	 253  	}
	 254  	allocSize := roundupsize(size)
	 255  	systemstack(func() {
	 256  		for i := 0; i < n; i++ {
	 257  			heapBitsSetType(uintptr(p), allocSize, size, t)
	 258  		}
	 259  	})
	 260  }
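
BenchSetType replays heapBitsSetType n times for a pointer or slice value, so a benchmark simply forwards b.N. A rough sketch of how a benchmark in package runtime_test (which dot-imports runtime, as the runtime's own tests do) might drive it; the benchmark names and the node type are hypothetical:

	type node struct {
		next *node
		data [8]uintptr
	}

	func BenchmarkSetTypeNode(b *testing.B) {
		// Writes the heap bitmap for a single *node allocation, b.N times.
		BenchSetType(b.N, new(node))
	}

	func BenchmarkSetTypeNodeSlice(b *testing.B) {
		// The slice case covers the bitmap for every element of the backing array.
		BenchSetType(b.N, make([]node, 32))
	}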
	 261  
	 262  const PtrSize = sys.PtrSize
	 263  
	 264  var ForceGCPeriod = &forcegcperiod
	 265  
	 266  // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
	 267  // the "environment" traceback level, so later calls to
	 268  // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
	 269  func SetTracebackEnv(level string) {
	 270  	setTraceback(level)
	 271  	traceback_env = traceback_cache
	 272  }
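
For example, a test binary that wants full tracebacks no matter what individual tests request could raise the level once in TestMain. This is a hedged sketch (assuming the usual os and testing imports), not the runtime's own setup:

	func TestMain(m *testing.M) {
		// "system" includes runtime frames; because this raises the
		// "environment" level, a later debug.SetTraceback("single")
		// cannot lower it again.
		SetTracebackEnv("system")
		os.Exit(m.Run())
	}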
	 273  
	 274  var ReadUnaligned32 = readUnaligned32
	 275  var ReadUnaligned64 = readUnaligned64
	 276  
	 277  func CountPagesInUse() (pagesInUse, counted uintptr) {
	 278  	stopTheWorld("CountPagesInUse")
	 279  
	 280  	pagesInUse = uintptr(mheap_.pagesInUse)
	 281  
	 282  	for _, s := range mheap_.allspans {
	 283  		if s.state.get() == mSpanInUse {
	 284  			counted += s.npages
	 285  		}
	 286  	}
	 287  
	 288  	startTheWorld()
	 289  
	 290  	return
	 291  }
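
The two results are meant to agree: pagesInUse is the heap's running counter, while counted is recomputed by walking allspans. A hypothetical consistency check might look like:

	func TestPageAccountingSketch(t *testing.T) {
		pagesInUse, counted := CountPagesInUse()
		if pagesInUse != counted {
			t.Fatalf("mheap_.pagesInUse = %d, but counting spans gives %d", pagesInUse, counted)
		}
	}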
	 292  
	 293  func Fastrand() uint32					{ return fastrand() }
	 294  func Fastrandn(n uint32) uint32 { return fastrandn(n) }
	 295  
	 296  type ProfBuf profBuf
	 297  
	 298  func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	 299  	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
	 300  }
	 301  
	 302  func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	 303  	(*profBuf)(p).write(tag, now, hdr, stk)
	 304  }
	 305  
	 306  const (
	 307  	ProfBufBlocking    = profBufBlocking
	 308  	ProfBufNonBlocking = profBufNonBlocking
	 309  )
	 310  
	 311  func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	 312  	return (*profBuf)(p).read(profBufReadMode(mode))
	 313  }
	 314  
	 315  func (p *ProfBuf) Close() {
	 316  	(*profBuf)(p).close()
	 317  }
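
A sketch of driving the ProfBuf wrapper from a test (assuming the unsafe import); the buffer sizes are arbitrary and the comments are only indicative, with profbuf.go as the authoritative reference:

	func profBufSketch() {
		var tag unsafe.Pointer

		// 2 header words per record, a 64-word data buffer, room for 16 tags.
		b := NewProfBuf(2, 64, 16)
		b.Write(&tag, Nanotime(), []uint64{1, 2}, []uintptr{0x1234, 0x5678})
		b.Close()

		// A non-blocking read returns whatever has been written so far;
		// once the buffer is closed and drained, eof reports true.
		data, tags, eof := b.Read(ProfBufNonBlocking)
		_, _, _ = data, tags, eof
	}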
	 318  
	 319  func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	 320  	stopTheWorld("ReadMetricsSlow")
	 321  
	 322  	// Initialize the metrics beforehand because this could
	 323  	// allocate and skew the stats.
	 324  	metricsLock()
	 325  	initMetrics()
	 326  	metricsUnlock()
	 327  
	 328  	systemstack(func() {
	 329  		// Read memstats first. It's going to flush
	 330  		// the mcaches which readMetrics does not do, so
	 331  		// going the other way around may result in
	 332  		// inconsistent statistics.
	 333  		readmemstats_m(memStats)
	 334  	})
	 335  
	 336  	// Read metrics off the system stack.
	 337  	//
	 338  	// The only part of readMetrics that could allocate
	 339  	// and skew the stats is initMetrics.
	 340  	readMetrics(samplesp, len, cap)
	 341  
	 342  	startTheWorld()
	 343  }
	 344  
	 345  // ReadMemStatsSlow returns both the runtime-computed MemStats and
	 346  // MemStats accumulated by scanning the heap.
	 347  func ReadMemStatsSlow() (base, slow MemStats) {
	 348  	stopTheWorld("ReadMemStatsSlow")
	 349  
	 350  	// Run on the system stack to avoid stack growth allocation.
	 351  	systemstack(func() {
	 352  		// Make sure stats don't change.
	 353  		getg().m.mallocing++
	 354  
	 355  		readmemstats_m(&base)
	 356  
	 357  		// Initialize slow from base and zero the fields we're
	 358  		// recomputing.
	 359  		slow = base
	 360  		slow.Alloc = 0
	 361  		slow.TotalAlloc = 0
	 362  		slow.Mallocs = 0
	 363  		slow.Frees = 0
	 364  		slow.HeapReleased = 0
	 365  		var bySize [_NumSizeClasses]struct {
	 366  			Mallocs, Frees uint64
	 367  		}
	 368  
	 369  		// Add up current allocations in spans.
	 370  		for _, s := range mheap_.allspans {
	 371  			if s.state.get() != mSpanInUse {
	 372  				continue
	 373  			}
	 374  			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
	 375  				slow.Mallocs++
	 376  				slow.Alloc += uint64(s.elemsize)
	 377  			} else {
	 378  				slow.Mallocs += uint64(s.allocCount)
	 379  				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
	 380  				bySize[sizeclass].Mallocs += uint64(s.allocCount)
	 381  			}
	 382  		}
	 383  
	 384  		// Add in frees by just reading the stats for those directly.
	 385  		var m heapStatsDelta
	 386  		memstats.heapStats.unsafeRead(&m)
	 387  
	 388  		// Collect per-sizeclass free stats.
	 389  		var smallFree uint64
	 390  		for i := 0; i < _NumSizeClasses; i++ {
	 391  			slow.Frees += uint64(m.smallFreeCount[i])
	 392  			bySize[i].Frees += uint64(m.smallFreeCount[i])
	 393  			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
	 394  			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
	 395  		}
	 396  		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
	 397  		slow.Mallocs += slow.Frees
	 398  
	 399  		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree
	 400  
	 401  		for i := range slow.BySize {
	 402  			slow.BySize[i].Mallocs = bySize[i].Mallocs
	 403  			slow.BySize[i].Frees = bySize[i].Frees
	 404  		}
	 405  
	 406  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
	 407  			chunk := mheap_.pages.tryChunkOf(i)
	 408  			if chunk == nil {
	 409  				continue
	 410  			}
	 411  			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
	 412  			slow.HeapReleased += uint64(pg) * pageSize
	 413  		}
	 414  		for _, p := range allp {
	 415  			pg := sys.OnesCount64(p.pcache.scav)
	 416  			slow.HeapReleased += uint64(pg) * pageSize
	 417  		}
	 418  
	 419  		getg().m.mallocing--
	 420  	})
	 421  
	 422  	startTheWorld()
	 423  	return
	 424  }
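
The intended use is a consistency check between the incrementally maintained statistics (base) and the ones recomputed from the heap itself (slow). A trimmed-down sketch; the runtime's real test compares the two structs field by field:

	func TestMemStatsAgreeSketch(t *testing.T) {
		base, slow := ReadMemStatsSlow()
		if base.Alloc != slow.Alloc {
			t.Errorf("Alloc: counters say %d, heap scan says %d", base.Alloc, slow.Alloc)
		}
		if base.HeapReleased != slow.HeapReleased {
			t.Errorf("HeapReleased: counters say %d, heap scan says %d", base.HeapReleased, slow.HeapReleased)
		}
	}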
	 425  
	 426  // BlockOnSystemStack switches to the system stack, prints "x\n" to
	 427  // stderr, and blocks in a stack containing
	 428  // "runtime.blockOnSystemStackInternal".
	 429  func BlockOnSystemStack() {
	 430  	systemstack(blockOnSystemStackInternal)
	 431  }
	 432  
	 433  func blockOnSystemStackInternal() {
	 434  	print("x\n")
	 435  	lock(&deadlock)
	 436  	lock(&deadlock)
	 437  }
	 438  
	 439  type RWMutex struct {
	 440  	rw rwmutex
	 441  }
	 442  
	 443  func (rw *RWMutex) RLock() {
	 444  	rw.rw.rlock()
	 445  }
	 446  
	 447  func (rw *RWMutex) RUnlock() {
	 448  	rw.rw.runlock()
	 449  }
	 450  
	 451  func (rw *RWMutex) Lock() {
	 452  	rw.rw.lock()
	 453  }
	 454  
	 455  func (rw *RWMutex) Unlock() {
	 456  	rw.rw.unlock()
	 457  }
	 458  
	 459  const RuntimeHmapSize = unsafe.Sizeof(hmap{})
	 460  
	 461  func MapBucketsCount(m map[int]int) int {
	 462  	h := *(**hmap)(unsafe.Pointer(&m))
	 463  	return 1 << h.B
	 464  }
	 465  
	 466  func MapBucketsPointerIsNil(m map[int]int) bool {
	 467  	h := *(**hmap)(unsafe.Pointer(&m))
	 468  	return h.buckets == nil
	 469  }
	 470  
	 471  func LockOSCounts() (external, internal uint32) {
	 472  	g := getg()
	 473  	if g.m.lockedExt+g.m.lockedInt == 0 {
	 474  		if g.lockedm != 0 {
	 475  			panic("lockedm on non-locked goroutine")
	 476  		}
	 477  	} else {
	 478  		if g.lockedm == 0 {
	 479  			panic("nil lockedm on locked goroutine")
	 480  		}
	 481  	}
	 482  	return g.m.lockedExt, g.m.lockedInt
	 483  }
	 484  
	 485  //go:noinline
	 486  func TracebackSystemstack(stk []uintptr, i int) int {
	 487  	if i == 0 {
	 488  		pc, sp := getcallerpc(), getcallersp()
	 489  		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	 490  	}
	 491  	n := 0
	 492  	systemstack(func() {
	 493  		n = TracebackSystemstack(stk, i-1)
	 494  	})
	 495  	return n
	 496  }
	 497  
	 498  func KeepNArenaHints(n int) {
	 499  	hint := mheap_.arenaHints
	 500  	for i := 1; i < n; i++ {
	 501  		hint = hint.next
	 502  		if hint == nil {
	 503  			return
	 504  		}
	 505  	}
	 506  	hint.next = nil
	 507  }
	 508  
	 509  // MapNextArenaHint reserves a page at the next arena growth hint,
	 510  // preventing the arena from growing there, and returns the range of
	 511  // addresses that are no longer viable.
	 512  func MapNextArenaHint() (start, end uintptr) {
	 513  	hint := mheap_.arenaHints
	 514  	addr := hint.addr
	 515  	if hint.down {
	 516  		start, end = addr-heapArenaBytes, addr
	 517  		addr -= physPageSize
	 518  	} else {
	 519  		start, end = addr, addr+heapArenaBytes
	 520  	}
	 521  	sysReserve(unsafe.Pointer(addr), physPageSize)
	 522  	return
	 523  }
	 524  
	 525  func GetNextArenaHint() uintptr {
	 526  	return mheap_.arenaHints.addr
	 527  }
	 528  
	 529  type G = g
	 530  
	 531  type Sudog = sudog
	 532  
	 533  func Getg() *G {
	 534  	return getg()
	 535  }
	 536  
	 537  //go:noinline
	 538  func PanicForTesting(b []byte, i int) byte {
	 539  	return unexportedPanicForTesting(b, i)
	 540  }
	 541  
	 542  //go:noinline
	 543  func unexportedPanicForTesting(b []byte, i int) byte {
	 544  	return b[i]
	 545  }
	 546  
	 547  func G0StackOverflow() {
	 548  	systemstack(func() {
	 549  		stackOverflow(nil)
	 550  	})
	 551  }
	 552  
	 553  func stackOverflow(x *byte) {
	 554  	var buf [256]byte
	 555  	stackOverflow(&buf[0])
	 556  }
	 557  
	 558  func MapTombstoneCheck(m map[int]int) {
	 559  	// Make sure emptyOne and emptyRest are distributed correctly.
	 560  	// We should have a series of filled and emptyOne cells, followed by
	 561  	// a series of emptyRest cells.
	 562  	h := *(**hmap)(unsafe.Pointer(&m))
	 563  	i := interface{}(m)
	 564  	t := *(**maptype)(unsafe.Pointer(&i))
	 565  
	 566  	for x := 0; x < 1<<h.B; x++ {
	 567  		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
	 568  		n := 0
	 569  		for b := b0; b != nil; b = b.overflow(t) {
	 570  			for i := 0; i < bucketCnt; i++ {
	 571  				if b.tophash[i] != emptyRest {
	 572  					n++
	 573  				}
	 574  			}
	 575  		}
	 576  		k := 0
	 577  		for b := b0; b != nil; b = b.overflow(t) {
	 578  			for i := 0; i < bucketCnt; i++ {
	 579  				if k < n && b.tophash[i] == emptyRest {
	 580  					panic("early emptyRest")
	 581  				}
	 582  				if k >= n && b.tophash[i] != emptyRest {
	 583  					panic("late non-emptyRest")
	 584  				}
	 585  				if k == n-1 && b.tophash[i] == emptyOne {
	 586  					panic("last non-emptyRest entry is emptyOne")
	 587  				}
	 588  				k++
	 589  			}
	 590  		}
	 591  	}
	 592  }
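
A sketch of exercising the check: fill a map, delete part of the keys to leave tombstones behind, and let MapTombstoneCheck throw if the emptyOne/emptyRest layout is ever violated. The sizes are arbitrary:

	func TestMapTombstonesSketch(t *testing.T) {
		m := make(map[int]int)
		const n = 1000
		for i := 0; i < n; i++ {
			m[i] = i
		}
		// Deleting every other key scatters emptyOne tombstones through the buckets.
		for i := 0; i < n; i += 2 {
			delete(m, i)
		}
		MapTombstoneCheck(m) // throws on an invariant violation
	}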
	 593  
	 594  func RunGetgThreadSwitchTest() {
	 595  	// Test that getg works correctly with thread switch.
	 596  	// With gccgo, if we generate getg inlined, the backend
	 597  	// may cache the address of the TLS variable, which
	 598  	// will become invalid after a thread switch. This test
	 599  	// checks that the bad caching doesn't happen.
	 600  
	 601  	ch := make(chan int)
	 602  	go func(ch chan int) {
	 603  		ch <- 5
	 604  		LockOSThread()
	 605  	}(ch)
	 606  
	 607  	g1 := getg()
	 608  
	 609  	// Block on a receive. This is likely to get us a thread
	 610  	// switch. If we yield to the sender goroutine, it will
	 611  	// lock the thread, forcing us to resume on a different
	 612  	// thread.
	 613  	<-ch
	 614  
	 615  	g2 := getg()
	 616  	if g1 != g2 {
	 617  		panic("g1 != g2")
	 618  	}
	 619  
	 620  	// Also test getg after some control flow, as the
	 621  	// backend is sensitive to control flow.
	 622  	g3 := getg()
	 623  	if g1 != g3 {
	 624  		panic("g1 != g3")
	 625  	}
	 626  }
	 627  
	 628  const (
	 629  	PageSize         = pageSize
	 630  	PallocChunkPages = pallocChunkPages
	 631  	PageAlloc64Bit   = pageAlloc64Bit
	 632  	PallocSumBytes   = pallocSumBytes
	 633  )
	 634  
	 635  // Expose pallocSum for testing.
	 636  type PallocSum pallocSum
	 637  
	 638  func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
	 639  func (m PallocSum) Start() uint										{ return pallocSum(m).start() }
	 640  func (m PallocSum) Max() uint											{ return pallocSum(m).max() }
	 641  func (m PallocSum) End() uint											{ return pallocSum(m).end() }
	 642  
	 643  // Expose pallocBits for testing.
	 644  type PallocBits pallocBits
	 645  
	 646  func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	 647  	return (*pallocBits)(b).find(npages, searchIdx)
	 648  }
	 649  func (b *PallocBits) AllocRange(i, n uint)			 { (*pallocBits)(b).allocRange(i, n) }
	 650  func (b *PallocBits) Free(i, n uint)						 { (*pallocBits)(b).free(i, n) }
	 651  func (b *PallocBits) Summarize() PallocSum			 { return PallocSum((*pallocBits)(b).summarize()) }
	 652  func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
	 653  
	 654  // SummarizeSlow is a slow but more obviously correct implementation
	 655  // of (*pallocBits).summarize. Used for testing.
	 656  func SummarizeSlow(b *PallocBits) PallocSum {
	 657  	var start, max, end uint
	 658  
	 659  	const N = uint(len(b)) * 64
	 660  	for start < N && (*pageBits)(b).get(start) == 0 {
	 661  		start++
	 662  	}
	 663  	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
	 664  		end++
	 665  	}
	 666  	run := uint(0)
	 667  	for i := uint(0); i < N; i++ {
	 668  		if (*pageBits)(b).get(i) == 0 {
	 669  			run++
	 670  		} else {
	 671  			run = 0
	 672  		}
	 673  		if run > max {
	 674  			max = run
	 675  		}
	 676  	}
	 677  	return PackPallocSum(start, max, end)
	 678  }
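
Its purpose is to cross-check the optimized summarizer, roughly like this hypothetical helper:

	func checkSummarize(t *testing.T, b *PallocBits) {
		got, want := b.Summarize(), SummarizeSlow(b)
		if got.Start() != want.Start() || got.Max() != want.Max() || got.End() != want.End() {
			t.Errorf("summarize: got (start=%d max=%d end=%d), want (start=%d max=%d end=%d)",
				got.Start(), got.Max(), got.End(), want.Start(), want.Max(), want.End())
		}
	}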
	 679  
	 680  // Expose non-trivial helpers for testing.
	 681  func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
	 682  
	 683  // Given two PallocBits, returns a set of bit ranges where
	 684  // they differ.
	 685  func DiffPallocBits(a, b *PallocBits) []BitRange {
	 686  	ba := (*pageBits)(a)
	 687  	bb := (*pageBits)(b)
	 688  
	 689  	var d []BitRange
	 690  	base, size := uint(0), uint(0)
	 691  	for i := uint(0); i < uint(len(ba))*64; i++ {
	 692  		if ba.get(i) != bb.get(i) {
	 693  			if size == 0 {
	 694  				base = i
	 695  			}
	 696  			size++
	 697  		} else {
	 698  			if size != 0 {
	 699  				d = append(d, BitRange{base, size})
	 700  			}
	 701  			size = 0
	 702  		}
	 703  	}
	 704  	if size != 0 {
	 705  		d = append(d, BitRange{base, size})
	 706  	}
	 707  	return d
	 708  }
	 709  
	 710  // StringifyPallocBits gets the bits in the bit range r from b,
	 711  // and returns a string containing the bits as ASCII 0 and 1
	 712  // characters.
	 713  func StringifyPallocBits(b *PallocBits, r BitRange) string {
	 714  	str := ""
	 715  	for j := r.I; j < r.I+r.N; j++ {
	 716  		if (*pageBits)(b).get(j) != 0 {
	 717  			str += "1"
	 718  		} else {
	 719  			str += "0"
	 720  		}
	 721  	}
	 722  	return str
	 723  }
	 724  
	 725  // Expose pallocData for testing.
	 726  type PallocData pallocData
	 727  
	 728  func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	 729  	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
	 730  }
	 731  func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
	 732  func (d *PallocData) ScavengedSetRange(i, n uint) {
	 733  	(*pallocData)(d).scavenged.setRange(i, n)
	 734  }
	 735  func (d *PallocData) PallocBits() *PallocBits {
	 736  	return (*PallocBits)(&(*pallocData)(d).pallocBits)
	 737  }
	 738  func (d *PallocData) Scavenged() *PallocBits {
	 739  	return (*PallocBits)(&(*pallocData)(d).scavenged)
	 740  }
	 741  
	 742  // Expose fillAligned for testing.
	 743  func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
	 744  
	 745  // Expose pageCache for testing.
	 746  type PageCache pageCache
	 747  
	 748  const PageCachePages = pageCachePages
	 749  
	 750  func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	 751  	return PageCache(pageCache{base: base, cache: cache, scav: scav})
	 752  }
	 753  func (c *PageCache) Empty() bool	 { return (*pageCache)(c).empty() }
	 754  func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
	 755  func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
	 756  func (c *PageCache) Scav() uint64	{ return (*pageCache)(c).scav }
	 757  func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	 758  	return (*pageCache)(c).alloc(npages)
	 759  }
	 760  func (c *PageCache) Flush(s *PageAlloc) {
	 761  	cp := (*pageCache)(c)
	 762  	sp := (*pageAlloc)(s)
	 763  
	 764  	systemstack(func() {
	 765  		// None of the tests need any higher-level locking, so we just
	 766  		// take the lock internally.
	 767  		lock(sp.mheapLock)
	 768  		cp.flush(sp)
	 769  		unlock(sp.mheapLock)
	 770  	})
	 771  }
	 772  
	 773  // Expose chunk index type.
	 774  type ChunkIdx chunkIdx
	 775  
	 776  // Expose pageAlloc for testing. Note that because pageAlloc is
	 777  // not in the heap, neither is PageAlloc.
	 778  type PageAlloc pageAlloc
	 779  
	 780  func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	 781  	pp := (*pageAlloc)(p)
	 782  
	 783  	var addr, scav uintptr
	 784  	systemstack(func() {
	 785  		// None of the tests need any higher-level locking, so we just
	 786  		// take the lock internally.
	 787  		lock(pp.mheapLock)
	 788  		addr, scav = pp.alloc(npages)
	 789  		unlock(pp.mheapLock)
	 790  	})
	 791  	return addr, scav
	 792  }
	 793  func (p *PageAlloc) AllocToCache() PageCache {
	 794  	pp := (*pageAlloc)(p)
	 795  
	 796  	var c PageCache
	 797  	systemstack(func() {
	 798  		// None of the tests need any higher-level locking, so we just
	 799  		// take the lock internally.
	 800  		lock(pp.mheapLock)
	 801  		c = PageCache(pp.allocToCache())
	 802  		unlock(pp.mheapLock)
	 803  	})
	 804  	return c
	 805  }
	 806  func (p *PageAlloc) Free(base, npages uintptr) {
	 807  	pp := (*pageAlloc)(p)
	 808  
	 809  	systemstack(func() {
	 810  		// None of the tests need any higher-level locking, so we just
	 811  		// take the lock internally.
	 812  		lock(pp.mheapLock)
	 813  		pp.free(base, npages)
	 814  		unlock(pp.mheapLock)
	 815  	})
	 816  }
	 817  func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	 818  	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
	 819  }
	 820  func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
	 821  	pp := (*pageAlloc)(p)
	 822  	systemstack(func() {
	 823  		// None of the tests need any higher-level locking, so we just
	 824  		// take the lock internally.
	 825  		lock(pp.mheapLock)
	 826  		r = pp.scavenge(nbytes, mayUnlock)
	 827  		unlock(pp.mheapLock)
	 828  	})
	 829  	return
	 830  }
	 831  func (p *PageAlloc) InUse() []AddrRange {
	 832  	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	 833  	for _, r := range p.inUse.ranges {
	 834  		ranges = append(ranges, AddrRange{r})
	 835  	}
	 836  	return ranges
	 837  }
	 838  
	 839  // Returns nil if the PallocData's L2 is missing.
	 840  func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	 841  	ci := chunkIdx(i)
	 842  	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
	 843  }
	 844  
	 845  // AddrRange is a wrapper around addrRange for testing.
	 846  type AddrRange struct {
	 847  	addrRange
	 848  }
	 849  
	 850  // MakeAddrRange creates a new address range.
	 851  func MakeAddrRange(base, limit uintptr) AddrRange {
	 852  	return AddrRange{makeAddrRange(base, limit)}
	 853  }
	 854  
	 855  // Base returns the virtual base address of the address range.
	 856  func (a AddrRange) Base() uintptr {
	 857  	return a.addrRange.base.addr()
	 858  }
	 859  
	 860  // Limit returns the virtual address of the limit of the address range.
	 861  func (a AddrRange) Limit() uintptr {
	 862  	return a.addrRange.limit.addr()
	 863  }
	 864  
	 865  // Equals returns true if the two address ranges are exactly equal.
	 866  func (a AddrRange) Equals(b AddrRange) bool {
	 867  	return a == b
	 868  }
	 869  
	 870  // Size returns the size in bytes of the address range.
	 871  func (a AddrRange) Size() uintptr {
	 872  	return a.addrRange.size()
	 873  }
	 874  
	 875  // AddrRanges is a wrapper around addrRanges for testing.
	 876  type AddrRanges struct {
	 877  	addrRanges
	 878  	mutable bool
	 879  }
	 880  
	 881  // NewAddrRanges creates a new empty addrRanges.
	 882  //
	 883  // Note that this initializes addrRanges just like in the
	 884  // runtime, so its memory is persistentalloc'd. Call this
	 885  // function sparingly since the memory it allocates is
	 886  // leaked.
	 887  //
	 888  // This AddrRanges is mutable, so we can test methods like
	 889  // Add.
	 890  func NewAddrRanges() AddrRanges {
	 891  	r := addrRanges{}
	 892  	r.init(new(sysMemStat))
	 893  	return AddrRanges{r, true}
	 894  }
	 895  
	 896  // MakeAddrRanges creates a new addrRanges populated with
	 897  // the ranges in a.
	 898  //
	 899  // The returned AddrRanges is immutable, so methods like
	 900  // Add will fail.
	 901  func MakeAddrRanges(a ...AddrRange) AddrRanges {
	 902  	// Methods that manipulate the backing store of addrRanges.ranges should
	 903  	// not be used on the result from this function (e.g. add) since they may
	 904  	// trigger reallocation. That would normally be fine, except the new
	 905  	// backing store won't come from the heap, but from persistentalloc, so
	 906  	// we'll leak some memory implicitly.
	 907  	ranges := make([]addrRange, 0, len(a))
	 908  	total := uintptr(0)
	 909  	for _, r := range a {
	 910  		ranges = append(ranges, r.addrRange)
	 911  		total += r.Size()
	 912  	}
	 913  	return AddrRanges{addrRanges{
	 914  		ranges:		 ranges,
	 915  		totalBytes: total,
	 916  		sysStat:		new(sysMemStat),
	 917  	}, false}
	 918  }
	 919  
	 920  // Ranges returns a copy of the ranges described by the
	 921  // addrRanges.
	 922  func (a *AddrRanges) Ranges() []AddrRange {
	 923  	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	 924  	for _, r := range a.addrRanges.ranges {
	 925  		result = append(result, AddrRange{r})
	 926  	}
	 927  	return result
	 928  }
	 929  
	 930  // FindSucc returns the successor to base. See addrRanges.findSucc
	 931  // for more details.
	 932  func (a *AddrRanges) FindSucc(base uintptr) int {
	 933  	return a.findSucc(base)
	 934  }
	 935  
	 936  // Add adds a new AddrRange to the AddrRanges.
	 937  //
	 938  // The AddrRanges must be mutable (i.e. created by NewAddrRanges),
	 939  // otherwise this method will throw.
	 940  func (a *AddrRanges) Add(r AddrRange) {
	 941  	if !a.mutable {
	 942  		throw("attempt to mutate immutable AddrRanges")
	 943  	}
	 944  	a.add(r.addrRange)
	 945  }
	 946  
	 947  // TotalBytes returns the totalBytes field of the addrRanges.
	 948  func (a *AddrRanges) TotalBytes() uintptr {
	 949  	return a.addrRanges.totalBytes
	 950  }
	 951  
	 952  // BitRange represents a range over a bitmap.
	 953  type BitRange struct {
	 954  	I, N uint // bit index and length in bits
	 955  }
	 956  
	 957  // NewPageAlloc creates a new page allocator for testing and
	 958  // initializes it with the scav and chunks maps. Each key in these maps
	 959  // represents a chunk index and each value is a series of bit ranges to
	 960  // set within each bitmap's chunk.
	 961  //
	 962  // The initialization of the pageAlloc preserves the invariant that if a
	 963  // scavenged bit is set the alloc bit is necessarily unset, so some
	 964  // of the bits described by scav may be cleared in the final bitmap if
	 965  // ranges in chunks overlap with them.
	 966  //
	 967  // scav is optional, and if nil, the scavenged bitmap will be cleared
	 968  // (as opposed to all 1s, which it usually is). Furthermore, every
	 969  // chunk index in scav must appear in chunks; ones that do not are
	 970  // ignored.
	 971  func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	 972  	p := new(pageAlloc)
	 973  
	 974  	// We've got an entry, so initialize the pageAlloc.
	 975  	p.init(new(mutex), nil)
	 976  	lockInit(p.mheapLock, lockRankMheap)
	 977  	p.test = true
	 978  
	 979  	for i, init := range chunks {
	 980  		addr := chunkBase(chunkIdx(i))
	 981  
	 982  		// Mark the chunk's existence in the pageAlloc.
	 983  		systemstack(func() {
	 984  			lock(p.mheapLock)
	 985  			p.grow(addr, pallocChunkBytes)
	 986  			unlock(p.mheapLock)
	 987  		})
	 988  
	 989  		// Initialize the bitmap and update pageAlloc metadata.
	 990  		chunk := p.chunkOf(chunkIndex(addr))
	 991  
	 992  		// Clear all the scavenged bits which grow set.
	 993  		chunk.scavenged.clearRange(0, pallocChunkPages)
	 994  
	 995  		// Apply scavenge state if applicable.
	 996  		if scav != nil {
	 997  			if scvg, ok := scav[i]; ok {
	 998  				for _, s := range scvg {
	 999  					// Ignore the case of s.N == 0. setRange doesn't handle
	1000  					// it and it's a no-op anyway.
	1001  					if s.N != 0 {
	1002  						chunk.scavenged.setRange(s.I, s.N)
	1003  					}
	1004  				}
	1005  			}
	1006  		}
	1007  
	1008  		// Apply alloc state.
	1009  		for _, s := range init {
	1010  			// Ignore the case of s.N == 0. allocRange doesn't handle
	1011  			// it and it's a no-op anyway.
	1012  			if s.N != 0 {
	1013  				chunk.allocRange(s.I, s.N)
	1014  			}
	1015  		}
	1016  
	1017  		// Update heap metadata for the allocRange calls above.
	1018  		systemstack(func() {
	1019  			lock(p.mheapLock)
	1020  			p.update(addr, pallocChunkPages, false, false)
	1021  			unlock(p.mheapLock)
	1022  		})
	1023  	}
	1024  
	1025  	systemstack(func() {
	1026  		lock(p.mheapLock)
	1027  		p.scavengeStartGen()
	1028  		unlock(p.mheapLock)
	1029  	})
	1030  
	1031  	return (*PageAlloc)(p)
	1032  }
	1033  
	1034  // FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
	1035  // is called the pageAlloc may no longer be used. The object itself will be
	1036  // collected by the garbage collector once it is no longer live.
	1037  func FreePageAlloc(pp *PageAlloc) {
	1038  	p := (*pageAlloc)(pp)
	1039  
	1040  	// Free all the mapped space for the summary levels.
	1041  	if pageAlloc64Bit != 0 {
	1042  		for l := 0; l < summaryLevels; l++ {
	1043  			sysFree(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes, nil)
	1044  		}
	1045  	} else {
	1046  		resSize := uintptr(0)
	1047  		for _, s := range p.summary {
	1048  			resSize += uintptr(cap(s)) * pallocSumBytes
	1049  		}
	1050  		sysFree(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize), nil)
	1051  	}
	1052  
	1053  	// Free the mapped space for chunks.
	1054  	for i := range p.chunks {
	1055  		if x := p.chunks[i]; x != nil {
	1056  			p.chunks[i] = nil
	1057  			// This memory comes from sysAlloc and will always be page-aligned.
	1058  			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
	1059  		}
	1060  	}
	1061  }
	1062  
	1063  // BaseChunkIdx is a convenient chunkIdx value which works on both
	1064  // 64 bit and 32 bit platforms, allowing the tests to share code
	1065  // between the two.
	1066  //
	1067  // This should not be higher than 0x100*pallocChunkBytes to support
	1068  // mips and mipsle, which only have 31-bit address spaces.
	1069  var BaseChunkIdx = func() ChunkIdx {
	1070  	var prefix uintptr
	1071  	if pageAlloc64Bit != 0 {
	1072  		prefix = 0xc000
	1073  	} else {
	1074  		prefix = 0x100
	1075  	}
	1076  	baseAddr := prefix * pallocChunkBytes
	1077  	if sys.GoosAix != 0 {
	1078  		baseAddr += arenaBaseOffset
	1079  	}
	1080  	return ChunkIdx(chunkIndex(baseAddr))
	1081  }()
	1082  
	1083  // PageBase returns an address given a chunk index and a page index
	1084  // relative to that chunk.
	1085  func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	1086  	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
	1087  }
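
Putting the pieces above together, a test can describe an initial heap layout declaratively and then exercise the allocator. A sketch with one chunk whose first 4 pages are allocated and whose last 8 pages are scavenged; the expectations follow from first-fit allocation, but this is an illustration, not the runtime's own test:

	func pageAllocSketch() {
		p := NewPageAlloc(
			map[ChunkIdx][]BitRange{BaseChunkIdx: {{0, 4}}},                    // alloc bits
			map[ChunkIdx][]BitRange{BaseChunkIdx: {{PallocChunkPages - 8, 8}}}, // scavenged bits
		)
		defer FreePageAlloc(p)

		// First fit: the lowest free page is index 4 of the base chunk, and
		// nothing in that range was scavenged.
		addr, scav := p.Alloc(2)
		if addr != PageBase(BaseChunkIdx, 4) || scav != 0 {
			panic("unexpected allocation")
		}
		p.Free(addr, 2)
	}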
	1088  
	1089  type BitsMismatch struct {
	1090  	Base      uintptr
	1091  	Got, Want uint64
	1092  }
	1093  
	1094  func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	1095  	ok = true
	1096  
	1097  	// Run on the system stack to avoid stack growth allocation.
	1098  	systemstack(func() {
	1099  		getg().m.mallocing++
	1100  
	1101  		// Lock so that we can safely access the bitmap.
	1102  		lock(&mheap_.lock)
	1103  	chunkLoop:
	1104  		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
	1105  			chunk := mheap_.pages.tryChunkOf(i)
	1106  			if chunk == nil {
	1107  				continue
	1108  			}
	1109  			for j := 0; j < pallocChunkPages/64; j++ {
	1110  				// Run over each 64-bit bitmap section and ensure
	1111  				// scavenged is being cleared properly on allocation.
	1112  				// If a used bit and scavenged bit are both set, that's
	1113  				// an error, and could indicate a larger problem, or
	1114  				// an accounting problem.
	1115  				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
	1116  				got := chunk.scavenged[j]
	1117  				if want != got {
	1118  					ok = false
	1119  					if n >= len(mismatches) {
	1120  						break chunkLoop
	1121  					}
	1122  					mismatches[n] = BitsMismatch{
	1123  						Base: chunkBase(i) + uintptr(j)*64*pageSize,
	1124  						Got:	got,
	1125  						Want: want,
	1126  					}
	1127  					n++
	1128  				}
	1129  			}
	1130  		}
	1131  		unlock(&mheap_.lock)
	1132  
	1133  		getg().m.mallocing--
	1134  	})
	1135  	return
	1136  }
	1137  
	1138  func PageCachePagesLeaked() (leaked uintptr) {
	1139  	stopTheWorld("PageCachePagesLeaked")
	1140  
	1141  	// Walk over destroyed Ps and look for unflushed caches.
	1142  	deadp := allp[len(allp):cap(allp)]
	1143  	for _, p := range deadp {
	1144  		// Since we're going past len(allp) we may see nil Ps.
	1145  		// Just ignore them.
	1146  		if p != nil {
	1147  			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
	1148  		}
	1149  	}
	1150  
	1151  	startTheWorld()
	1152  	return
	1153  }
	1154  
	1155  var Semacquire = semacquire
	1156  var Semrelease1 = semrelease1
	1157  
	1158  func SemNwait(addr *uint32) uint32 {
	1159  	root := semroot(addr)
	1160  	return atomic.Load(&root.nwait)
	1161  }
	1162  
	1163  // mspan wrapper for testing.
	1164  //go:notinheap
	1165  type MSpan mspan
	1166  
	1167  // Allocate an mspan for testing.
	1168  func AllocMSpan() *MSpan {
	1169  	var s *mspan
	1170  	systemstack(func() {
	1171  		lock(&mheap_.lock)
	1172  		s = (*mspan)(mheap_.spanalloc.alloc())
	1173  		unlock(&mheap_.lock)
	1174  	})
	1175  	return (*MSpan)(s)
	1176  }
	1177  
	1178  // Free an allocated mspan.
	1179  func FreeMSpan(s *MSpan) {
	1180  	systemstack(func() {
	1181  		lock(&mheap_.lock)
	1182  		mheap_.spanalloc.free(unsafe.Pointer(s))
	1183  		unlock(&mheap_.lock)
	1184  	})
	1185  }
	1186  
	1187  func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	1188  	s := (*mspan)(ms)
	1189  	s.nelems = uintptr(len(bits) * 8)
	1190  	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	1191  	result := s.countAlloc()
	1192  	s.gcmarkBits = nil
	1193  	return result
	1194  }
	1195  
	1196  const (
	1197  	TimeHistSubBucketBits   = timeHistSubBucketBits
	1198  	TimeHistNumSubBuckets   = timeHistNumSubBuckets
	1199  	TimeHistNumSuperBuckets = timeHistNumSuperBuckets
	1200  )
	1201  
	1202  type TimeHistogram timeHistogram
	1203  
	1204  // Count returns the count for the given bucket, subBucket indices.
	1205  // Returns true if the bucket was valid, otherwise returns the count
	1206  // for the underflow bucket and false.
	1207  func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
	1208  	t := (*timeHistogram)(th)
	1209  	i := bucket*TimeHistNumSubBuckets + subBucket
	1210  	if i >= uint(len(t.counts)) {
	1211  		return t.underflow, false
	1212  	}
	1213  	return t.counts[i], true
	1214  }
	1215  
	1216  func (th *TimeHistogram) Record(duration int64) {
	1217  	(*timeHistogram)(th).record(duration)
	1218  }
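
A rough sketch of recording and reading back durations: the zero value of TimeHistogram is ready to use, Record takes nanoseconds, and negative durations are diverted to the underflow bucket:

	func timeHistogramSketch() {
		var h TimeHistogram
		h.Record(100)     // 100ns
		h.Record(1000000) // 1ms
		h.Record(-1)      // negative: tallied in the underflow bucket

		// Count addresses buckets as (bucket, subBucket); any out-of-range
		// index returns the underflow count and false.
		var total uint64
		for i := uint(0); ; i++ {
			c, ok := h.Count(i/TimeHistNumSubBuckets, i%TimeHistNumSubBuckets)
			if !ok {
				break // c now holds the underflow count (1 here)
			}
			total += c
		}
		_ = total // the two non-negative records
	}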
	1219  
	1220  var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
	1221  
	1222  func SetIntArgRegs(a int) int {
	1223  	lock(&finlock)
	1224  	old := intArgRegs
	1225  	if a >= 0 {
	1226  		intArgRegs = a
	1227  	}
	1228  	unlock(&finlock)
	1229  	return old
	1230  }
	1231  
	1232  func FinalizerGAsleep() bool {
	1233  	lock(&finlock)
	1234  	result := fingwait
	1235  	unlock(&finlock)
	1236  	return result
	1237  }
	1238  
	1239  // For GCTestMoveStackOnNextCall, it's important not to introduce an
	1240  // extra layer of call, since then there's a return before the "real"
	1241  // next call.
	1242  var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
	1243  
	1244  // For GCTestIsReachable, it's important that we do this as a call so
	1245  // escape analysis can see through it.
	1246  func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	1247  	return gcTestIsReachable(ptrs...)
	1248  }
	1249  
	1250  // For GCTestPointerClass, it's important that we do this as a call so
	1251  // escape analysis can see through it.
	1252  //
	1253  // This is nosplit because gcTestPointerClass is.
	1254  //
	1255  //go:nosplit
	1256  func GCTestPointerClass(p unsafe.Pointer) string {
	1257  	return gcTestPointerClass(p)
	1258  }
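
A sketch combining the two GC test hooks (the reachability expectations are indicative; gcTestIsReachable and gcTestPointerClass define the exact semantics):

	func gcTestSketch() {
		a, b := new(int), new(int)

		// GCTestIsReachable runs a GC and sets bit i of the mask if ptrs[i]
		// was still reachable. a is kept alive below, b is not, so typically
		// only bit 0 ends up set.
		mask := GCTestIsReachable(unsafe.Pointer(a), unsafe.Pointer(b))
		_ = mask
		KeepAlive(a)

		// GCTestPointerClass names the kind of memory a pointer refers to,
		// e.g. a heap object versus a stack slot or static data.
		var local int
		println(GCTestPointerClass(unsafe.Pointer(&local)))
		println(GCTestPointerClass(unsafe.Pointer(a)))
	}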
	1259  
	1260  const Raceenabled = raceenabled
	1261  
