
Source file src/runtime/runtime2.go

Documentation: runtime

		 1  // Copyright 2009 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  package runtime
		 6  
		 7  import (
		 8  	"runtime/internal/atomic"
		 9  	"runtime/internal/sys"
		10  	"unsafe"
		11  )
		12  
		13  // defined constants
		14  const (
		15  	// G status
		16  	//
		17  	// Beyond indicating the general state of a G, the G status
		18  	// acts like a lock on the goroutine's stack (and hence its
		19  	// ability to execute user code).
		20  	//
		21  	// If you add to this list, add to the list
		22  	// of "okay during garbage collection" status
		23  	// in mgcmark.go too.
		24  	//
		25  	// TODO(austin): The _Gscan bit could be much lighter-weight.
		26  	// For example, we could choose not to run _Gscanrunnable
		27  	// goroutines found in the run queue, rather than CAS-looping
		28  	// until they become _Grunnable. And transitions like
		29  	// _Gscanwaiting -> _Gscanrunnable are actually okay because
		30  	// they don't affect stack ownership.
		31  
		32  	// _Gidle means this goroutine was just allocated and has not
		33  	// yet been initialized.
		34  	_Gidle = iota // 0
		35  
		36  	// _Grunnable means this goroutine is on a run queue. It is
		37  	// not currently executing user code. The stack is not owned.
		38  	_Grunnable // 1
		39  
		40  	// _Grunning means this goroutine may execute user code. The
		41  	// stack is owned by this goroutine. It is not on a run queue.
		42  	// It is assigned an M and a P (g.m and g.m.p are valid).
		43  	_Grunning // 2
		44  
		45  	// _Gsyscall means this goroutine is executing a system call.
		46  	// It is not executing user code. The stack is owned by this
		47  	// goroutine. It is not on a run queue. It is assigned an M.
		48  	_Gsyscall // 3
		49  
		50  	// _Gwaiting means this goroutine is blocked in the runtime.
		51  	// It is not executing user code. It is not on a run queue,
		52  	// but should be recorded somewhere (e.g., a channel wait
		53  	// queue) so it can be ready()d when necessary. The stack is
		54  	// not owned *except* that a channel operation may read or
		55  	// write parts of the stack under the appropriate channel
		56  	// lock. Otherwise, it is not safe to access the stack after a
		57  	// goroutine enters _Gwaiting (e.g., it may get moved).
		58  	_Gwaiting // 4
		59  
		60  	// _Gmoribund_unused is currently unused, but hardcoded in gdb
		61  	// scripts.
		62  	_Gmoribund_unused // 5
		63  
		64  	// _Gdead means this goroutine is currently unused. It may be
		65  	// just exited, on a free list, or just being initialized. It
		66  	// is not executing user code. It may or may not have a stack
		67  	// allocated. The G and its stack (if any) are owned by the M
		68  	// that is exiting the G or that obtained the G from the free
		69  	// list.
		70  	_Gdead // 6
		71  
		72  	// _Genqueue_unused is currently unused.
		73  	_Genqueue_unused // 7
		74  
		75  	// _Gcopystack means this goroutine's stack is being moved. It
		76  	// is not executing user code and is not on a run queue. The
		77  	// stack is owned by the goroutine that put it in _Gcopystack.
		78  	_Gcopystack // 8
		79  
		80  	// _Gpreempted means this goroutine stopped itself for a
		81  	// suspendG preemption. It is like _Gwaiting, but nothing is
		82  	// yet responsible for ready()ing it. Some suspendG must CAS
		83  	// the status to _Gwaiting to take responsibility for
		84  	// ready()ing this G.
		85  	_Gpreempted // 9
		86  
		87  	// _Gscan combined with one of the above states other than
		88  	// _Grunning indicates that GC is scanning the stack. The
		89  	// goroutine is not executing user code and the stack is owned
		90  	// by the goroutine that set the _Gscan bit.
		91  	//
		92  	// _Gscanrunning is different: it is used to briefly block
		93  	// state transitions while GC signals the G to scan its own
		94  	// stack. This is otherwise like _Grunning.
		95  	//
		96  	// atomicstatus&~Gscan gives the state the goroutine will
		97  	// return to when the scan completes.
		98  	_Gscan          = 0x1000
		99  	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	 100  	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	 101  	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	 102  	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	 103  	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
	 104  )
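
As the comment above says, atomicstatus&^_Gscan recovers the state the goroutine will return to once scanning finishes. A minimal sketch of that relationship; the helper names here are illustrative, not runtime APIs:

	// isScanStatus reports whether the _Gscan bit is set in status.
	func isScanStatus(status uint32) bool {
		return status&_Gscan != 0
	}

	// baseStatus strips the _Gscan bit, giving the state the goroutine
	// returns to when the scan completes (e.g. _Gscanwaiting -> _Gwaiting).
	func baseStatus(status uint32) uint32 {
		return status &^ _Gscan
	}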
	 105  
	 106  const (
	 107  	// P status
	 108  
	 109  	// _Pidle means a P is not being used to run user code or the
	 110  	// scheduler. Typically, it's on the idle P list and available
	 111  	// to the scheduler, but it may just be transitioning between
	 112  	// other states.
	 113  	//
	 114  	// The P is owned by the idle list or by whatever is
	 115  	// transitioning its state. Its run queue is empty.
	 116  	_Pidle = iota
	 117  
	 118  	// _Prunning means a P is owned by an M and is being used to
	 119  	// run user code or the scheduler. Only the M that owns this P
	 120  	// is allowed to change the P's status from _Prunning. The M
	 121  	// may transition the P to _Pidle (if it has no more work to
	 122  	// do), _Psyscall (when entering a syscall), or _Pgcstop (to
	 123  	// halt for the GC). The M may also hand ownership of the P
	 124  	// off directly to another M (e.g., to schedule a locked G).
	 125  	_Prunning
	 126  
	 127  	// _Psyscall means a P is not running user code. It has
	 128  	// affinity to an M in a syscall but is not owned by it and
	 129  	// may be stolen by another M. This is similar to _Pidle but
	 130  	// uses lightweight transitions and maintains M affinity.
	 131  	//
	 132  	// Leaving _Psyscall must be done with a CAS, either to steal
	 133  	// or retake the P. Note that there's an ABA hazard: even if
	 134  	// an M successfully CASes its original P back to _Prunning
	 135  	// after a syscall, it must understand the P may have been
	 136  	// used by another M in the interim.
	 137  	_Psyscall
	 138  
	 139  	// _Pgcstop means a P is halted for STW and owned by the M
	 140  	// that stopped the world. The M that stopped the world
	 141  	// continues to use its P, even in _Pgcstop. Transitioning
	 142  	// from _Prunning to _Pgcstop causes an M to release its P and
	 143  	// park.
	 144  	//
	 145  	// The P retains its run queue and startTheWorld will restart
	 146  	// the scheduler on Ps with non-empty run queues.
	 147  	_Pgcstop
	 148  
	 149  	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	 150  	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	 151  	// stripped of its resources, though a few things remain
	 152  	// (e.g., trace buffers).
	 153  	_Pdead
	 154  )
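
Leaving _Psyscall with a CAS, as required above, looks roughly like the following sketch. It is modeled on the retake/handoff path in proc.go but simplified; the variable pp and the surrounding control flow are assumptions, while atomic.Cas (from this file's runtime/internal/atomic import) and handoffp are real runtime primitives:

	// Steal a P stuck in a syscall: the CAS succeeds for exactly one M,
	// so two Ms cannot both claim ownership of pp.
	if atomic.Cas(&pp.status, _Psyscall, _Pidle) {
		handoffp(pp) // we own pp now; find it another M or park it on the idle list
	}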
	 155  
	 156  // Mutual exclusion locks.	In the uncontended case,
	 157  // as fast as spin locks (just a few user-level instructions),
	 158  // but on the contention path they sleep in the kernel.
	 159  // A zeroed Mutex is unlocked (no need to initialize each lock).
	 160  // Initialization is helpful for static lock ranking, but not required.
	 161  type mutex struct {
	 162  	// Empty struct if lock ranking is disabled, otherwise includes the lock rank
	 163  	lockRankStruct
	 164  	// Futex-based impl treats it as uint32 key,
	 165  	// while sema-based impl as M* waitm.
	 166  	// Used to be a union, but unions break precise GC.
	 167  	key uintptr
	 168  }
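
Inside the runtime these locks are taken with lock and unlock (see lock_futex.go and lock_sema.go). A hedged sketch of the usage pattern; the variable name is illustrative:

	var statelock mutex // the zero value is an unlocked mutex

	lock(&statelock)
	// ... read or modify the state guarded by statelock ...
	unlock(&statelock)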
	 169  
	 170  // sleep and wakeup on one-time events.
	 171  // before any calls to notesleep or notewakeup,
	 172  // must call noteclear to initialize the Note.
	 173  // then, exactly one thread can call notesleep
	 174  // and exactly one thread can call notewakeup (once).
	 175  // once notewakeup has been called, the notesleep
	 176  // will return.	future notesleep will return immediately.
	 177  // subsequent noteclear must be called only after
	 178  // previous notesleep has returned, e.g. it's disallowed
	 179  // to call noteclear straight after notewakeup.
	 180  //
	 181  // notetsleep is like notesleep but wakes up after
	 182  // a given number of nanoseconds even if the event
	 183  // has not yet happened.	if a goroutine uses notetsleep to
	 184  // wake up early, it must wait to call noteclear until it
	 185  // can be sure that no other goroutine is calling
	 186  // notewakeup.
	 187  //
	 188  // notesleep/notetsleep are generally called on g0,
	 189  // notetsleepg is similar to notetsleep but is called on user g.
	 190  type note struct {
	 191  	// Futex-based impl treats it as uint32 key,
	 192  	// while sema-based impl as M* waitm.
	 193  	// Used to be a union, but unions break precise GC.
	 194  	key uintptr
	 195  }
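
The one-time event protocol described above, in sketch form. noteclear, notewakeup, and notesleep are the real runtime-internal calls; the variable and the sequencing comments are illustrative (notesleep normally runs on g0):

	var done note
	noteclear(&done) // initialize before any sleep or wakeup

	// exactly one thread signals the event, once:
	notewakeup(&done)

	// exactly one thread waits for it, once; this returns immediately
	// here because notewakeup has already been called:
	notesleep(&done)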
	 196  
	 197  type funcval struct {
	 198  	fn uintptr
	 199  	// variable-size, fn-specific data here
	 200  }
	 201  
	 202  type iface struct {
	 203  	tab	*itab
	 204  	data unsafe.Pointer
	 205  }
	 206  
	 207  type eface struct {
	 208  	_type *_type
	 209  	data	unsafe.Pointer
	 210  }
	 211  
	 212  func efaceOf(ep *interface{}) *eface {
	 213  	return (*eface)(unsafe.Pointer(ep))
	 214  }
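
efaceOf is just an unsafe reinterpretation of an interface{} value as its two-word runtime representation. The same layout can be observed outside the runtime with a local mirror type, as in this standalone sketch (efaceMirror is an assumption of the example, not a runtime type):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// efaceMirror matches the runtime's eface layout: a type word and a data word.
	type efaceMirror struct {
		typ  unsafe.Pointer
		data unsafe.Pointer
	}

	func main() {
		var x interface{} = 42
		e := (*efaceMirror)(unsafe.Pointer(&x))
		fmt.Println(e.typ != nil, e.data != nil) // true true
	}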
	 215  
	 216  // The guintptr, muintptr, and puintptr are all used to bypass write barriers.
	 217  // It is particularly important to avoid write barriers when the current P has
	 218  // been released, because the GC thinks the world is stopped, and an
	 219  // unexpected write barrier would not be synchronized with the GC,
	 220  // which can lead to a half-executed write barrier that has marked the object
	 221  // but not queued it. If the GC skips the object and completes before the
	 222  // queuing can occur, it will incorrectly free the object.
	 223  //
	 224  // We tried using special assignment functions invoked only when not
	 225  // holding a running P, but then some updates to a particular memory
	 226  // word went through write barriers and some did not. This breaks the
	 227  // write barrier shadow checking mode, and it is also scary: better to have
	 228  // a word that is completely ignored by the GC than to have one for which
	 229  // only a few updates are ignored.
	 230  //
	 231  // Gs and Ps are always reachable via true pointers in the
	 232  // allgs and allp lists or (during allocation before they reach those lists)
	 233  // from stack variables.
	 234  //
	 235  // Ms are always reachable via true pointers either from allm or
	 236  // freem. Unlike Gs and Ps we do free Ms, so it's important that
	 237  // nothing ever hold an muintptr across a safe point.
	 238  
	 239  // A guintptr holds a goroutine pointer, but typed as a uintptr
	 240  // to bypass write barriers. It is used in the Gobuf goroutine state
	 241  // and in scheduling lists that are manipulated without a P.
	 242  //
	 243  // The Gobuf.g goroutine pointer is almost always updated by assembly code.
	 244  // In one of the few places it is updated by Go code - func save - it must be
	 245  // treated as a uintptr to avoid a write barrier being emitted at a bad time.
	 246  // Instead of figuring out how to emit the write barriers missing in the
	 247  // assembly manipulation, we change the type of the field to uintptr,
	 248  // so that it does not require write barriers at all.
	 249  //
	 250  // Goroutine structs are published in the allg list and never freed.
	 251  // That will keep the goroutine structs from being collected.
	 252  // There is never a time that Gobuf.g's contain the only references
	 253  // to a goroutine: the publishing of the goroutine in allg comes first.
	 254  // Goroutine pointers are also kept in non-GC-visible places like TLS,
	 255  // so I can't see them ever moving. If we did want to start moving data
	 256  // in the GC, we'd need to allocate the goroutine structs from an
	 257  // alternate arena. Using guintptr doesn't make that problem any worse.
	 258  // Note that pollDesc.rg, pollDesc.wg also store g in uintptr form,
	 259  // so they would need to be updated too if g's start moving.
	 260  type guintptr uintptr
	 261  
	 262  //go:nosplit
	 263  func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
	 264  
	 265  //go:nosplit
	 266  func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
	 267  
	 268  //go:nosplit
	 269  func (gp *guintptr) cas(old, new guintptr) bool {
	 270  	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
	 271  }
	 272  
	 273  // setGNoWB performs *gp = new without a write barrier.
	 274  // For times when it's impractical to use a guintptr.
	 275  //go:nosplit
	 276  //go:nowritebarrier
	 277  func setGNoWB(gp **g, new *g) {
	 278  	(*guintptr)(unsafe.Pointer(gp)).set(new)
	 279  }
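
The same uintptr-typed-pointer pattern, restated with a throwaway local type purely to show the conversion round-trip. Outside the runtime there are no write barriers to avoid, so this only illustrates the mechanics; node and nodeuintptr are assumptions of the example:

	type node struct{ v int }

	// nodeuintptr is to node what guintptr is to g: a pointer stored as a
	// uintptr, so assignments compile to plain stores with no write barrier.
	type nodeuintptr uintptr

	func (p nodeuintptr) ptr() *node   { return (*node)(unsafe.Pointer(p)) }
	func (p *nodeuintptr) set(n *node) { *p = nodeuintptr(unsafe.Pointer(n)) }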
	 280  
	 281  type puintptr uintptr
	 282  
	 283  //go:nosplit
	 284  func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }
	 285  
	 286  //go:nosplit
	 287  func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
	 288  
	 289  // muintptr is a *m that is not tracked by the garbage collector.
	 290  //
	 291  // Because we do free Ms, there are some additional constraints on
	 292  // muintptrs:
	 293  //
	 294  // 1. Never hold an muintptr locally across a safe point.
	 295  //
	 296  // 2. Any muintptr in the heap must be owned by the M itself so it can
	 297  //		ensure it is not in use when the last true *m is released.
	 298  type muintptr uintptr
	 299  
	 300  //go:nosplit
	 301  func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }
	 302  
	 303  //go:nosplit
	 304  func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
	 305  
	 306  // setMNoWB performs *mp = new without a write barrier.
	 307  // For times when it's impractical to use an muintptr.
	 308  //go:nosplit
	 309  //go:nowritebarrier
	 310  func setMNoWB(mp **m, new *m) {
	 311  	(*muintptr)(unsafe.Pointer(mp)).set(new)
	 312  }
	 313  
	 314  type gobuf struct {
	 315  	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	 316  	//
	 317  	// ctxt is unusual with respect to GC: it may be a
	 318  	// heap-allocated funcval, so GC needs to track it, but it
	 319  	// needs to be set and cleared from assembly, where it's
	 320  	// difficult to have write barriers. However, ctxt is really a
	 321  	// saved, live register, and we only ever exchange it between
	 322  	// the real register and the gobuf. Hence, we treat it as a
	 323  	// root during stack scanning, which means assembly that saves
	 324  	// and restores it doesn't need write barriers. It's still
	 325  	// typed as a pointer so that any other writes from Go get
	 326  	// write barriers.
	 327  	sp	 uintptr
	 328  	pc	 uintptr
	 329  	g		guintptr
	 330  	ctxt unsafe.Pointer
	 331  	ret	uintptr
	 332  	lr	 uintptr
	 333  	bp	 uintptr // for framepointer-enabled architectures
	 334  }
	 335  
	 336  // sudog represents a g in a wait list, such as for sending/receiving
	 337  // on a channel.
	 338  //
	 339  // sudog is necessary because the g ↔ synchronization object relation
	 340  // is many-to-many. A g can be on many wait lists, so there may be
	 341  // many sudogs for one g; and many gs may be waiting on the same
	 342  // synchronization object, so there may be many sudogs for one object.
	 343  //
	 344  // sudogs are allocated from a special pool. Use acquireSudog and
	 345  // releaseSudog to allocate and free them.
	 346  type sudog struct {
	 347  	// The following fields are protected by the hchan.lock of the
	 348  	// channel this sudog is blocking on. shrinkstack depends on
	 349  	// this for sudogs involved in channel ops.
	 350  
	 351  	g *g
	 352  
	 353  	next *sudog
	 354  	prev *sudog
	 355  	elem unsafe.Pointer // data element (may point to stack)
	 356  
	 357  	// The following fields are never accessed concurrently.
	 358  	// For channels, waitlink is only accessed by g.
	 359  	// For semaphores, all fields (including the ones above)
	 360  	// are only accessed when holding a semaRoot lock.
	 361  
	 362  	acquiretime int64
	 363  	releasetime int64
	 364  	ticket			uint32
	 365  
	 366  	// isSelect indicates g is participating in a select, so
	 367  	// g.selectDone must be CAS'd to win the wake-up race.
	 368  	isSelect bool
	 369  
	 370  	// success indicates whether communication over channel c
	 371  	// succeeded. It is true if the goroutine was awoken because a
	 372  	// value was delivered over channel c, and false if awoken
	 373  	// because c was closed.
	 374  	success bool
	 375  
	 376  	parent	 *sudog // semaRoot binary tree
	 377  	waitlink *sudog // g.waiting list or semaRoot
	 378  	waittail *sudog // semaRoot
	 379  	c				*hchan // channel
	 380  }
	 381  
	 382  type libcall struct {
	 383  	fn	 uintptr
	 384  	n		uintptr // number of parameters
	 385  	args uintptr // parameters
	 386  	r1	 uintptr // return values
	 387  	r2	 uintptr
	 388  	err	uintptr // error number
	 389  }
	 390  
	 391  // Stack describes a Go execution stack.
	 392  // The bounds of the stack are exactly [lo, hi),
	 393  // with no implicit data structures on either side.
	 394  type stack struct {
	 395  	lo uintptr
	 396  	hi uintptr
	 397  }
	 398  
	 399  // heldLockInfo gives info on a held lock and the rank of that lock
	 400  type heldLockInfo struct {
	 401  	lockAddr uintptr
	 402  	rank		 lockRank
	 403  }
	 404  
	 405  type g struct {
	 406  	// Stack parameters.
	 407  	// stack describes the actual stack memory: [stack.lo, stack.hi).
	 408  	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	 409  	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	 410  	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	 411  	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	 412  	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	 413  	stack			 stack	 // offset known to runtime/cgo
	 414  	stackguard0 uintptr // offset known to liblink
	 415  	stackguard1 uintptr // offset known to liblink
	 416  
	 417  	_panic		*_panic // innermost panic - offset known to liblink
	 418  	_defer		*_defer // innermost defer
	 419  	m				 *m			// current m; offset known to arm liblink
	 420  	sched		 gobuf
	 421  	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	 422  	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	 423  	stktopsp	uintptr // expected sp at top of stack, to check in traceback
	 424  	// param is a generic pointer parameter field used to pass
	 425  	// values in particular contexts where other storage for the
	 426  	// parameter would be difficult to find. It is currently used
	 427  	// in three ways:
	 428  	// 1. When a channel operation wakes up a blocked goroutine, it sets param to
	 429  	//		point to the sudog of the completed blocking operation.
	 430  	// 2. By gcAssistAlloc1 to signal back to its caller that the goroutine completed
	 431  	//		the GC cycle. It is unsafe to do so in any other way, because the goroutine's
	 432  	//		stack may have moved in the meantime.
	 433  	// 3. By debugCallWrap to pass parameters to a new goroutine because allocating a
	 434  	//		closure in the runtime is forbidden.
	 435  	param				unsafe.Pointer
	 436  	atomicstatus uint32
	 437  	stackLock		uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	 438  	goid				 int64
	 439  	schedlink		guintptr
	 440  	waitsince		int64			// approx time when the g became blocked
	 441  	waitreason	 waitReason // if status==Gwaiting
	 442  
	 443  	preempt			 bool // preemption signal, duplicates stackguard0 = stackpreempt
	 444  	preemptStop	 bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	 445  	preemptShrink bool // shrink stack at synchronous safe point
	 446  
	 447  	// asyncSafePoint is set if g is stopped at an asynchronous
	 448  	// safe point. This means there are frames on the stack
	 449  	// without precise pointer information.
	 450  	asyncSafePoint bool
	 451  
	 452  	paniconfault bool // panic (instead of crash) on unexpected fault address
	 453  	gcscandone	 bool // g has scanned stack; protected by _Gscan bit in status
	 454  	throwsplit	 bool // must not split stack
	 455  	// activeStackChans indicates that there are unlocked channels
	 456  	// pointing into this goroutine's stack. If true, stack
	 457  	// copying needs to acquire channel locks to protect these
	 458  	// areas of the stack.
	 459  	activeStackChans bool
	 460  	// parkingOnChan indicates that the goroutine is about to
	 461  	// park on a chansend or chanrecv. Used to signal an unsafe point
	 462  	// for stack shrinking. It's a boolean value, but is updated atomically.
	 463  	parkingOnChan uint8
	 464  
	 465  	raceignore		 int8		 // ignore race detection events
	 466  	sysblocktraced bool		 // StartTrace has emitted EvGoInSyscall about this goroutine
	 467  	tracking			 bool		 // whether we're tracking this G for sched latency statistics
	 468  	trackingSeq		uint8		// used to decide whether to track this G
	 469  	runnableStamp	int64		// timestamp of when the G last became runnable, only used when tracking
	 470  	runnableTime	 int64		// the amount of time spent runnable, cleared when running, only used when tracking
	 471  	sysexitticks	 int64		// cputicks when syscall has returned (for tracing)
	 472  	traceseq			 uint64	 // trace event sequencer
	 473  	tracelastp		 puintptr // last P emitted an event for this goroutine
	 474  	lockedm				muintptr
	 475  	sig						uint32
	 476  	writebuf			 []byte
	 477  	sigcode0			 uintptr
	 478  	sigcode1			 uintptr
	 479  	sigpc					uintptr
	 480  	gopc					 uintptr				 // pc of go statement that created this goroutine
	 481  	ancestors			*[]ancestorInfo // ancestor information of goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	 482  	startpc				uintptr				 // pc of goroutine function
	 483  	racectx				uintptr
	 484  	waiting				*sudog				 // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	 485  	cgoCtxt				[]uintptr			// cgo traceback context
	 486  	labels				 unsafe.Pointer // profiler labels
	 487  	timer					*timer				 // cached timer for time.Sleep
	 488  	selectDone		 uint32				 // are we participating in a select and did someone win the race?
	 489  
	 490  	// Per-G GC state
	 491  
	 492  	// gcAssistBytes is this G's GC assist credit in terms of
	 493  	// bytes allocated. If this is positive, then the G has credit
	 494  	// to allocate gcAssistBytes bytes without assisting. If this
	 495  	// is negative, then the G must correct this by performing
	 496  	// scan work. We track this in bytes to make it fast to update
	 497  	// and check for debt in the malloc hot path. The assist ratio
	 498  	// determines how this corresponds to scan work debt.
	 499  	gcAssistBytes int64
	 500  }
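
The stackguard0 comparison described at the top of the struct is emitted by the compiler in every non-nosplit function prologue. In Go-flavored pseudocode it behaves roughly as below; sp stands for the incoming stack pointer, and the real check is generated assembly whose exact form depends on the frame size:

	// on function entry, before the new frame is used:
	if sp <= getg().stackguard0 {
		// either the stack is really too small, or stackguard0 was set to
		// stackPreempt to force this goroutine into the scheduler
		morestack()
	}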
	 501  
	 502  // gTrackingPeriod is the number of transitions out of _Grunning between
	 503  // latency tracking runs.
	 504  const gTrackingPeriod = 8
	 505  
	 506  const (
	 507  	// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
	 508  	// like Windows.
	 509  	tlsSlots = 6
	 510  	tlsSize	= tlsSlots * sys.PtrSize
	 511  )
	 512  
	 513  type m struct {
	 514  	g0			*g		 // goroutine with scheduling stack
	 515  	morebuf gobuf	// gobuf arg to morestack
	 516  	divmod	uint32 // div/mod denominator for arm - known to liblink
	 517  
	 518  	// Fields not known to debuggers.
	 519  	procid				uint64						// for debuggers, but offset not hard-coded
	 520  	gsignal			 *g								// signal-handling g
	 521  	goSigStack		gsignalStack			// Go-allocated signal handling stack
	 522  	sigmask			 sigset						// storage for saved signal mask
	 523  	tls					 [tlsSlots]uintptr // thread-local storage (for x86 extern register)
	 524  	mstartfn			func()
	 525  	curg					*g			 // current running goroutine
	 526  	caughtsig		 guintptr // goroutine running during fatal signal
	 527  	p						 puintptr // attached p for executing go code (nil if not executing go code)
	 528  	nextp				 puintptr
	 529  	oldp					puintptr // the p that was attached before executing a syscall
	 530  	id						int64
	 531  	mallocing		 int32
	 532  	throwing			int32
	 533  	preemptoff		string // if != "", keep curg running on this m
	 534  	locks				 int32
	 535  	dying				 int32
	 536  	profilehz		 int32
	 537  	spinning			bool // m is out of work and is actively looking for work
	 538  	blocked			 bool // m is blocked on a note
	 539  	newSigstack	 bool // minit on C thread called sigaltstack
	 540  	printlock		 int8
	 541  	incgo				 bool	 // m is executing a cgo call
	 542  	freeWait			uint32 // if == 0, safe to free g0 and delete m (atomic)
	 543  	fastrand			[2]uint32
	 544  	needextram		bool
	 545  	traceback		 uint8
	 546  	ncgocall			uint64			// number of cgo calls in total
	 547  	ncgo					int32			 // number of cgo calls currently in progress
	 548  	cgoCallersUse uint32			// if non-zero, cgoCallers in use temporarily
	 549  	cgoCallers		*cgoCallers // cgo traceback if crashing in cgo call
	 550  	doesPark			bool				// non-P running threads: sysmon and newmHandoff never use .park
	 551  	park					note
	 552  	alllink			 *m // on allm
	 553  	schedlink		 muintptr
	 554  	lockedg			 guintptr
	 555  	createstack	 [32]uintptr // stack that created this thread.
	 556  	lockedExt		 uint32			// tracking for external LockOSThread
	 557  	lockedInt		 uint32			// tracking for internal lockOSThread
	 558  	nextwaitm		 muintptr		// next m waiting for lock
	 559  	waitunlockf	 func(*g, unsafe.Pointer) bool
	 560  	waitlock			unsafe.Pointer
	 561  	waittraceev	 byte
	 562  	waittraceskip int
	 563  	startingtrace bool
	 564  	syscalltick	 uint32
	 565  	freelink			*m // on sched.freem
	 566  
	 567  	// mFixup is used to synchronize OS-related m state
	 568  	// (credentials etc.); use the mutex to access it. To avoid
	 569  	// deadlocks, an atomic.Load() of used observing zero in
	 570  	// mDoFixupFn() guarantees fn is nil.
	 571  	mFixup struct {
	 572  		lock mutex
	 573  		used uint32
	 574  		fn	 func(bool) bool
	 575  	}
	 576  
	 577  	// these are here because they are too large to be on the stack
	 578  	// of low-level NOSPLIT functions.
	 579  	libcall	 libcall
	 580  	libcallpc uintptr // for cpu profiler
	 581  	libcallsp uintptr
	 582  	libcallg	guintptr
	 583  	syscall	 libcall // stores syscall parameters on windows
	 584  
	 585  	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	 586  	vdsoPC uintptr // PC for traceback while in VDSO call
	 587  
	 588  	// preemptGen counts the number of completed preemption
	 589  	// signals. This is used to detect when a preemption is
	 590  	// requested, but fails. Accessed atomically.
	 591  	preemptGen uint32
	 592  
	 593  	// Whether this is a pending preemption signal on this M.
	 594  	// Accessed atomically.
	 595  	signalPending uint32
	 596  
	 597  	dlogPerM
	 598  
	 599  	mOS
	 600  
	 601  	// Up to 10 locks held by this m, maintained by the lock ranking code.
	 602  	locksHeldLen int
	 603  	locksHeld		[10]heldLockInfo
	 604  }
	 605  
	 606  type p struct {
	 607  	id					int32
	 608  	status			uint32 // one of pidle/prunning/...
	 609  	link				puintptr
	 610  	schedtick	 uint32		 // incremented on every scheduler call
	 611  	syscalltick uint32		 // incremented on every system call
	 612  	sysmontick	sysmontick // last tick observed by sysmon
	 613  	m					 muintptr	 // back-link to associated m (nil if idle)
	 614  	mcache			*mcache
	 615  	pcache			pageCache
	 616  	raceprocctx uintptr
	 617  
	 618  	deferpool		[5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	 619  	deferpoolbuf [5][32]*_defer
	 620  
	 621  	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	 622  	goidcache		uint64
	 623  	goidcacheend uint64
	 624  
	 625  	// Queue of runnable goroutines. Accessed without lock.
	 626  	runqhead uint32
	 627  	runqtail uint32
	 628  	runq		 [256]guintptr
	 629  	// runnext, if non-nil, is a runnable G that was ready'd by
	 630  	// the current G and should be run next instead of what's in
	 631  	// runq if there's time remaining in the running G's time
	 632  	// slice. It will inherit the time left in the current time
	 633  	// slice. If a set of goroutines is locked in a
	 634  	// communicate-and-wait pattern, this schedules that set as a
	 635  	// unit and eliminates the (potentially large) scheduling
	 636  	// latency that otherwise arises from adding the ready'd
	 637  	// goroutines to the end of the run queue.
	 638  	//
	 639  	// Note that while other P's may atomically CAS this to zero,
	 640  	// only the owner P can CAS it to a valid G.
	 641  	runnext guintptr
	 642  
	 643  	// Available G's (status == Gdead)
	 644  	gFree struct {
	 645  		gList
	 646  		n int32
	 647  	}
	 648  
	 649  	sudogcache []*sudog
	 650  	sudogbuf	 [128]*sudog
	 651  
	 652  	// Cache of mspan objects from the heap.
	 653  	mspancache struct {
	 654  		// We need an explicit length here because this field is used
	 655  		// in allocation codepaths where write barriers are not allowed,
	 656  		// and eliminating the write barrier/keeping it eliminated from
	 657  		// slice updates is tricky, moreso than just managing the length
	 658  		// ourselves.
	 659  		len int
	 660  		buf [128]*mspan
	 661  	}
	 662  
	 663  	tracebuf traceBufPtr
	 664  
	 665  	// traceSweep indicates the sweep events should be traced.
	 666  	// This is used to defer the sweep start event until a span
	 667  	// has actually been swept.
	 668  	traceSweep bool
	 669  	// traceSwept and traceReclaimed track the number of bytes
	 670  	// swept and reclaimed by sweeping in the current sweep loop.
	 671  	traceSwept, traceReclaimed uintptr
	 672  
	 673  	palloc persistentAlloc // per-P to avoid mutex
	 674  
	 675  	_ uint32 // Alignment for atomic fields below
	 676  
	 677  	// The when field of the first entry on the timer heap.
	 678  	// This is updated using atomic functions.
	 679  	// This is 0 if the timer heap is empty.
	 680  	timer0When uint64
	 681  
	 682  	// The earliest known nextwhen field of a timer with
	 683  	// timerModifiedEarlier status. Because the timer may have been
	 684  	// modified again, there need not be any timer with this value.
	 685  	// This is updated using atomic functions.
	 686  	// This is 0 if there are no timerModifiedEarlier timers.
	 687  	timerModifiedEarliest uint64
	 688  
	 689  	// Per-P GC state
	 690  	gcAssistTime				 int64 // Nanoseconds in assistAlloc
	 691  	gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
	 692  
	 693  	// gcMarkWorkerMode is the mode for the next mark worker to run in.
	 694  	// That is, this is used to communicate with the worker goroutine
	 695  	// selected for immediate execution by
	 696  	// gcController.findRunnableGCWorker. When scheduling other goroutines,
	 697  	// this field must be set to gcMarkWorkerNotWorker.
	 698  	gcMarkWorkerMode gcMarkWorkerMode
	 699  	// gcMarkWorkerStartTime is the nanotime() at which the most recent
	 700  	// mark worker started.
	 701  	gcMarkWorkerStartTime int64
	 702  
	 703  	// gcw is this P's GC work buffer cache. The work buffer is
	 704  	// filled by write barriers, drained by mutator assists, and
	 705  	// disposed on certain GC state transitions.
	 706  	gcw gcWork
	 707  
	 708  	// wbBuf is this P's GC write barrier buffer.
	 709  	//
	 710  	// TODO: Consider caching this in the running G.
	 711  	wbBuf wbBuf
	 712  
	 713  	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point
	 714  
	 715  	// statsSeq is a counter indicating whether this P is currently
	 716  	// writing any stats. Its value is even when not, odd when it is.
	 717  	statsSeq uint32
	 718  
	 719  	// Lock for timers. We normally access the timers while running
	 720  	// on this P, but the scheduler can also do it from a different P.
	 721  	timersLock mutex
	 722  
	 723  	// Actions to take at some time. This is used to implement the
	 724  	// standard library's time package.
	 725  	// Must hold timersLock to access.
	 726  	timers []*timer
	 727  
	 728  	// Number of timers in P's heap.
	 729  	// Modified using atomic instructions.
	 730  	numTimers uint32
	 731  
	 732  	// Number of timerDeleted timers in P's heap.
	 733  	// Modified using atomic instructions.
	 734  	deletedTimers uint32
	 735  
	 736  	// Race context used while executing timer functions.
	 737  	timerRaceCtx uintptr
	 738  
	 739  	// preempt is set to indicate that this P should enter the
	 740  	// scheduler ASAP (regardless of what G is running on it).
	 741  	preempt bool
	 742  
	 743  	// Padding is no longer needed. False sharing is now not a worry because p is large enough
	 744  	// that its size class is an integer multiple of the cache line size (for any of our architectures).
	 745  }
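
The fixed-size run queue above (runqhead, runqtail, runq) is a ring buffer indexed by free-running counters. A single-threaded analogy of the indexing scheme follows; the real runqput/runqget in proc.go use atomics so that other Ps can steal, and the ring type here is an assumption of the example:

	type ring struct {
		head, tail uint32 // free-running counters; never wrapped explicitly
		buf        [256]int
	}

	func (q *ring) put(x int) bool {
		if q.tail-q.head == uint32(len(q.buf)) {
			return false // full; the runtime would spill half to the global queue
		}
		q.buf[q.tail%uint32(len(q.buf))] = x
		q.tail++
		return true
	}

	func (q *ring) get() (int, bool) {
		if q.head == q.tail {
			return 0, false // empty
		}
		x := q.buf[q.head%uint32(len(q.buf))]
		q.head++
		return x, true
	}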
	 746  
	 747  type schedt struct {
	 748  	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	 749  	goidgen	 uint64
	 750  	lastpoll	uint64 // time of last network poll, 0 if currently polling
	 751  	pollUntil uint64 // time to which current poll is sleeping
	 752  
	 753  	lock mutex
	 754  
	 755  	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	 756  	// sure to call checkdead().
	 757  
	 758  	midle				muintptr // idle m's waiting for work
	 759  	nmidle			 int32		// number of idle m's waiting for work
	 760  	nmidlelocked int32		// number of locked m's waiting for work
	 761  	mnext				int64		// number of m's that have been created and next M ID
	 762  	maxmcount		int32		// maximum number of m's allowed (or die)
	 763  	nmsys				int32		// number of system m's not counted for deadlock
	 764  	nmfreed			int64		// cumulative number of freed m's
	 765  
	 766  	ngsys uint32 // number of system goroutines; updated atomically
	 767  
	 768  	pidle			puintptr // idle p's
	 769  	npidle		 uint32
	 770  	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.
	 771  
	 772  	// Global runnable queue.
	 773  	runq		 gQueue
	 774  	runqsize int32
	 775  
	 776  	// disable controls selective disabling of the scheduler.
	 777  	//
	 778  	// Use schedEnableUser to control this.
	 779  	//
	 780  	// disable is protected by sched.lock.
	 781  	disable struct {
	 782  		// user disables scheduling of user goroutines.
	 783  		user		 bool
	 784  		runnable gQueue // pending runnable Gs
	 785  		n				int32	// length of runnable
	 786  	}
	 787  
	 788  	// Global cache of dead G's.
	 789  	gFree struct {
	 790  		lock		mutex
	 791  		stack	 gList // Gs with stacks
	 792  		noStack gList // Gs without stacks
	 793  		n			 int32
	 794  	}
	 795  
	 796  	// Central cache of sudog structs.
	 797  	sudoglock	mutex
	 798  	sudogcache *sudog
	 799  
	 800  	// Central pool of available defer structs of different sizes.
	 801  	deferlock mutex
	 802  	deferpool [5]*_defer
	 803  
	 804  	// freem is the list of m's waiting to be freed when their
	 805  	// m.exited is set. Linked through m.freelink.
	 806  	freem *m
	 807  
	 808  	gcwaiting	uint32 // gc is waiting to run
	 809  	stopwait	 int32
	 810  	stopnote	 note
	 811  	sysmonwait uint32
	 812  	sysmonnote note
	 813  
	 814  	// While true, sysmon is not ready for mFixup calls.
	 815  	// Accessed atomically.
	 816  	sysmonStarting uint32
	 817  
	 818  	// safepointFn should be called on each P at the next GC
	 819  	// safepoint if p.runSafePointFn is set.
	 820  	safePointFn	 func(*p)
	 821  	safePointWait int32
	 822  	safePointNote note
	 823  
	 824  	profilehz int32 // cpu profiling rate
	 825  
	 826  	procresizetime int64 // nanotime() of last change to gomaxprocs
	 827  	totaltime			int64 // ∫gomaxprocs dt up to procresizetime
	 828  
	 829  	// sysmonlock protects sysmon's actions on the runtime.
	 830  	//
	 831  	// Acquire and hold this mutex to block sysmon from interacting
	 832  	// with the rest of the runtime.
	 833  	sysmonlock mutex
	 834  
	 835  	_ uint32 // ensure timeToRun has 8-byte alignment
	 836  
	 837  	// timeToRun is a distribution of scheduling latencies, defined
	 838  	// as the sum of time a G spends in the _Grunnable state before
	 839  	// it transitions to _Grunning.
	 840  	//
	 841  	// timeToRun is protected by sched.lock.
	 842  	timeToRun timeHistogram
	 843  }
	 844  
	 845  // Values for the flags field of a sigTabT.
	 846  const (
	 847  	_SigNotify	 = 1 << iota // let signal.Notify have signal, even if from kernel
	 848  	_SigKill								 // if signal.Notify doesn't take it, exit quietly
	 849  	_SigThrow								// if signal.Notify doesn't take it, exit loudly
	 850  	_SigPanic								// if the signal is from the kernel, panic
	 851  	_SigDefault							// if the signal isn't explicitly requested, don't monitor it
	 852  	_SigGoExit							 // cause all runtime procs to exit (only used on Plan 9).
	 853  	_SigSetStack						 // add SA_ONSTACK to libc handler
	 854  	_SigUnblock							// always unblock; see blockableSig
	 855  	_SigIgn									// _SIG_DFL action is to ignore the signal
	 856  )
	 857  
	 858  // Layout of in-memory per-function information prepared by linker
	 859  // See https://golang.org/s/go12symtab.
	 860  // Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
	 861  // and with package debug/gosym and with symtab.go in package runtime.
	 862  type _func struct {
	 863  	entry	 uintptr // start pc
	 864  	nameoff int32	 // function name
	 865  
	 866  	args				int32	// in/out args size
	 867  	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
	 868  
	 869  	pcsp			uint32
	 870  	pcfile		uint32
	 871  	pcln			uint32
	 872  	npcdata	 uint32
	 873  	cuOffset	uint32 // runtime.cutab offset of this function's CU
	 874  	funcID		funcID // set for certain special runtime functions
	 875  	flag			funcFlag
	 876  	_				 [1]byte // pad
	 877  	nfuncdata uint8	 // must be last, must end on a uint32-aligned boundary
	 878  }
	 879  
	 880  // Pseudo-Func that is returned for PCs that occur in inlined code.
	 881  // A *Func can be either a *_func or a *funcinl, and they are distinguished
	 882  // by the first uintptr.
	 883  type funcinl struct {
	 884  	zero	uintptr // set to 0 to distinguish from _func
	 885  	entry uintptr // entry of the real (the "outermost") frame.
	 886  	name	string
	 887  	file	string
	 888  	line	int
	 889  }
	 890  
	 891  // layout of Itab known to compilers
	 892  // allocated in non-garbage-collected memory
	 893  // Needs to be in sync with
	 894  // ../cmd/compile/internal/reflectdata/reflect.go:/^func.WriteTabs.
	 895  type itab struct {
	 896  	inter *interfacetype
	 897  	_type *_type
	 898  	hash	uint32 // copy of _type.hash. Used for type switches.
	 899  	_		 [4]byte
	 900  	fun	 [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
	 901  }
	 902  
	 903  // Lock-free stack node.
	 904  // Also known to export_test.go.
	 905  type lfnode struct {
	 906  	next		uint64
	 907  	pushcnt uintptr
	 908  }
	 909  
	 910  type forcegcstate struct {
	 911  	lock mutex
	 912  	g		*g
	 913  	idle uint32
	 914  }
	 915  
	 916  // extendRandom extends the random numbers in r[:n] to the whole slice r.
	 917  // Treats n<0 as n==0.
	 918  func extendRandom(r []byte, n int) {
	 919  	if n < 0 {
	 920  		n = 0
	 921  	}
	 922  	for n < len(r) {
	 923  		// Extend random bits using hash function & time seed
	 924  		w := n
	 925  		if w > 16 {
	 926  			w = 16
	 927  		}
	 928  		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
	 929  		for i := 0; i < sys.PtrSize && n < len(r); i++ {
	 930  			r[n] = byte(h)
	 931  			n++
	 932  			h >>= 8
	 933  		}
	 934  	}
	 935  }
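
A hedged sketch of how a caller uses extendRandom: seed a short prefix from the OS, then let the hash fill the rest. The buffer size and the 8-byte prefix are assumptions of the example; the real call sites are in OS-specific startup code:

	var seed [32]byte
	// ... fill seed[:8] from the OS entropy source ...
	extendRandom(seed[:], 8) // repeatedly hashes the tail (plus nanotime) to fill seed[8:]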
	 936  
	 937  // A _defer holds an entry on the list of deferred calls.
	 938  // If you add a field here, add code to clear it in freedefer and deferProcStack
	 939  // This struct must match the code in cmd/compile/internal/reflectdata/reflect.go:deferstruct
	 940  // and cmd/compile/internal/gc/ssa.go:(*state).call.
	 941  // Some defers will be allocated on the stack and some on the heap.
	 942  // All defers are logically part of the stack, so write barriers to
	 943  // initialize them are not required. All defers must be manually scanned,
	 944  // and for heap defers, marked.
	 945  type _defer struct {
	 946  	siz		 int32 // includes both arguments and results
	 947  	started bool
	 948  	heap		bool
	 949  	// openDefer indicates that this _defer is for a frame with open-coded
	 950  	// defers. We have only one defer record for the entire frame (which may
	 951  	// currently have 0, 1, or more defers active).
	 952  	openDefer bool
	 953  	sp				uintptr	// sp at time of defer
	 954  	pc				uintptr	// pc at time of defer
	 955  	fn				*funcval // can be nil for open-coded defers
	 956  	_panic		*_panic	// panic that is running defer
	 957  	link			*_defer
	 958  
	 959  	// If openDefer is true, the fields below record values about the stack
	 960  	// frame and associated function that has the open-coded defer(s). sp
	 961  	// above will be the sp for the frame, and pc will be address of the
	 962  	// deferreturn call in the function.
	 963  	fd	 unsafe.Pointer // funcdata for the function associated with the frame
	 964  	varp uintptr				// value of varp for the stack frame
	 965  	// framepc is the current pc associated with the stack frame. Together,
	 966  	// with sp above (which is the sp associated with the stack frame),
	 967  	// framepc/sp can be used as pc/sp pair to continue a stack trace via
	 968  	// gentraceback().
	 969  	framepc uintptr
	 970  }
	 971  
	 972  // A _panic holds information about an active panic.
	 973  //
	 974  // A _panic value must only ever live on the stack.
	 975  //
	 976  // The argp and link fields are stack pointers, but don't need special
	 977  // handling during stack growth: because they are pointer-typed and
	 978  // _panic values only live on the stack, regular stack pointer
	 979  // adjustment takes care of them.
	 980  type _panic struct {
	 981  	argp			unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	 982  	arg			 interface{}		// argument to panic
	 983  	link			*_panic				// link to earlier panic
	 984  	pc				uintptr				// where to return to in runtime if this panic is bypassed
	 985  	sp				unsafe.Pointer // where to return to in runtime if this panic is bypassed
	 986  	recovered bool					 // whether this panic is over
	 987  	aborted	 bool					 // the panic was aborted
	 988  	goexit		bool
	 989  }
	 990  
	 991  // stack traces
	 992  type stkframe struct {
	 993  	fn			 funcInfo	 // function being run
	 994  	pc			 uintptr		// program counter within fn
	 995  	continpc uintptr		// program counter where execution can continue, or 0 if not
	 996  	lr			 uintptr		// program counter at caller aka link register
	 997  	sp			 uintptr		// stack pointer at pc
	 998  	fp			 uintptr		// stack pointer at caller aka frame pointer
	 999  	varp		 uintptr		// top of local variables
	1000  	argp		 uintptr		// pointer to function arguments
	1001  	arglen	 uintptr		// number of bytes at argp
	1002  	argmap	 *bitvector // force use of this argmap
	1003  }
	1004  
	1005  // ancestorInfo records details of where a goroutine was started.
	1006  type ancestorInfo struct {
	1007  	pcs	[]uintptr // pcs from the stack of this goroutine
	1008  	goid int64		 // goroutine id of this goroutine; original goroutine possibly dead
	1009  	gopc uintptr	 // pc of go statement that created this goroutine
	1010  }
	1011  
	1012  const (
	1013  	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	1014  	_TraceTrap											// the initial PC, SP are from a trap, not a return PC from a call
	1015  	_TraceJumpStack								 // if traceback is on a systemstack, resume trace at g that called into it
	1016  )
	1017  
	1018  // The maximum number of frames we print for a traceback
	1019  const _TracebackMaxFrames = 100
	1020  
	1021  // A waitReason explains why a goroutine has been stopped.
	1022  // See gopark. Do not re-use waitReasons, add new ones.
	1023  type waitReason uint8
	1024  
	1025  const (
	1026  	waitReasonZero									waitReason = iota // ""
	1027  	waitReasonGCAssistMarking												 // "GC assist marking"
	1028  	waitReasonIOWait																	// "IO wait"
	1029  	waitReasonChanReceiveNilChan											// "chan receive (nil chan)"
	1030  	waitReasonChanSendNilChan												 // "chan send (nil chan)"
	1031  	waitReasonDumpingHeap														 // "dumping heap"
	1032  	waitReasonGarbageCollection											 // "garbage collection"
	1033  	waitReasonGarbageCollectionScan									 // "garbage collection scan"
	1034  	waitReasonPanicWait															 // "panicwait"
	1035  	waitReasonSelect																	// "select"
	1036  	waitReasonSelectNoCases													 // "select (no cases)"
	1037  	waitReasonGCAssistWait														// "GC assist wait"
	1038  	waitReasonGCSweepWait														 // "GC sweep wait"
	1039  	waitReasonGCScavengeWait													// "GC scavenge wait"
	1040  	waitReasonChanReceive														 // "chan receive"
	1041  	waitReasonChanSend																// "chan send"
	1042  	waitReasonFinalizerWait													 // "finalizer wait"
	1043  	waitReasonForceGCIdle														 // "force gc (idle)"
	1044  	waitReasonSemacquire															// "semacquire"
	1045  	waitReasonSleep																	 // "sleep"
	1046  	waitReasonSyncCondWait														// "sync.Cond.Wait"
	1047  	waitReasonTimerGoroutineIdle											// "timer goroutine (idle)"
	1048  	waitReasonTraceReaderBlocked											// "trace reader (blocked)"
	1049  	waitReasonWaitForGCCycle													// "wait for GC cycle"
	1050  	waitReasonGCWorkerIdle														// "GC worker (idle)"
	1051  	waitReasonPreempted															 // "preempted"
	1052  	waitReasonDebugCall															 // "debug call"
	1053  )
	1054  
	1055  var waitReasonStrings = [...]string{
	1056  	waitReasonZero:									"",
	1057  	waitReasonGCAssistMarking:			 "GC assist marking",
	1058  	waitReasonIOWait:								"IO wait",
	1059  	waitReasonChanReceiveNilChan:		"chan receive (nil chan)",
	1060  	waitReasonChanSendNilChan:			 "chan send (nil chan)",
	1061  	waitReasonDumpingHeap:					 "dumping heap",
	1062  	waitReasonGarbageCollection:		 "garbage collection",
	1063  	waitReasonGarbageCollectionScan: "garbage collection scan",
	1064  	waitReasonPanicWait:						 "panicwait",
	1065  	waitReasonSelect:								"select",
	1066  	waitReasonSelectNoCases:				 "select (no cases)",
	1067  	waitReasonGCAssistWait:					"GC assist wait",
	1068  	waitReasonGCSweepWait:					 "GC sweep wait",
	1069  	waitReasonGCScavengeWait:				"GC scavenge wait",
	1070  	waitReasonChanReceive:					 "chan receive",
	1071  	waitReasonChanSend:							"chan send",
	1072  	waitReasonFinalizerWait:				 "finalizer wait",
	1073  	waitReasonForceGCIdle:					 "force gc (idle)",
	1074  	waitReasonSemacquire:						"semacquire",
	1075  	waitReasonSleep:								 "sleep",
	1076  	waitReasonSyncCondWait:					"sync.Cond.Wait",
	1077  	waitReasonTimerGoroutineIdle:		"timer goroutine (idle)",
	1078  	waitReasonTraceReaderBlocked:		"trace reader (blocked)",
	1079  	waitReasonWaitForGCCycle:				"wait for GC cycle",
	1080  	waitReasonGCWorkerIdle:					"GC worker (idle)",
	1081  	waitReasonPreempted:						 "preempted",
	1082  	waitReasonDebugCall:						 "debug call",
	1083  }
	1084  
	1085  func (w waitReason) String() string {
	1086  	if w < 0 || w >= waitReason(len(waitReasonStrings)) {
	1087  		return "unknown wait reason"
	1088  	}
	1089  	return waitReasonStrings[w]
	1090  }
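
For example, the reason recorded by a blocking channel receive renders as the bracketed state in goroutine dumps:

	// a goroutine parked with waitReasonChanReceive shows up in tracebacks as:
	//   goroutine 7 [chan receive]:
	println(waitReasonChanReceive.String()) // prints "chan receive"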
	1091  
	1092  var (
	1093  	allm			 *m
	1094  	gomaxprocs int32
	1095  	ncpu			 int32
	1096  	forcegc		forcegcstate
	1097  	sched			schedt
	1098  	newprocs	 int32
	1099  
	1100  	// allpLock protects P-less reads and size changes of allp, idlepMask,
	1101  	// and timerpMask, and all writes to allp.
	1102  	allpLock mutex
	1103  	// len(allp) == gomaxprocs; may change at safe points, otherwise
	1104  	// immutable.
	1105  	allp []*p
	1106  	// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
	1107  	// be atomic. Length may change at safe points.
	1108  	//
	1109  	// Each P must update only its own bit. In order to maintain
	1110  	// consistency, a P going idle must update the idle mask simultaneously with
	1111  	// updates to the idle P list under the sched.lock, otherwise a racing
	1112  	// pidleget may clear the mask before pidleput sets the mask,
	1113  	// corrupting the bitmap.
	1114  	//
	1115  	// N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
	1116  	idlepMask pMask
	1117  	// Bitmask of Ps that may have a timer, one bit per P. Reads and writes
	1118  	// must be atomic. Length may change at safe points.
	1119  	timerpMask pMask
	1120  
	1121  	// Pool of GC parked background workers. Entries are type
	1122  	// *gcBgMarkWorkerNode.
	1123  	gcBgMarkWorkerPool lfstack
	1124  
	1125  	// Total number of gcBgMarkWorker goroutines. Protected by worldsema.
	1126  	gcBgMarkWorkerCount int32
	1127  
	1128  	// Information about what cpu features are available.
	1129  	// Packages outside the runtime should not use these
	1130  	// as they are not an external api.
	1131  	// Set on startup in asm_{386,amd64}.s
	1132  	processorVersionInfo uint32
	1133  	isIntel							bool
	1134  	lfenceBeforeRdtsc		bool
	1135  
	1136  	goarm uint8 // set by cmd/link on arm systems
	1137  )
	1138  
	1139  // Set by the linker so the runtime can determine the buildmode.
	1140  var (
	1141  	islibrary bool // -buildmode=c-shared
	1142  	isarchive bool // -buildmode=c-archive
	1143  )
	1144  
	1145  // Must agree with internal/buildcfg.Experiment.FramePointer.
	1146  const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"
	1147  
