...

Source file src/runtime/runtime1.go

Documentation: runtime

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

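// Illustrative sketch, not part of the original file: decoding a packed
// traceback word by hand, using the constants above. GOTRACEBACK=crash,
// for example, is stored as 2<<tracebackShift | tracebackAll | tracebackCrash.
func exampleDecodeTraceback() (level int32, all, crash bool) {
	t := uint32(2<<tracebackShift | tracebackAll | tracebackCrash) // == 11
	crash = t&tracebackCrash != 0                                  // low bit: crash after traceback
	all = t&tracebackAll != 0                                      // next bit: dump all goroutines
	level = int32(t >> tracebackShift)                             // remaining bits: the level, here 2
	return
}
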
var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

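// Illustrative note (not in the original): argv_index is C's argv[i]
// spelled with unsafe pointer arithmetic. The i'th pointer lives
// i*sys.PtrSize bytes past argv, so on a 64-bit system
// argv_index(argv, 2) reads the pointer stored at argv+16.
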
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

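// Illustrative note (not in the original): goenvs_unix relies on the
// usual Unix process-startup layout, in which the kernel places the
// environment vector directly after argv, separated by one nil pointer:
//
//	argv[0] ... argv[argc-1], nil, env[0] ... env[n-1], nil
//
// which is why env[i] is read with argv_index(argv, argc+1+i). The TODO
// above notes one configuration where this layout is not guaranteed.
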
func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

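// Worked trace of the first checks above (illustrative): test_z64 starts
// at 42 while test_x64, the proposed old value, is 0, so the first Cas64
// must fail and leave both words untouched. Only after test_x64 is set
// to the matching old value 42 does the swap succeed, replacing
// test_z64's 42 with 1. Cas64(addr, old, new) swaps only when
// *addr == old and reports whether it did.
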
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

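// Illustrative sketch, not part of the original file: the NaN checks in
// check() rely on IEEE 754 semantics, where a NaN compares unequal to
// every value, including itself. ^uint64(0) (all bits set) is one of the
// many float64 NaN bit patterns.
func exampleNaN() bool {
	var f float64
	*(*uint64)(unsafe.Pointer(&f)) = ^uint64(0)
	return f != f // true: NaN is never equal to itself
}
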
type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
	{"inittrace", &debug.inittrace},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := bytealg.IndexByteString(p, ',')
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

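// Example (illustrative): with GODEBUG=gctrace=1,schedtrace=1000,foo,
// parsedebugvars splits the value on commas, skips "foo" because it has
// no '=', and sets debug.gctrace to 1 and debug.schedtrace to 1000
// through the dbgvars table. Unknown keys are silently dropped.
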
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

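// Illustrative mapping (follows from the constants at the top of the
// file, where tracebackShift == 2):
//
//	"none"   -> 0
//	"single" -> 1<<tracebackShift                                 == 4
//	"all"    -> 1<<tracebackShift | tracebackAll                  == 6
//	"system" -> 2<<tracebackShift | tracebackAll                  == 10
//	"crash"  -> 2<<tracebackShift | tracebackAll | tracebackCrash == 11
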
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

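// Worked example (illustrative): timediv(12345*1000000000+54321, 1000000000, &e),
// as exercised in check() above, subtracts 1000000000<<bit for each bit
// from 30 down to 0 that still fits, setting that bit in the quotient:
// the result is 12345 with e == 54321. A quotient that does not fit in
// 31 bits saturates to 0x7fffffff with *rem set to 0.
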
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

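// Typical usage (illustrative): bracket a critical section so the
// goroutine stays bound to its M and cannot be preempted in between:
//
//	mp := acquirem()
//	// ... code that must not be preempted or migrated ...
//	releasem(mp)
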
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}

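// Illustrative note (not in the original): the IDs handed out above are
// negative (-1, -2, ...), and the minv map makes registration
// idempotent, so calling reflect_addReflectOff twice with the same
// pointer returns the same ID.
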
