Source file
src/runtime/runtime1.go
Documentation: runtime
1
2
3
4
5 package runtime
6
7 import (
8 "internal/bytealg"
9 "runtime/internal/atomic"
10 "runtime/internal/sys"
11 "unsafe"
12 )
13
14
15
16
17
18
// Traceback settings are packed into a single uint32: the low bits are
// boolean flags and the remaining high bits hold the numeric level.
const (
	tracebackCrash = 1 << iota // crash (e.g. dump core) after printing the traceback
	tracebackAll               // print all goroutines, not just the current one
	tracebackShift = iota      // number of flag bits; numeric level is stored above them
)

// traceback_cache is the current packed traceback setting, read with
// atomic.Load in gotraceback and replaced wholesale by setTraceback.
// Initialized to level 2 with no flags; parsedebugvars overwrites it
// from GOTRACEBACK during startup.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env records the packed setting derived from the GOTRACEBACK
// environment variable (captured at the end of parsedebugvars).
// setTraceback ORs it back in so later calls can never clear bits that
// the environment established.
var traceback_env uint32
27
28
29
30
31
32
33
34
35
36
// gotraceback returns the current traceback settings: the numeric
// level, whether all goroutines should be printed, and whether to
// crash (core dump) afterwards.
//
// The packed value comes from traceback_cache (see setTraceback for
// the encoding). Two per-m conditions override it: an m that is
// throwing forces all=true, and a nonzero m.traceback overrides the
// numeric level.
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		// Per-m override takes precedence over the global setting.
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
49
// argc and argv record the raw C-style command line handed to the
// process at startup (saved by args, consumed by goargs and
// goenvs_unix).
var (
	argc int32
	argv **byte
)
54
55
56
// argv_index returns the i'th pointer in the C-style argv array,
// i.e. argv[i] in C terms (pointer arithmetic in units of one word).
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
60
// args saves the C command line (argc/argv) into package-level state
// and hands it to the OS-specific sysargs hook for further processing.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
66
// goargs builds argslice (the backing store for os.Args) from the C
// argv array. On Windows the command line is handled elsewhere, so
// this is a no-op there.
func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		// gostringnocopy presumably aliases the C argv memory rather
		// than copying it (note the name) — the strings stay valid
		// because argv lives for the whole process.
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}
76
// goenvs_unix initializes envs from the Unix environment block, which
// follows argv in memory: argv[0..argc-1], a NULL, then the
// NULL-terminated list of "key=value" environment strings.
func goenvs_unix() {
	// Count the environment entries. They begin at argv[argc+1],
	// skipping the NULL that terminates the argument vector.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		// gostring copies, so envs does not alias the C memory.
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
91
// environ returns the process environment as captured at startup
// (see goenvs_unix). Callers receive the shared slice, not a copy.
func environ() []string {
	return envs
}
95
96
97
// test_z64 and test_x64 are scratch words used only by testAtomic64.
var test_z64, test_x64 uint64

// testAtomic64 is a startup self-test of the 64-bit atomic primitives
// (Cas64, Load64, Store64, Xadd64, Xchg64); any miscompare throws.
// The (1<<40)-scale constants deliberately set bits above the low
// 32, catching implementations that only handle the low word.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Old value (0) does not match *z64 (42): the CAS must fail
	// and leave everything untouched.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	// Now the old value matches, so the CAS must succeed and store 1.
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	// Xadd64 returns the new value: (1<<40)+1 + (1<<40)+1.
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the old value and installs the new one.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
136
// check is a startup self-test of assumptions the runtime makes about
// the compiler and target: primitive type sizes, struct packing,
// timediv, 32-bit compare-and-swap, byte-wide atomic OR/AND, IEEE NaN
// comparison semantics for both float widths, the 64-bit atomics
// (testAtomic64), stack-size rounding, and the assembly-level checks
// in checkASM. Any violation is fatal (throw).
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	// Fixed-width integer and float sizes.
	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	// Pointers (both unsafe.Pointer and typed) are one word.
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	// Single-byte structs are size 1, and two of them pack with no
	// padding (offset 1, total size 2).
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	// 12345.000054321 seconds split into seconds and nanoseconds.
	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	// Matching old value: CAS succeeds and installs 2.
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	// Non-matching old value: CAS fails and leaves z alone.
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	// All-ones operand exercises sign-extension bugs in CAS.
	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	// Byte-wide atomic OR must touch only the addressed byte.
	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	// Likewise for byte-wide atomic AND.
	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	// An all-ones bit pattern is a NaN; NaN must compare unequal to
	// everything, including itself.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	// NOTE(review): this repeats the identical comparison as
	// float32nan above, whereas the float64 case uses the !(j != j)
	// form for its second check. Harmless, but asymmetric.
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
292
// dbgVar associates a GODEBUG key name with the int32 debug field it
// controls; parsedebugvars scans the dbgvars table of these.
type dbgVar struct {
	name  string
	value *int32
}
297
298
299
300
301
// debug holds the runtime's parsed GODEBUG settings. Each int32 field
// is filled in from the same-named GODEBUG key by parsedebugvars (see
// the dbgvars table); zero means off unless parsedebugvars sets a
// different default (cgocheck, invalidptr, madvdontneed on Linux).
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32

	// malloc is derived, not a GODEBUG key: parsedebugvars sets it to
	// true when any of the malloc-path settings below is nonzero, so
	// allocator code can test a single flag.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}
327
// dbgvars maps GODEBUG key names to the debug struct fields they set.
// memprofilerate is not listed here because its target
// (MemProfileRate) is an int, not an int32; parsedebugvars handles it
// specially.
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
	{"inittrace", &debug.inittrace},
}
348
// parsedebugvars initializes the debug settings from the GODEBUG and
// GOTRACEBACK environment variables. Runs once during startup.
func parsedebugvars() {
	// Defaults that differ from the zero value.
	debug.cgocheck = 1
	debug.invalidptr = 1
	if GOOS == "linux" {
		// Default to madvdontneed=1 on Linux — presumably so freed
		// memory shows up in RSS promptly instead of using MADV_FREE;
		// the upstream rationale comment is not visible in this view.
		debug.madvdontneed = 1
	}

	// GODEBUG is a comma-separated list of key=value pairs.
	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := bytealg.IndexByteString(p, ',')
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = bytealg.IndexByteString(field, '=')
		if i < 0 {
			// Entry without '=' is malformed; ignore it.
			continue
		}
		key, value := field[:i], field[i+1:]

		// memprofilerate is special-cased: its target,
		// MemProfileRate, is an int rather than an int32, so it
		// cannot appear in the dbgvars table.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			// Linear scan of the known keys; unknown keys are
			// silently ignored, unparsable values leave the
			// setting unchanged.
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	// Summarize the malloc-path settings into a single flag for hot
	// allocator code.
	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	// Remember the environment-derived setting so later setTraceback
	// calls cannot weaken it (setTraceback ORs traceback_env back in).
	traceback_env = traceback_cache
}
402
403
// setTraceback parses a GOTRACEBACK-style setting and atomically
// installs the packed result in traceback_cache (encoding described
// at the tracebackCrash/tracebackAll/tracebackShift constants).
// Presumably also reachable via runtime/debug.SetTraceback — the
// //go:linkname directive is not visible in this view; confirm.
//
//	"none"        level 0
//	"single", ""  level 1, current goroutine only
//	"all"         level 1, all goroutines
//	"system"      level 2, all goroutines
//	"crash"       level 2, all goroutines, crash afterwards
//	<number>      that level (if it fits in uint32), all goroutines
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}

	// When built as a library or archive, always crash on a fatal
	// error so the host process observes it.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Bits established by the GOTRACEBACK environment variable can
	// never be cleared by a later call.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
433
434
435
436
437
438
439
// timediv performs a 64-bit by 32-bit division without using 64-bit
// hardware divide: it returns int32(v/div) and, if rem is non-nil,
// stores int32(v%div) through it. If the quotient does not fit in 31
// bits the result saturates to 0x7fffffff and the remainder is
// reported as 0.
func timediv(v int64, div int32, rem *int32) int32 {
	d := int64(div)
	q := int32(0)
	// Long division, one quotient bit at a time, highest bit first.
	// Only bits 30..0 are produced, so quotients need at most 31 bits.
	for shift := 30; shift >= 0; shift-- {
		if chunk := d << uint(shift); v >= chunk {
			v -= chunk
			q |= 1 << uint(shift)
		}
	}
	if v >= d {
		// Quotient would need bit 31 or higher: saturate.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return q
}
461
462
463
464
// acquirem pins the calling goroutine to its current m by incrementing
// the m's lock count, and returns that m. While locks > 0 the m must
// not be preempted; pair every call with releasem.
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}
470
471
// releasem undoes a previous acquirem on mp. If the lock count drops
// to zero and a preemption request arrived while locks were held,
// re-arm it by installing the stackPreempt sentinel in stackguard0 so
// the next stack check honors the pending preemption.
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// Restore the pending preemption request.
		_g_.stackguard0 = stackPreempt
	}
}
480
481
// reflect_typelinks returns, for every active module, a pointer to its
// types section and the module's typelink offsets into that section.
// The two slices are parallel (index i describes module i). Presumably
// linknamed as reflect.typelinks — the //go:linkname directive is not
// visible in this view.
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	// activeModules is assumed non-empty here: module 0 seeds both
	// slices, the rest are appended.
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}
492
493
494
// reflect_resolveNameOff resolves a name offset (relative to the
// module containing ptrInModule) to a pointer to the name's bytes.
// Presumably linknamed from package reflect — directive not visible
// in this view.
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}
498
499
500
// reflect_resolveTypeOff resolves a type offset, relative to the
// module containing rtype, to a *_type pointer. Presumably linknamed
// from package reflect — directive not visible in this view.
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}
504
505
506
// reflect_resolveTextOff resolves a text (function code) offset,
// relative to the module containing rtype, to a code pointer.
// Presumably linknamed from package reflect — directive not visible
// in this view.
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))

}
511
512
513
// reflectlite_resolveNameOff is the internal/reflectlite twin of
// reflect_resolveNameOff: it resolves a name offset to a pointer to
// the name's bytes. Presumably linknamed — directive not visible in
// this view.
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}
517
518
519
// reflectlite_resolveTypeOff is the internal/reflectlite twin of
// reflect_resolveTypeOff: it resolves a type offset relative to the
// module containing rtype. Presumably linknamed — directive not
// visible in this view.
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}
523
524
525
// reflect_addReflectOff registers ptr in the runtime's table of
// reflect-created offsets and returns its id. Ids are negative,
// allocated downward from -1 (distinguishing them from real, positive
// section offsets); registering the same pointer twice returns the
// same id thanks to the reverse map minv. The whole operation is
// serialized by reflectOffsLock. Presumably linknamed from package
// reflect — directive not visible in this view.
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		// Lazily create both directions of the mapping on first use.
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // ids grow downward: -1, -2, -3, ...
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
543
View as plain text