Source file: src/runtime/proc.go (package runtime)
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goexperiment"
11 "runtime/internal/atomic"
12 "runtime/internal/sys"
13 "unsafe"
14 )
15
16
17 var modinfo string
18
113 var (
114 m0 m
115 g0 g
116 mcache0 *mcache
117 raceprocctx0 uintptr
118 )
119
120
121 var runtime_inittask initTask
122
123
124 var main_inittask initTask
125
126
127
128
129
130 var main_init_done chan bool
131
132
133 func main_main()
134
135
136 var mainStarted bool
137
138
139 var runtimeInitTime int64
140
141
142 var initSigmask sigset
143
144
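// main is the main goroutine, set up by the runtime after scheduler
// initialization: it starts sysmon, runs the runtime and package init tasks,
// enables the GC, and then calls the program's main.main.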
145 func main() {
146 g := getg()
147
148
149
150 g.m.g0.racectx = 0
151
152
153
154
155 if sys.PtrSize == 8 {
156 maxstacksize = 1000000000
157 } else {
158 maxstacksize = 250000000
159 }
160
161
162
163
164 maxstackceiling = 2 * maxstacksize
165
166
167 mainStarted = true
168
169 if GOARCH != "wasm" {
170
171
172
173 atomic.Store(&sched.sysmonStarting, 1)
174 systemstack(func() {
175 newm(sysmon, nil, -1)
176 })
177 }
178
179
180
181
182
183
184
185 lockOSThread()
186
187 if g.m != &m0 {
188 throw("runtime.main not on m0")
189 }
190 m0.doesPark = true
191
192
193
194 runtimeInitTime = nanotime()
195 if runtimeInitTime == 0 {
196 throw("nanotime returning zero")
197 }
198
199 if debug.inittrace != 0 {
200 inittrace.id = getg().goid
201 inittrace.active = true
202 }
203
204 doInit(&runtime_inittask)
205
206
207 needUnlock := true
208 defer func() {
209 if needUnlock {
210 unlockOSThread()
211 }
212 }()
213
214 gcenable()
215
216 main_init_done = make(chan bool)
217 if iscgo {
218 if _cgo_thread_start == nil {
219 throw("_cgo_thread_start missing")
220 }
221 if GOOS != "windows" {
222 if _cgo_setenv == nil {
223 throw("_cgo_setenv missing")
224 }
225 if _cgo_unsetenv == nil {
226 throw("_cgo_unsetenv missing")
227 }
228 }
229 if _cgo_notify_runtime_init_done == nil {
230 throw("_cgo_notify_runtime_init_done missing")
231 }
232
233
234 startTemplateThread()
235 cgocall(_cgo_notify_runtime_init_done, nil)
236 }
237
238 doInit(&main_inittask)
239
240
241
242 inittrace.active = false
243
244 close(main_init_done)
245
246 needUnlock = false
247 unlockOSThread()
248
249 if isarchive || islibrary {
250
251
252 return
253 }
254 fn := main_main
255 fn()
256 if raceenabled {
257 racefini()
258 }
259
260
261
262
263
264 if atomic.Load(&runningPanicDefers) != 0 {
265
266 for c := 0; c < 1000; c++ {
267 if atomic.Load(&runningPanicDefers) == 0 {
268 break
269 }
270 Gosched()
271 }
272 }
273 if atomic.Load(&panicking) != 0 {
274 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
275 }
276
277 exit(0)
278 for {
279 var x *int32
280 *x = 0
281 }
282 }
283
284
285
286 func os_beforeExit() {
287 if raceenabled {
288 racefini()
289 }
290 }
291
292
293 func init() {
294 go forcegchelper()
295 }
296
297 func forcegchelper() {
298 forcegc.g = getg()
299 lockInit(&forcegc.lock, lockRankForcegc)
300 for {
301 lock(&forcegc.lock)
302 if forcegc.idle != 0 {
303 throw("forcegc: phase error")
304 }
305 atomic.Store(&forcegc.idle, 1)
306 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
307
308 if debug.gctrace > 0 {
309 println("GC forced")
310 }
311
312 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
313 }
314 }
315
316
317
318
319
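// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.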
320 func Gosched() {
321 checkTimeouts()
322 mcall(gosched_m)
323 }
324
325
326
327
328 func goschedguarded() {
329 mcall(goschedguarded_m)
330 }
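// gopark puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed immediately; otherwise it
// stays parked until another goroutine calls goready(gp).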
349 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
350 if reason != waitReasonSleep {
351 checkTimeouts()
352 }
353 mp := acquirem()
354 gp := mp.curg
355 status := readgstatus(gp)
356 if status != _Grunning && status != _Gscanrunning {
357 throw("gopark: bad g status")
358 }
359 mp.waitlock = lock
360 mp.waitunlockf = unlockf
361 gp.waitreason = reason
362 mp.waittraceev = traceEv
363 mp.waittraceskip = traceskip
364 releasem(mp)
365
366 mcall(park_m)
367 }
368
369
370
371 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
372 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
373 }
374
375 func goready(gp *g, traceskip int) {
376 systemstack(func() {
377 ready(gp, traceskip, true)
378 })
379 }
380
381
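// acquireSudog returns a sudog from the per-P cache, refilling the cache from
// the central list (or allocating a new sudog) when it is empty.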
382 func acquireSudog() *sudog {
391 mp := acquirem()
392 pp := mp.p.ptr()
393 if len(pp.sudogcache) == 0 {
394 lock(&sched.sudoglock)
395
396 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
397 s := sched.sudogcache
398 sched.sudogcache = s.next
399 s.next = nil
400 pp.sudogcache = append(pp.sudogcache, s)
401 }
402 unlock(&sched.sudoglock)
403
404 if len(pp.sudogcache) == 0 {
405 pp.sudogcache = append(pp.sudogcache, new(sudog))
406 }
407 }
408 n := len(pp.sudogcache)
409 s := pp.sudogcache[n-1]
410 pp.sudogcache[n-1] = nil
411 pp.sudogcache = pp.sudogcache[:n-1]
412 if s.elem != nil {
413 throw("acquireSudog: found s.elem != nil in cache")
414 }
415 releasem(mp)
416 return s
417 }
418
419
420 func releaseSudog(s *sudog) {
421 if s.elem != nil {
422 throw("runtime: sudog with non-nil elem")
423 }
424 if s.isSelect {
425 throw("runtime: sudog with non-false isSelect")
426 }
427 if s.next != nil {
428 throw("runtime: sudog with non-nil next")
429 }
430 if s.prev != nil {
431 throw("runtime: sudog with non-nil prev")
432 }
433 if s.waitlink != nil {
434 throw("runtime: sudog with non-nil waitlink")
435 }
436 if s.c != nil {
437 throw("runtime: sudog with non-nil c")
438 }
439 gp := getg()
440 if gp.param != nil {
441 throw("runtime: releaseSudog with non-nil gp.param")
442 }
443 mp := acquirem()
444 pp := mp.p.ptr()
445 if len(pp.sudogcache) == cap(pp.sudogcache) {
446
447 var first, last *sudog
448 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
449 n := len(pp.sudogcache)
450 p := pp.sudogcache[n-1]
451 pp.sudogcache[n-1] = nil
452 pp.sudogcache = pp.sudogcache[:n-1]
453 if first == nil {
454 first = p
455 } else {
456 last.next = p
457 }
458 last = p
459 }
460 lock(&sched.sudoglock)
461 last.next = sched.sudogcache
462 sched.sudogcache = first
463 unlock(&sched.sudoglock)
464 }
465 pp.sudogcache = append(pp.sudogcache, s)
466 releasem(mp)
467 }
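// funcPC returns the entry PC of the function value f; the behavior is
// undefined if f is not a func value.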
477 func funcPC(f interface{}) uintptr {
478 return *(*uintptr)(efaceOf(&f).data)
479 }
480
481
482 func badmcall(fn func(*g)) {
483 throw("runtime: mcall called on m->g0 stack")
484 }
485
486 func badmcall2(fn func(*g)) {
487 throw("runtime: mcall function returned")
488 }
489
490 func badreflectcall() {
491 panic(plainError("arg size to reflect.call more than 1GB"))
492 }
493
494 var badmorestackg0Msg = "fatal: morestack on g0\n"
495
496
497
498 func badmorestackg0() {
499 sp := stringStructOf(&badmorestackg0Msg)
500 write(2, sp.str, int32(sp.len))
501 }
502
503 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
504
505
506
507 func badmorestackgsignal() {
508 sp := stringStructOf(&badmorestackgsignalMsg)
509 write(2, sp.str, int32(sp.len))
510 }
511
512
513 func badctxt() {
514 throw("ctxt != 0")
515 }
516
517 func lockedOSThread() bool {
518 gp := getg()
519 return gp.lockedm != 0 && gp.m.lockedg != 0
520 }
521
522 var (
529 allglock mutex
530 allgs []*g
544 allglen uintptr
545 allgptr **g
546 )
547
548 func allgadd(gp *g) {
549 if readgstatus(gp) == _Gidle {
550 throw("allgadd: bad status Gidle")
551 }
552
553 lock(&allglock)
554 allgs = append(allgs, gp)
555 if &allgs[0] != allgptr {
556 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
557 }
558 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
559 unlock(&allglock)
560 }
561
562
563 func atomicAllG() (**g, uintptr) {
564 length := atomic.Loaduintptr(&allglen)
565 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
566 return ptr, length
567 }
568
569
570 func atomicAllGIndex(ptr **g, i uintptr) *g {
571 return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
572 }
573
574
575
576
577 func forEachG(fn func(gp *g)) {
578 lock(&allglock)
579 for _, gp := range allgs {
580 fn(gp)
581 }
582 unlock(&allglock)
583 }
584
585
586
587
588
589 func forEachGRace(fn func(gp *g)) {
590 ptr, length := atomicAllG()
591 for i := uintptr(0); i < length; i++ {
592 gp := atomicAllGIndex(ptr, i)
593 fn(gp)
594 }
595 return
596 }
597
598 const (
599
600
601 _GoidCacheBatch = 16
602 )
603
604
605
606 func cpuinit() {
607 const prefix = "GODEBUG="
608 var env string
609
610 switch GOOS {
611 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
612 cpu.DebugOptions = true
613
614
615
616
617 n := int32(0)
618 for argv_index(argv, argc+1+n) != nil {
619 n++
620 }
621
622 for i := int32(0); i < n; i++ {
623 p := argv_index(argv, argc+1+i)
624 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
625
626 if hasPrefix(s, prefix) {
627 env = gostring(p)[len(prefix):]
628 break
629 }
630 }
631 }
632
633 cpu.Initialize(env)
634
635
636
637 x86HasPOPCNT = cpu.X86.HasPOPCNT
638 x86HasSSE41 = cpu.X86.HasSSE41
639 x86HasFMA = cpu.X86.HasFMA
640
641 armHasVFPv4 = cpu.ARM.HasVFPv4
642
643 arm64HasATOMICS = cpu.ARM64.HasATOMICS
644 }
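// schedinit performs the one-time scheduler bootstrap: it initializes lock
// ranking, the stack and memory allocators, the current m, CPU features, GC
// state, and sizes the P set from GOMAXPROCS (defaulting to ncpu).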
654 func schedinit() {
655 lockInit(&sched.lock, lockRankSched)
656 lockInit(&sched.sysmonlock, lockRankSysmon)
657 lockInit(&sched.deferlock, lockRankDefer)
658 lockInit(&sched.sudoglock, lockRankSudog)
659 lockInit(&deadlock, lockRankDeadlock)
660 lockInit(&paniclk, lockRankPanic)
661 lockInit(&allglock, lockRankAllg)
662 lockInit(&allpLock, lockRankAllp)
663 lockInit(&reflectOffs.lock, lockRankReflectOffs)
664 lockInit(&finlock, lockRankFin)
665 lockInit(&trace.bufLock, lockRankTraceBuf)
666 lockInit(&trace.stringsLock, lockRankTraceStrings)
667 lockInit(&trace.lock, lockRankTrace)
668 lockInit(&cpuprof.lock, lockRankCpuprof)
669 lockInit(&trace.stackTab.lock, lockRankTraceStackTab)
670
671
672
673 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
674
675
676
677 _g_ := getg()
678 if raceenabled {
679 _g_.racectx, raceprocctx0 = raceinit()
680 }
681
682 sched.maxmcount = 10000
683
684
685 worldStopped()
686
687 moduledataverify()
688 stackinit()
689 mallocinit()
690 fastrandinit()
691 mcommoninit(_g_.m, -1)
692 cpuinit()
693 alginit()
694 modulesinit()
695 typelinksinit()
696 itabsinit()
697
698 sigsave(&_g_.m.sigmask)
699 initSigmask = _g_.m.sigmask
700
701 if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
702 println(offset)
703 throw("sched.timeToRun not aligned to 8 bytes")
704 }
705
706 goargs()
707 goenvs()
708 parsedebugvars()
709 gcinit()
710
711 lock(&sched.lock)
712 sched.lastpoll = uint64(nanotime())
713 procs := ncpu
714 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
715 procs = n
716 }
717 if procresize(procs) != nil {
718 throw("unknown runnable goroutine during bootstrap")
719 }
720 unlock(&sched.lock)
721
722
723 worldStarted()
724
725
726
727
728 if debug.cgocheck > 1 {
729 writeBarrier.cgo = true
730 writeBarrier.enabled = true
731 for _, p := range allp {
732 p.wbBuf.reset()
733 }
734 }
735
736 if buildVersion == "" {
737
738
739 buildVersion = "unknown"
740 }
741 if len(modinfo) == 1 {
742
743
744 modinfo = ""
745 }
746 }
747
748 func dumpgstatus(gp *g) {
749 _g_ := getg()
750 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
751 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
752 }
753
754
755 func checkmcount() {
756 assertLockHeld(&sched.lock)
757
758 if mcount() > sched.maxmcount {
759 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
760 throw("thread exhaustion")
761 }
762 }
763
764
765
766
767
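// mReserveID returns the next ID to use for a new m. sched.lock must be held.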
768 func mReserveID() int64 {
769 assertLockHeld(&sched.lock)
770
771 if sched.mnext+1 < sched.mnext {
772 throw("runtime: thread ID overflow")
773 }
774 id := sched.mnext
775 sched.mnext++
776 checkmcount()
777 return id
778 }
779
780
781 func mcommoninit(mp *m, id int64) {
782 _g_ := getg()
783
784
785 if _g_ != _g_.m.g0 {
786 callers(1, mp.createstack[:])
787 }
788
789 lock(&sched.lock)
790
791 if id >= 0 {
792 mp.id = id
793 } else {
794 mp.id = mReserveID()
795 }
796
797 mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
798 mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
799 if mp.fastrand[0]|mp.fastrand[1] == 0 {
800 mp.fastrand[1] = 1
801 }
802
803 mpreinit(mp)
804 if mp.gsignal != nil {
805 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
806 }
807
808
809
810 mp.alllink = allm
811
812
813
814 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
815 unlock(&sched.lock)
816
817
818 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
819 mp.cgoCallers = new(cgoCallers)
820 }
821 }
822
823 var fastrandseed uintptr
824
825 func fastrandinit() {
826 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
827 getRandomData(s)
828 }
829
830
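// ready marks gp (which must be waiting) runnable, puts it on the current P's
// run queue (in the runnext slot if next is true), and wakes an idle P if needed.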
831 func ready(gp *g, traceskip int, next bool) {
832 if trace.enabled {
833 traceGoUnpark(gp, traceskip)
834 }
835
836 status := readgstatus(gp)
837
838
839 _g_ := getg()
840 mp := acquirem()
841 if status&^_Gscan != _Gwaiting {
842 dumpgstatus(gp)
843 throw("bad g->status in ready")
844 }
845
846
847 casgstatus(gp, _Gwaiting, _Grunnable)
848 runqput(_g_.m.p.ptr(), gp, next)
849 wakep()
850 releasem(mp)
851 }
852
853
854
855 const freezeStopWait = 0x7fffffff
856
857
858
859 var freezing uint32
860
861
862
863
864 func freezetheworld() {
865 atomic.Store(&freezing, 1)
866
867
868
869 for i := 0; i < 5; i++ {
870
871 sched.stopwait = freezeStopWait
872 atomic.Store(&sched.gcwaiting, 1)
873
874 if !preemptall() {
875 break
876 }
877 usleep(1000)
878 }
879
880 usleep(1000)
881 preemptall()
882 usleep(1000)
883 }
884
885
886
887
888 func readgstatus(gp *g) uint32 {
889 return atomic.Load(&gp.atomicstatus)
890 }
891
892
893
894
895
896 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
897 success := false
898
899
900 switch oldval {
901 default:
902 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
903 dumpgstatus(gp)
904 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
905 case _Gscanrunnable,
906 _Gscanwaiting,
907 _Gscanrunning,
908 _Gscansyscall,
909 _Gscanpreempted:
910 if newval == oldval&^_Gscan {
911 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
912 }
913 }
914 if !success {
915 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
916 dumpgstatus(gp)
917 throw("casfrom_Gscanstatus: gp->status is not in scan state")
918 }
919 releaseLockRank(lockRankGscan)
920 }
921
922
923
924 func castogscanstatus(gp *g, oldval, newval uint32) bool {
925 switch oldval {
926 case _Grunnable,
927 _Grunning,
928 _Gwaiting,
929 _Gsyscall:
930 if newval == oldval|_Gscan {
931 r := atomic.Cas(&gp.atomicstatus, oldval, newval)
932 if r {
933 acquireLockRank(lockRankGscan)
934 }
935 return r
936
937 }
938 }
939 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
940 throw("castogscanstatus")
941 panic("not reached")
942 }
943
944
945
946
947
948
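// casgstatus transitions gp's status from oldval to newval with a CAS,
// spinning while the GC temporarily holds the status in a scan state. Neither
// value may carry the _Gscan bit.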
949 func casgstatus(gp *g, oldval, newval uint32) {
950 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
951 systemstack(func() {
952 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
953 throw("casgstatus: bad incoming values")
954 })
955 }
956
957 acquireLockRank(lockRankGscan)
958 releaseLockRank(lockRankGscan)
959
960
961 const yieldDelay = 5 * 1000
962 var nextYield int64
963
964
965
966 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
967 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
968 throw("casgstatus: waiting for Gwaiting but is Grunnable")
969 }
970 if i == 0 {
971 nextYield = nanotime() + yieldDelay
972 }
973 if nanotime() < nextYield {
974 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
975 procyield(1)
976 }
977 } else {
978 osyield()
979 nextYield = nanotime() + yieldDelay/2
980 }
981 }
982
983
984 if oldval == _Grunning {
985
986 if gp.trackingSeq%gTrackingPeriod == 0 {
987 gp.tracking = true
988 }
989 gp.trackingSeq++
990 }
991 if gp.tracking {
992 now := nanotime()
993 if oldval == _Grunnable {
994
995
996
997 gp.runnableTime += now - gp.runnableStamp
998 gp.runnableStamp = 0
999 }
1000 if newval == _Grunnable {
1001
1002
1003 gp.runnableStamp = now
1004 } else if newval == _Grunning {
1005
1006
1007
1008 gp.tracking = false
1009 sched.timeToRun.record(gp.runnableTime)
1010 gp.runnableTime = 0
1011 }
1012 }
1013 }
1014
1015
1016
1017
1018
1019
1020
1021 func casgcopystack(gp *g) uint32 {
1022 for {
1023 oldstatus := readgstatus(gp) &^ _Gscan
1024 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
1025 throw("copystack: bad status, not Gwaiting or Grunnable")
1026 }
1027 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
1028 return oldstatus
1029 }
1030 }
1031 }
1032
1033
1034
1035
1036
1037 func casGToPreemptScan(gp *g, old, new uint32) {
1038 if old != _Grunning || new != _Gscan|_Gpreempted {
1039 throw("bad g transition")
1040 }
1041 acquireLockRank(lockRankGscan)
1042 for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
1043 }
1044 }
1045
1046
1047
1048
1049 func casGFromPreempted(gp *g, old, new uint32) bool {
1050 if old != _Gpreempted || new != _Gwaiting {
1051 throw("bad g transition")
1052 }
1053 return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
1054 }
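// stopTheWorld stops all Ps from executing goroutines, interrupting them at GC
// safe points, and records reason for traces and debugging. On return only the
// current goroutine's P is running; the caller must call startTheWorld to resume.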
1070 func stopTheWorld(reason string) {
1071 semacquire(&worldsema)
1072 gp := getg()
1073 gp.m.preemptoff = reason
1074 systemstack(func() {
1085 casgstatus(gp, _Grunning, _Gwaiting)
1086 stopTheWorldWithSema()
1087 casgstatus(gp, _Gwaiting, _Grunning)
1088 })
1089 }
1090
1091
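// startTheWorld undoes the effects of stopTheWorld.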
1092 func startTheWorld() {
1093 systemstack(func() { startTheWorldWithSema(false) })
1110 mp := acquirem()
1111 mp.preemptoff = ""
1112 semrelease1(&worldsema, true, 0)
1113 releasem(mp)
1114 }
1115
1116
1117
1118
1119 func stopTheWorldGC(reason string) {
1120 semacquire(&gcsema)
1121 stopTheWorld(reason)
1122 }
1123
1124
1125 func startTheWorldGC() {
1126 startTheWorld()
1127 semrelease(&gcsema)
1128 }
1129
1130
1131 var worldsema uint32 = 1
1139 var gcsema uint32 = 1
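// stopTheWorldWithSema is the core implementation of stopTheWorld; the caller
// must hold worldsema and have preemption disabled, and must later call
// startTheWorldWithSema to restart the world.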
1163 func stopTheWorldWithSema() {
1164 _g_ := getg()
1165
1166
1167
1168 if _g_.m.locks > 0 {
1169 throw("stopTheWorld: holding locks")
1170 }
1171
1172 lock(&sched.lock)
1173 sched.stopwait = gomaxprocs
1174 atomic.Store(&sched.gcwaiting, 1)
1175 preemptall()
1176
1177 _g_.m.p.ptr().status = _Pgcstop
1178 sched.stopwait--
1179
1180 for _, p := range allp {
1181 s := p.status
1182 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1183 if trace.enabled {
1184 traceGoSysBlock(p)
1185 traceProcStop(p)
1186 }
1187 p.syscalltick++
1188 sched.stopwait--
1189 }
1190 }
1191
1192 for {
1193 p := pidleget()
1194 if p == nil {
1195 break
1196 }
1197 p.status = _Pgcstop
1198 sched.stopwait--
1199 }
1200 wait := sched.stopwait > 0
1201 unlock(&sched.lock)
1202
1203
1204 if wait {
1205 for {
1206
1207 if notetsleep(&sched.stopnote, 100*1000) {
1208 noteclear(&sched.stopnote)
1209 break
1210 }
1211 preemptall()
1212 }
1213 }
1214
1215
1216 bad := ""
1217 if sched.stopwait != 0 {
1218 bad = "stopTheWorld: not stopped (stopwait != 0)"
1219 } else {
1220 for _, p := range allp {
1221 if p.status != _Pgcstop {
1222 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1223 }
1224 }
1225 }
1226 if atomic.Load(&freezing) != 0 {
1227
1228
1229
1230
1231 lock(&deadlock)
1232 lock(&deadlock)
1233 }
1234 if bad != "" {
1235 throw(bad)
1236 }
1237
1238 worldStopped()
1239 }
1240
1241 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1242 assertWorldStopped()
1243
1244 mp := acquirem()
1245 if netpollinited() {
1246 list := netpoll(0)
1247 injectglist(&list)
1248 }
1249 lock(&sched.lock)
1250
1251 procs := gomaxprocs
1252 if newprocs != 0 {
1253 procs = newprocs
1254 newprocs = 0
1255 }
1256 p1 := procresize(procs)
1257 sched.gcwaiting = 0
1258 if sched.sysmonwait != 0 {
1259 sched.sysmonwait = 0
1260 notewakeup(&sched.sysmonnote)
1261 }
1262 unlock(&sched.lock)
1263
1264 worldStarted()
1265
1266 for p1 != nil {
1267 p := p1
1268 p1 = p1.link.ptr()
1269 if p.m != 0 {
1270 mp := p.m.ptr()
1271 p.m = 0
1272 if mp.nextp != 0 {
1273 throw("startTheWorld: inconsistent mp->nextp")
1274 }
1275 mp.nextp.set(p)
1276 notewakeup(&mp.park)
1277 } else {
1278
1279 newm(nil, p, -1)
1280 }
1281 }
1282
1283
1284 startTime := nanotime()
1285 if emitTraceEvent {
1286 traceGCSTWDone()
1287 }
1288
1289
1290
1291
1292 wakep()
1293
1294 releasem(mp)
1295
1296 return startTime
1297 }
1298
1299
1300
1301 func usesLibcall() bool {
1302 switch GOOS {
1303 case "aix", "darwin", "illumos", "ios", "openbsd", "solaris", "windows":
1304 return true
1305 }
1306 return false
1307 }
1308
1309
1310
1311 func mStackIsSystemAllocated() bool {
1312 switch GOOS {
1313 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1314 return true
1315 case "openbsd":
1316 switch GOARCH {
1317 case "386", "amd64", "arm", "arm64", "mips64":
1318 return true
1319 }
1320 }
1321 return false
1322 }
1323
1324
1325
1326 func mstart()
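// mstart0 is the Go entry point for new Ms (reached from the assembly mstart).
// It establishes g0's stack bounds if the thread arrived on an OS-allocated
// stack, runs mstart1, and finally tears the thread down through mexit.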
1337 func mstart0() {
1338 _g_ := getg()
1339
1340 osStack := _g_.stack.lo == 0
1341 if osStack {
1350 size := _g_.stack.hi
1351 if size == 0 {
1352 size = 8192 * sys.StackGuardMultiplier
1353 }
1354 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1355 _g_.stack.lo = _g_.stack.hi - size + 1024
1356 }
1357
1358
1359 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1360
1361
1362 _g_.stackguard1 = _g_.stackguard0
1363 mstart1()
1364
1365
1366 if mStackIsSystemAllocated() {
1367
1368
1369
1370 osStack = true
1371 }
1372 mexit(osStack)
1373 }
1374
1375
1376
1377
1378 func mstart1() {
1379 _g_ := getg()
1380
1381 if _g_ != _g_.m.g0 {
1382 throw("bad runtime·mstart")
1383 }
1384
1385
1386
1387
1388
1389
1390
1391 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
1392 _g_.sched.pc = getcallerpc()
1393 _g_.sched.sp = getcallersp()
1394
1395 asminit()
1396 minit()
1397
1398
1399
1400 if _g_.m == &m0 {
1401 mstartm0()
1402 }
1403
1404 if fn := _g_.m.mstartfn; fn != nil {
1405 fn()
1406 }
1407
1408 if _g_.m != &m0 {
1409 acquirep(_g_.m.nextp.ptr())
1410 _g_.m.nextp = 0
1411 }
1412 schedule()
1413 }
1414
1415
1416
1417
1418
1419
1420
1421 func mstartm0() {
1422
1423
1424
1425 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1426 cgoHasExtraM = true
1427 newextram()
1428 }
1429 initsig(false)
1430 }
1431
1432
1433
1434
1435
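// mPark parks the current M on its park note until it is woken with
// notewakeup; if the wakeup was only to run mFixup work, it parks again.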
1436 func mPark() {
1437 g := getg()
1438 for {
1439 notesleep(&g.m.park)
1440
1441
1442
1443 noteclear(&g.m.park)
1444 if !mDoFixup() {
1445 return
1446 }
1447 }
1448 }
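// mexit tears down and exits the current thread. osStack reports whether the
// thread's g0 stack was allocated by the OS and therefore must not be freed by
// the runtime.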
1460 func mexit(osStack bool) {
1461 g := getg()
1462 m := g.m
1463
1464 if m == &m0 {
1476 handoffp(releasep())
1477 lock(&sched.lock)
1478 sched.nmfreed++
1479 checkdead()
1480 unlock(&sched.lock)
1481 mPark()
1482 throw("locked m0 woke up")
1483 }
1484
1485 sigblock(true)
1486 unminit()
1487
1488
1489 if m.gsignal != nil {
1490 stackfree(m.gsignal.stack)
1491
1492
1493
1494
1495 m.gsignal = nil
1496 }
1497
1498
1499 lock(&sched.lock)
1500 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1501 if *pprev == m {
1502 *pprev = m.alllink
1503 goto found
1504 }
1505 }
1506 throw("m not found in allm")
1507 found:
1508 if !osStack {
1509
1510
1511
1512
1513 atomic.Store(&m.freeWait, 1)
1514
1515
1516
1517
1518 m.freelink = sched.freem
1519 sched.freem = m
1520 }
1521 unlock(&sched.lock)
1522
1523 atomic.Xadd64(&ncgocall, int64(m.ncgocall))
1524
1525
1526 handoffp(releasep())
1527
1528
1529
1530
1531
1532 lock(&sched.lock)
1533 sched.nmfreed++
1534 checkdead()
1535 unlock(&sched.lock)
1536
1537 if GOOS == "darwin" || GOOS == "ios" {
1538
1539
1540 if atomic.Load(&m.signalPending) != 0 {
1541 atomic.Xadd(&pendingPreemptSignals, -1)
1542 }
1543 }
1544
1545
1546
1547 mdestroy(m)
1548
1549 if osStack {
1550
1551
1552 return
1553 }
1554
1555
1556
1557
1558
1559 exitThread(&m.freeWait)
1560 }
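// forEachP calls fn(p) for every P once that P reaches a GC safe point,
// blocking until all Ps have run fn.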
1573 func forEachP(fn func(*p)) {
1574 mp := acquirem()
1575 _p_ := getg().m.p.ptr()
1576
1577 lock(&sched.lock)
1578 if sched.safePointWait != 0 {
1579 throw("forEachP: sched.safePointWait != 0")
1580 }
1581 sched.safePointWait = gomaxprocs - 1
1582 sched.safePointFn = fn
1583
1584
1585 for _, p := range allp {
1586 if p != _p_ {
1587 atomic.Store(&p.runSafePointFn, 1)
1588 }
1589 }
1590 preemptall()
1598 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1599 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1600 fn(p)
1601 sched.safePointWait--
1602 }
1603 }
1604
1605 wait := sched.safePointWait > 0
1606 unlock(&sched.lock)
1607
1608
1609 fn(_p_)
1610
1611
1612
1613 for _, p := range allp {
1614 s := p.status
1615 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1616 if trace.enabled {
1617 traceGoSysBlock(p)
1618 traceProcStop(p)
1619 }
1620 p.syscalltick++
1621 handoffp(p)
1622 }
1623 }
1624
1625
1626 if wait {
1627 for {
1628
1629
1630
1631
1632 if notetsleep(&sched.safePointNote, 100*1000) {
1633 noteclear(&sched.safePointNote)
1634 break
1635 }
1636 preemptall()
1637 }
1638 }
1639 if sched.safePointWait != 0 {
1640 throw("forEachP: not done")
1641 }
1642 for _, p := range allp {
1643 if p.runSafePointFn != 0 {
1644 throw("forEachP: P did not run fn")
1645 }
1646 }
1647
1648 lock(&sched.lock)
1649 sched.safePointFn = nil
1650 unlock(&sched.lock)
1651 releasem(mp)
1652 }
1668 func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
1669 if iscgo {
1670 panic("doAllThreadsSyscall not supported with cgo enabled")
1671 }
1672 if fn == nil {
1673 return
1674 }
1675 for atomic.Load(&sched.sysmonStarting) != 0 {
1676 osyield()
1677 }
1689 lockOSThread()
1690 var sigmask sigset
1691 sigsave(&sigmask)
1692 sigblock(false)
1693
1694 stopTheWorldGC("doAllThreadsSyscall")
1695 if atomic.Load(&newmHandoff.haveTemplateThread) != 0 {
1696
1697
1698 lock(&newmHandoff.lock)
1699 for !newmHandoff.waiting {
1700 unlock(&newmHandoff.lock)
1701 osyield()
1702 lock(&newmHandoff.lock)
1703 }
1704 unlock(&newmHandoff.lock)
1705 }
1706 if netpollinited() {
1707 netpollBreak()
1708 }
1709 sigRecvPrepareForFixup()
1710 _g_ := getg()
1711 if raceenabled {
1712
1713
1714 lock(&mFixupRace.lock)
1715 mFixupRace.ctx = _g_.racectx
1716 unlock(&mFixupRace.lock)
1717 }
1718 if ok := fn(true); ok {
1719 tid := _g_.m.procid
1720 for mp := allm; mp != nil; mp = mp.alllink {
1721 if mp.procid == tid {
1722
1723
1724 continue
1725 }
1726
1727
1728
1729
1730
1731
1732
1733 if mp.procid == 0 && !mp.doesPark {
1734
1735
1736
1737
1738 throw("unsupported runtime environment")
1739 }
1740
1741
1742
1743 lock(&mp.mFixup.lock)
1744 mp.mFixup.fn = fn
1745 atomic.Store(&mp.mFixup.used, 1)
1746 if mp.doesPark {
1747
1748
1749
1750
1751
1752 notewakeup(&mp.park)
1753 }
1754 unlock(&mp.mFixup.lock)
1755 }
1756 for {
1757 done := true
1758 for mp := allm; done && mp != nil; mp = mp.alllink {
1759 if mp.procid == tid {
1760 continue
1761 }
1762 done = atomic.Load(&mp.mFixup.used) == 0
1763 }
1764 if done {
1765 break
1766 }
1767
1768 lock(&sched.lock)
1769 if atomic.Load(&sched.sysmonwait) != 0 {
1770 atomic.Store(&sched.sysmonwait, 0)
1771 notewakeup(&sched.sysmonnote)
1772 }
1773 unlock(&sched.lock)
1774 lock(&newmHandoff.lock)
1775 if newmHandoff.waiting {
1776 newmHandoff.waiting = false
1777 notewakeup(&newmHandoff.wake)
1778 }
1779 unlock(&newmHandoff.lock)
1780 osyield()
1781 }
1782 }
1783 if raceenabled {
1784 lock(&mFixupRace.lock)
1785 mFixupRace.ctx = 0
1786 unlock(&mFixupRace.lock)
1787 }
1788 startTheWorldGC()
1789 msigrestore(sigmask)
1790 unlockOSThread()
1791 }
1804 func runSafePointFn() {
1805 p := getg().m.p.ptr()
1806
1807
1808
1809 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1810 return
1811 }
1812 sched.safePointFn(p)
1813 lock(&sched.lock)
1814 sched.safePointWait--
1815 if sched.safePointWait == 0 {
1816 notewakeup(&sched.safePointNote)
1817 }
1818 unlock(&sched.lock)
1819 }
1820
1821
1822
1823
1824 var cgoThreadStart unsafe.Pointer
1825
1826 type cgothreadstart struct {
1827 g guintptr
1828 tls *uint64
1829 fn unsafe.Pointer
1830 }
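// allocm allocates a new m not yet associated with any OS thread. fn, if
// non-nil, becomes the m's mstartfn; id is an optional pre-reserved m ID, or
// -1 to allocate a fresh one.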
1841 func allocm(_p_ *p, fn func(), id int64) *m {
1842 _g_ := getg()
1843 acquirem()
1844 if _g_.m.p == 0 {
1845 acquirep(_p_)
1846 }
1847
1848
1849
1850 if sched.freem != nil {
1851 lock(&sched.lock)
1852 var newList *m
1853 for freem := sched.freem; freem != nil; {
1854 if freem.freeWait != 0 {
1855 next := freem.freelink
1856 freem.freelink = newList
1857 newList = freem
1858 freem = next
1859 continue
1860 }
1861
1862
1863
1864 systemstack(func() {
1865 stackfree(freem.g0.stack)
1866 })
1867 freem = freem.freelink
1868 }
1869 sched.freem = newList
1870 unlock(&sched.lock)
1871 }
1872
1873 mp := new(m)
1874 mp.mstartfn = fn
1875 mcommoninit(mp, id)
1876
1877
1878
1879 if iscgo || mStackIsSystemAllocated() {
1880 mp.g0 = malg(-1)
1881 } else {
1882 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1883 }
1884 mp.g0.m = mp
1885
1886 if _p_ == _g_.m.p.ptr() {
1887 releasep()
1888 }
1889 releasem(_g_.m)
1890
1891 return mp
1892 }
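// needm is called on a thread not created by Go (a cgo callback thread) to
// take an extra m from the pool and make it the thread's current m.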
1928 func needm() {
1929 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1936 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1937 exit(1)
1938 }
1948 var sigmask sigset
1949 sigsave(&sigmask)
1950 sigblock(false)
1956 mp := lockextra(false)
1965 mp.needextram = mp.schedlink == 0
1966 extraMCount--
1967 unlockextra(mp.schedlink.ptr())
1968
1969
1970 mp.sigmask = sigmask
1971
1972
1973
1974 osSetupTLS(mp)
1981 setg(mp.g0)
1982 _g_ := getg()
1983 _g_.stack.hi = getcallersp() + 1024
1984 _g_.stack.lo = getcallersp() - 32*1024
1985 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1986
1987
1988 asminit()
1989 minit()
1990
1991
1992 casgstatus(mp.curg, _Gdead, _Gsyscall)
1993 atomic.Xadd(&sched.ngsys, -1)
1994 }
1995
1996 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1997
1998
1999
2000
2001 func newextram() {
2002 c := atomic.Xchg(&extraMWaiters, 0)
2003 if c > 0 {
2004 for i := uint32(0); i < c; i++ {
2005 oneNewExtraM()
2006 }
2007 } else {
2008
2009 mp := lockextra(true)
2010 unlockextra(mp)
2011 if mp == nil {
2012 oneNewExtraM()
2013 }
2014 }
2015 }
2016
2017
2018 func oneNewExtraM() {
2019
2020
2021
2022
2023
2024 mp := allocm(nil, nil, -1)
2025 gp := malg(4096)
2026 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2027 gp.sched.sp = gp.stack.hi
2028 gp.sched.sp -= 4 * sys.PtrSize
2029 gp.sched.lr = 0
2030 gp.sched.g = guintptr(unsafe.Pointer(gp))
2031 gp.syscallpc = gp.sched.pc
2032 gp.syscallsp = gp.sched.sp
2033 gp.stktopsp = gp.sched.sp
2034
2035
2036
2037
2038 casgstatus(gp, _Gidle, _Gdead)
2039 gp.m = mp
2040 mp.curg = gp
2041 mp.lockedInt++
2042 mp.lockedg.set(gp)
2043 gp.lockedm.set(mp)
2044 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
2045 if raceenabled {
2046 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
2047 }
2048
2049 allgadd(gp)
2050
2051
2052
2053
2054
2055 atomic.Xadd(&sched.ngsys, +1)
2056
2057
2058 mnext := lockextra(true)
2059 mp.schedlink.set(mnext)
2060 extraMCount++
2061 unlockextra(mp)
2062 }
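// dropm undoes the effect of needm: when the non-Go thread is done calling
// into Go, it returns the extra m to the pool.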
2087 func dropm() {
2088
2089
2090
2091 mp := getg().m
2092
2093
2094 casgstatus(mp.curg, _Gsyscall, _Gdead)
2095 mp.curg.preemptStop = false
2096 atomic.Xadd(&sched.ngsys, +1)
2097
2098
2099
2100
2101
2102 sigmask := mp.sigmask
2103 sigblock(false)
2104 unminit()
2105
2106 mnext := lockextra(true)
2107 extraMCount++
2108 mp.schedlink.set(mnext)
2109
2110 setg(nil)
2111
2112
2113 unlockextra(mp)
2114
2115 msigrestore(sigmask)
2116 }
2117
2118
2119 func getm() uintptr {
2120 return uintptr(unsafe.Pointer(getg().m))
2121 }
2122
2123 var extram uintptr
2124 var extraMCount uint32
2125 var extraMWaiters uint32
2126
2127
2128
2129
2130
2131
2132
2133 func lockextra(nilokay bool) *m {
2134 const locked = 1
2135
2136 incr := false
2137 for {
2138 old := atomic.Loaduintptr(&extram)
2139 if old == locked {
2140 osyield_no_g()
2141 continue
2142 }
2143 if old == 0 && !nilokay {
2144 if !incr {
2145
2146
2147
2148 atomic.Xadd(&extraMWaiters, 1)
2149 incr = true
2150 }
2151 usleep_no_g(1)
2152 continue
2153 }
2154 if atomic.Casuintptr(&extram, old, locked) {
2155 return (*m)(unsafe.Pointer(old))
2156 }
2157 osyield_no_g()
2158 continue
2159 }
2160 }
2161
2162
2163 func unlockextra(mp *m) {
2164 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
2165 }
2166
2167
2168
2169 var execLock rwmutex
2170
2171
2172
2173
2174 var newmHandoff struct {
2175 lock mutex
2176
2177
2178
2179 newm muintptr
2180
2181
2182
2183 waiting bool
2184 wake note
2185
2186
2187
2188
2189 haveTemplateThread uint32
2190 }
2191
2192
2193
2194
2195
2196
2197
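// newm creates a new m (and OS thread) that starts by running fn, with _p_
// (possibly nil) as its initially assigned P; id is an optional pre-reserved
// m ID, or -1 to allocate one.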
2198 func newm(fn func(), _p_ *p, id int64) {
2199 mp := allocm(_p_, fn, id)
2200 mp.doesPark = (_p_ != nil)
2201 mp.nextp.set(_p_)
2202 mp.sigmask = initSigmask
2203 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2215 lock(&newmHandoff.lock)
2216 if newmHandoff.haveTemplateThread == 0 {
2217 throw("on a locked thread with no template thread")
2218 }
2219 mp.schedlink = newmHandoff.newm
2220 newmHandoff.newm.set(mp)
2221 if newmHandoff.waiting {
2222 newmHandoff.waiting = false
2223 notewakeup(&newmHandoff.wake)
2224 }
2225 unlock(&newmHandoff.lock)
2226 return
2227 }
2228 newm1(mp)
2229 }
2230
2231 func newm1(mp *m) {
2232 if iscgo {
2233 var ts cgothreadstart
2234 if _cgo_thread_start == nil {
2235 throw("_cgo_thread_start missing")
2236 }
2237 ts.g.set(mp.g0)
2238 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2239 ts.fn = unsafe.Pointer(funcPC(mstart))
2240 if msanenabled {
2241 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2242 }
2243 execLock.rlock()
2244 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2245 execLock.runlock()
2246 return
2247 }
2248 execLock.rlock()
2249 newosproc(mp)
2250 execLock.runlock()
2251 }
2252
2253
2254
2255
2256
2257 func startTemplateThread() {
2258 if GOARCH == "wasm" {
2259 return
2260 }
2261
2262
2263
2264 mp := acquirem()
2265 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2266 releasem(mp)
2267 return
2268 }
2269 newm(templateThread, nil, -1)
2270 releasem(mp)
2271 }
2272
2273
2274
2275
2276
2277 var mFixupRace struct {
2278 lock mutex
2279 ctx uintptr
2280 }
2290 func mDoFixup() bool {
2291 _g_ := getg()
2292 if used := atomic.Load(&_g_.m.mFixup.used); used == 0 {
2293 return false
2294 }
2295
2296
2297 var sigmask sigset
2298 sigsave(&sigmask)
2299 sigblock(false)
2300 lock(&_g_.m.mFixup.lock)
2301 fn := _g_.m.mFixup.fn
2302 if fn != nil {
2303 if gcphase != _GCoff {
2314 throw("GC must be disabled to protect validity of fn value")
2315 }
2316 if _g_.racectx != 0 || !raceenabled {
2317 fn(false)
2318 } else {
2319
2320
2321
2322
2323
2324 lock(&mFixupRace.lock)
2325 _g_.racectx = mFixupRace.ctx
2326 fn(false)
2327 _g_.racectx = 0
2328 unlock(&mFixupRace.lock)
2329 }
2330 *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0
2331 atomic.Store(&_g_.m.mFixup.used, 0)
2332 }
2333 unlock(&_g_.m.mFixup.lock)
2334 msigrestore(sigmask)
2335 return fn != nil
2336 }
2337
2338
2339
2340
2341
2342
2343 func mDoFixupAndOSYield() {
2344 mDoFixup()
2345 osyield()
2346 }
2360 func templateThread() {
2361 lock(&sched.lock)
2362 sched.nmsys++
2363 checkdead()
2364 unlock(&sched.lock)
2365
2366 for {
2367 lock(&newmHandoff.lock)
2368 for newmHandoff.newm != 0 {
2369 newm := newmHandoff.newm.ptr()
2370 newmHandoff.newm = 0
2371 unlock(&newmHandoff.lock)
2372 for newm != nil {
2373 next := newm.schedlink.ptr()
2374 newm.schedlink = 0
2375 newm1(newm)
2376 newm = next
2377 }
2378 lock(&newmHandoff.lock)
2379 }
2380 newmHandoff.waiting = true
2381 noteclear(&newmHandoff.wake)
2382 unlock(&newmHandoff.lock)
2383 notesleep(&newmHandoff.wake)
2384 mDoFixup()
2385 }
2386 }
2387
2388
2389
2390 func stopm() {
2391 _g_ := getg()
2392
2393 if _g_.m.locks != 0 {
2394 throw("stopm holding locks")
2395 }
2396 if _g_.m.p != 0 {
2397 throw("stopm holding p")
2398 }
2399 if _g_.m.spinning {
2400 throw("stopm spinning")
2401 }
2402
2403 lock(&sched.lock)
2404 mput(_g_.m)
2405 unlock(&sched.lock)
2406 mPark()
2407 acquirep(_g_.m.nextp.ptr())
2408 _g_.m.nextp = 0
2409 }
2410
2411 func mspinning() {
2412
2413 getg().m.spinning = true
2414 }
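// startm schedules some M to run _p_, creating a new M if none is parked. If
// _p_ is nil it tries to grab an idle P and does nothing if there is none. If
// spinning is true, the caller has incremented nmspinning and that count is
// transferred to (or released by) the M that is started.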
2427 func startm(_p_ *p, spinning bool) {
2444 mp := acquirem()
2445 lock(&sched.lock)
2446 if _p_ == nil {
2447 _p_ = pidleget()
2448 if _p_ == nil {
2449 unlock(&sched.lock)
2450 if spinning {
2451
2452
2453 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2454 throw("startm: negative nmspinning")
2455 }
2456 }
2457 releasem(mp)
2458 return
2459 }
2460 }
2461 nmp := mget()
2462 if nmp == nil {
2475 id := mReserveID()
2476 unlock(&sched.lock)
2477
2478 var fn func()
2479 if spinning {
2480
2481 fn = mspinning
2482 }
2483 newm(fn, _p_, id)
2484
2485
2486 releasem(mp)
2487 return
2488 }
2489 unlock(&sched.lock)
2490 if nmp.spinning {
2491 throw("startm: m is spinning")
2492 }
2493 if nmp.nextp != 0 {
2494 throw("startm: m has p")
2495 }
2496 if spinning && !runqempty(_p_) {
2497 throw("startm: p has runnable gs")
2498 }
2499
2500 nmp.spinning = spinning
2501 nmp.nextp.set(_p_)
2502 notewakeup(&nmp.park)
2503
2504
2505 releasem(mp)
2506 }
2507
2508
2509
2510
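// handoffp hands off _p_ from a syscalling or locked M: it starts an M to run
// _p_ if there is local, global, or GC work, and otherwise puts _p_ on the
// idle list.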
2511 func handoffp(_p_ *p) {
2512
2513
2514
2515
2516 if !runqempty(_p_) || sched.runqsize != 0 {
2517 startm(_p_, false)
2518 return
2519 }
2520
2521 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2522 startm(_p_, false)
2523 return
2524 }
2525
2526
2527 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) {
2528 startm(_p_, true)
2529 return
2530 }
2531 lock(&sched.lock)
2532 if sched.gcwaiting != 0 {
2533 _p_.status = _Pgcstop
2534 sched.stopwait--
2535 if sched.stopwait == 0 {
2536 notewakeup(&sched.stopnote)
2537 }
2538 unlock(&sched.lock)
2539 return
2540 }
2541 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2542 sched.safePointFn(_p_)
2543 sched.safePointWait--
2544 if sched.safePointWait == 0 {
2545 notewakeup(&sched.safePointNote)
2546 }
2547 }
2548 if sched.runqsize != 0 {
2549 unlock(&sched.lock)
2550 startm(_p_, false)
2551 return
2552 }
2553
2554
2555 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2556 unlock(&sched.lock)
2557 startm(_p_, false)
2558 return
2559 }
2560
2561
2562
2563 when := nobarrierWakeTime(_p_)
2564 pidleput(_p_)
2565 unlock(&sched.lock)
2566
2567 if when != 0 {
2568 wakeNetPoller(when)
2569 }
2570 }
2571
2572
2573
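// wakep tries to add one more spinning M to run pending goroutines; it is
// called when a goroutine is made runnable and a P may be idle.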
2574 func wakep() {
2575 if atomic.Load(&sched.npidle) == 0 {
2576 return
2577 }
2578
2579 if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) {
2580 return
2581 }
2582 startm(nil, true)
2583 }
2584
2585
2586
2587 func stoplockedm() {
2588 _g_ := getg()
2589
2590 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2591 throw("stoplockedm: inconsistent locking")
2592 }
2593 if _g_.m.p != 0 {
2594
2595 _p_ := releasep()
2596 handoffp(_p_)
2597 }
2598 incidlelocked(1)
2599
2600 mPark()
2601 status := readgstatus(_g_.m.lockedg.ptr())
2602 if status&^_Gscan != _Grunnable {
2603 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
2604 dumpgstatus(_g_.m.lockedg.ptr())
2605 throw("stoplockedm: not runnable")
2606 }
2607 acquirep(_g_.m.nextp.ptr())
2608 _g_.m.nextp = 0
2609 }
2610
2611
2612
2613
2614 func startlockedm(gp *g) {
2615 _g_ := getg()
2616
2617 mp := gp.lockedm.ptr()
2618 if mp == _g_.m {
2619 throw("startlockedm: locked to me")
2620 }
2621 if mp.nextp != 0 {
2622 throw("startlockedm: m has p")
2623 }
2624
2625 incidlelocked(-1)
2626 _p_ := releasep()
2627 mp.nextp.set(_p_)
2628 notewakeup(&mp.park)
2629 stopm()
2630 }
2631
2632
2633
2634 func gcstopm() {
2635 _g_ := getg()
2636
2637 if sched.gcwaiting == 0 {
2638 throw("gcstopm: not waiting for gc")
2639 }
2640 if _g_.m.spinning {
2641 _g_.m.spinning = false
2642
2643
2644 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2645 throw("gcstopm: negative nmspinning")
2646 }
2647 }
2648 _p_ := releasep()
2649 lock(&sched.lock)
2650 _p_.status = _Pgcstop
2651 sched.stopwait--
2652 if sched.stopwait == 0 {
2653 notewakeup(&sched.stopnote)
2654 }
2655 unlock(&sched.lock)
2656 stopm()
2657 }
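// execute schedules gp to run on the current M. If inheritTime is true, gp
// inherits the remaining time in the current time slice; otherwise it starts a
// new slice. execute never returns: it jumps into gp with gogo.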
2668 func execute(gp *g, inheritTime bool) {
2669 _g_ := getg()
2670
2671
2672
2673 _g_.m.curg = gp
2674 gp.m = _g_.m
2675 casgstatus(gp, _Grunnable, _Grunning)
2676 gp.waitsince = 0
2677 gp.preempt = false
2678 gp.stackguard0 = gp.stack.lo + _StackGuard
2679 if !inheritTime {
2680 _g_.m.p.ptr().schedtick++
2681 }
2682
2683
2684 hz := sched.profilehz
2685 if _g_.m.profilehz != hz {
2686 setThreadCPUProfiler(hz)
2687 }
2688
2689 if trace.enabled {
2690
2691
2692 if gp.syscallsp != 0 && gp.sysblocktraced {
2693 traceGoSysExit(gp.sysexitticks)
2694 }
2695 traceGoStart()
2696 }
2697
2698 gogo(&gp.sched)
2699 }
2700
2701
2702
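// findrunnable finds a runnable goroutine to execute, checking the local and
// global run queues, the netpoller, and finally stealing from other Ps; it
// blocks until work is available.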
2703 func findrunnable() (gp *g, inheritTime bool) {
2704 _g_ := getg()
2705
2706
2707
2708
2709
2710 top:
2711 _p_ := _g_.m.p.ptr()
2712 if sched.gcwaiting != 0 {
2713 gcstopm()
2714 goto top
2715 }
2716 if _p_.runSafePointFn != 0 {
2717 runSafePointFn()
2718 }
2719
2720 now, pollUntil, _ := checkTimers(_p_, 0)
2721
2722 if fingwait && fingwake {
2723 if gp := wakefing(); gp != nil {
2724 ready(gp, 0, true)
2725 }
2726 }
2727 if *cgo_yield != nil {
2728 asmcgocall(*cgo_yield, nil)
2729 }
2730
2731
2732 if gp, inheritTime := runqget(_p_); gp != nil {
2733 return gp, inheritTime
2734 }
2735
2736
2737 if sched.runqsize != 0 {
2738 lock(&sched.lock)
2739 gp := globrunqget(_p_, 0)
2740 unlock(&sched.lock)
2741 if gp != nil {
2742 return gp, false
2743 }
2744 }
2753 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2754 if list := netpoll(0); !list.empty() {
2755 gp := list.pop()
2756 injectglist(&list)
2757 casgstatus(gp, _Gwaiting, _Grunnable)
2758 if trace.enabled {
2759 traceGoUnpark(gp, 0)
2760 }
2761 return gp, false
2762 }
2763 }
2764
2765
2766
2767
2768
2769
2770 procs := uint32(gomaxprocs)
2771 if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
2772 if !_g_.m.spinning {
2773 _g_.m.spinning = true
2774 atomic.Xadd(&sched.nmspinning, 1)
2775 }
2776
2777 gp, inheritTime, tnow, w, newWork := stealWork(now)
2778 now = tnow
2779 if gp != nil {
2780
2781 return gp, inheritTime
2782 }
2783 if newWork {
2784
2785
2786 goto top
2787 }
2788 if w != 0 && (pollUntil == 0 || w < pollUntil) {
2789
2790 pollUntil = w
2791 }
2792 }
2793
2794
2795
2796
2797
2798
2799 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2800 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2801 if node != nil {
2802 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2803 gp := node.gp.ptr()
2804 casgstatus(gp, _Gwaiting, _Grunnable)
2805 if trace.enabled {
2806 traceGoUnpark(gp, 0)
2807 }
2808 return gp, false
2809 }
2810 }
2811
2812
2813
2814
2815
2816 gp, otherReady := beforeIdle(now, pollUntil)
2817 if gp != nil {
2818 casgstatus(gp, _Gwaiting, _Grunnable)
2819 if trace.enabled {
2820 traceGoUnpark(gp, 0)
2821 }
2822 return gp, false
2823 }
2824 if otherReady {
2825 goto top
2826 }
2827
2828
2829
2830
2831
2832 allpSnapshot := allp
2833
2834
2835 idlepMaskSnapshot := idlepMask
2836 timerpMaskSnapshot := timerpMask
2837
2838
2839 lock(&sched.lock)
2840 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2841 unlock(&sched.lock)
2842 goto top
2843 }
2844 if sched.runqsize != 0 {
2845 gp := globrunqget(_p_, 0)
2846 unlock(&sched.lock)
2847 return gp, false
2848 }
2849 if releasep() != _p_ {
2850 throw("findrunnable: wrong p")
2851 }
2852 pidleput(_p_)
2853 unlock(&sched.lock)
2875 wasSpinning := _g_.m.spinning
2876 if _g_.m.spinning {
2877 _g_.m.spinning = false
2878 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2879 throw("findrunnable: negative nmspinning")
2880 }
2889 _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
2890 if _p_ != nil {
2891 acquirep(_p_)
2892 _g_.m.spinning = true
2893 atomic.Xadd(&sched.nmspinning, 1)
2894 goto top
2895 }
2896
2897
2898 _p_, gp = checkIdleGCNoP()
2899 if _p_ != nil {
2900 acquirep(_p_)
2901 _g_.m.spinning = true
2902 atomic.Xadd(&sched.nmspinning, 1)
2903
2904
2905 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2906 casgstatus(gp, _Gwaiting, _Grunnable)
2907 if trace.enabled {
2908 traceGoUnpark(gp, 0)
2909 }
2910 return gp, false
2911 }
2912
2913
2914
2915
2916
2917
2918
2919 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
2920 }
2921
2922
2923 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2924 atomic.Store64(&sched.pollUntil, uint64(pollUntil))
2925 if _g_.m.p != 0 {
2926 throw("findrunnable: netpoll with p")
2927 }
2928 if _g_.m.spinning {
2929 throw("findrunnable: netpoll with spinning")
2930 }
2931 delay := int64(-1)
2932 if pollUntil != 0 {
2933 if now == 0 {
2934 now = nanotime()
2935 }
2936 delay = pollUntil - now
2937 if delay < 0 {
2938 delay = 0
2939 }
2940 }
2941 if faketime != 0 {
2942
2943 delay = 0
2944 }
2945 list := netpoll(delay)
2946 atomic.Store64(&sched.pollUntil, 0)
2947 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2948 if faketime != 0 && list.empty() {
2949
2950
2951 stopm()
2952 goto top
2953 }
2954 lock(&sched.lock)
2955 _p_ = pidleget()
2956 unlock(&sched.lock)
2957 if _p_ == nil {
2958 injectglist(&list)
2959 } else {
2960 acquirep(_p_)
2961 if !list.empty() {
2962 gp := list.pop()
2963 injectglist(&list)
2964 casgstatus(gp, _Gwaiting, _Grunnable)
2965 if trace.enabled {
2966 traceGoUnpark(gp, 0)
2967 }
2968 return gp, false
2969 }
2970 if wasSpinning {
2971 _g_.m.spinning = true
2972 atomic.Xadd(&sched.nmspinning, 1)
2973 }
2974 goto top
2975 }
2976 } else if pollUntil != 0 && netpollinited() {
2977 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
2978 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
2979 netpollBreak()
2980 }
2981 }
2982 stopm()
2983 goto top
2984 }
2985
2986
2987
2988
2989
2990 func pollWork() bool {
2991 if sched.runqsize != 0 {
2992 return true
2993 }
2994 p := getg().m.p.ptr()
2995 if !runqempty(p) {
2996 return true
2997 }
2998 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2999 if list := netpoll(0); !list.empty() {
3000 injectglist(&list)
3001 return true
3002 }
3003 }
3004 return false
3005 }
3006
3007
3008
3009
3010
3011
3012
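// stealWork attempts to steal a runnable goroutine or timer from any other P.
// It returns the stolen goroutine (if any), the updated now and pollUntil
// times, and whether new work may have been readied elsewhere.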
3013 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3014 pp := getg().m.p.ptr()
3015
3016 ranTimer := false
3017
3018 const stealTries = 4
3019 for i := 0; i < stealTries; i++ {
3020 stealTimersOrRunNextG := i == stealTries-1
3021
3022 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
3023 if sched.gcwaiting != 0 {
3024
3025 return nil, false, now, pollUntil, true
3026 }
3027 p2 := allp[enum.position()]
3028 if pp == p2 {
3029 continue
3030 }
3045 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3046 tnow, w, ran := checkTimers(p2, now)
3047 now = tnow
3048 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3049 pollUntil = w
3050 }
3051 if ran {
3060 if gp, inheritTime := runqget(pp); gp != nil {
3061 return gp, inheritTime, now, pollUntil, ranTimer
3062 }
3063 ranTimer = true
3064 }
3065 }
3066
3067
3068 if !idlepMask.read(enum.position()) {
3069 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3070 return gp, false, now, pollUntil, ranTimer
3071 }
3072 }
3073 }
3074 }
3075
3076
3077
3078
3079 return nil, false, now, pollUntil, ranTimer
3080 }
3081
3082
3083
3084
3085
3086
3087 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3088 for id, p2 := range allpSnapshot {
3089 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3090 lock(&sched.lock)
3091 pp := pidleget()
3092 unlock(&sched.lock)
3093 if pp != nil {
3094 return pp
3095 }
3096
3097
3098 break
3099 }
3100 }
3101
3102 return nil
3103 }
3104
3105
3106
3107
3108 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3109 for id, p2 := range allpSnapshot {
3110 if timerpMaskSnapshot.read(uint32(id)) {
3111 w := nobarrierWakeTime(p2)
3112 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3113 pollUntil = w
3114 }
3115 }
3116 }
3117
3118 return pollUntil
3119 }
3120
3121
3122
3123
3124
3125 func checkIdleGCNoP() (*p, *g) {
3126
3127
3128 if atomic.Load(&gcBlackenEnabled) == 0 {
3129 return nil, nil
3130 }
3131 if !gcMarkWorkAvailable(nil) {
3132 return nil, nil
3133 }
3152 lock(&sched.lock)
3153 pp := pidleget()
3154 if pp == nil {
3155 unlock(&sched.lock)
3156 return nil, nil
3157 }
3158
3159
3160
3161 if gcBlackenEnabled == 0 {
3162 pidleput(pp)
3163 unlock(&sched.lock)
3164 return nil, nil
3165 }
3166
3167 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3168 if node == nil {
3169 pidleput(pp)
3170 unlock(&sched.lock)
3171 return nil, nil
3172 }
3173
3174 unlock(&sched.lock)
3175
3176 return pp, node.gp.ptr()
3177 }
3178
3179
3180
3181
3182 func wakeNetPoller(when int64) {
3183 if atomic.Load64(&sched.lastpoll) == 0 {
3184
3185
3186
3187
3188 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
3189 if pollerPollUntil == 0 || pollerPollUntil > when {
3190 netpollBreak()
3191 }
3192 } else {
3193
3194
3195 if GOOS != "plan9" {
3196 wakep()
3197 }
3198 }
3199 }
3200
3201 func resetspinning() {
3202 _g_ := getg()
3203 if !_g_.m.spinning {
3204 throw("resetspinning: not a spinning m")
3205 }
3206 _g_.m.spinning = false
3207 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
3208 if int32(nmspinning) < 0 {
3209 throw("findrunnable: negative nmspinning")
3210 }
3211
3212
3213
3214 wakep()
3215 }
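// injectglist marks the goroutines on glist runnable and distributes them
// between the global run queue and the current P's local queue, starting idle
// Ms to run them as needed; glist is emptied.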
3225 func injectglist(glist *gList) {
3226 if glist.empty() {
3227 return
3228 }
3229 if trace.enabled {
3230 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
3231 traceGoUnpark(gp, 0)
3232 }
3233 }
3234
3235
3236
3237 head := glist.head.ptr()
3238 var tail *g
3239 qsize := 0
3240 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3241 tail = gp
3242 qsize++
3243 casgstatus(gp, _Gwaiting, _Grunnable)
3244 }
3245
3246
3247 var q gQueue
3248 q.head.set(head)
3249 q.tail.set(tail)
3250 *glist = gList{}
3251
3252 startIdle := func(n int) {
3253 for ; n != 0 && sched.npidle != 0; n-- {
3254 startm(nil, false)
3255 }
3256 }
3257
3258 pp := getg().m.p.ptr()
3259 if pp == nil {
3260 lock(&sched.lock)
3261 globrunqputbatch(&q, int32(qsize))
3262 unlock(&sched.lock)
3263 startIdle(qsize)
3264 return
3265 }
3266
3267 npidle := int(atomic.Load(&sched.npidle))
3268 var globq gQueue
3269 var n int
3270 for n = 0; n < npidle && !q.empty(); n++ {
3271 g := q.pop()
3272 globq.pushBack(g)
3273 }
3274 if n > 0 {
3275 lock(&sched.lock)
3276 globrunqputbatch(&globq, int32(n))
3277 unlock(&sched.lock)
3278 startIdle(n)
3279 qsize -= n
3280 }
3281
3282 if !q.empty() {
3283 runqputbatch(pp, &q, qsize)
3284 }
3285 }
3286
3287
3288
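// schedule performs one round of scheduling: find a runnable goroutine and
// execute it. It never returns.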
3289 func schedule() {
3290 _g_ := getg()
3291
3292 if _g_.m.locks != 0 {
3293 throw("schedule: holding locks")
3294 }
3295
3296 if _g_.m.lockedg != 0 {
3297 stoplockedm()
3298 execute(_g_.m.lockedg.ptr(), false)
3299 }
3300
3301
3302
3303 if _g_.m.incgo {
3304 throw("schedule: in cgo")
3305 }
3306
3307 top:
3308 pp := _g_.m.p.ptr()
3309 pp.preempt = false
3310
3311 if sched.gcwaiting != 0 {
3312 gcstopm()
3313 goto top
3314 }
3315 if pp.runSafePointFn != 0 {
3316 runSafePointFn()
3317 }
3318
3319
3320
3321
3322 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3323 throw("schedule: spinning with local work")
3324 }
3325
3326 checkTimers(pp, 0)
3327
3328 var gp *g
3329 var inheritTime bool
3330
3331
3332
3333
3334 tryWakeP := false
3335 if trace.enabled || trace.shutdown {
3336 gp = traceReader()
3337 if gp != nil {
3338 casgstatus(gp, _Gwaiting, _Grunnable)
3339 traceGoUnpark(gp, 0)
3340 tryWakeP = true
3341 }
3342 }
3343 if gp == nil && gcBlackenEnabled != 0 {
3344 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
3345 if gp != nil {
3346 tryWakeP = true
3347 }
3348 }
3349 if gp == nil {
3350
3351
3352
3353 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
3354 lock(&sched.lock)
3355 gp = globrunqget(_g_.m.p.ptr(), 1)
3356 unlock(&sched.lock)
3357 }
3358 }
3359 if gp == nil {
3360 gp, inheritTime = runqget(_g_.m.p.ptr())
3361
3362
3363 }
3364 if gp == nil {
3365 gp, inheritTime = findrunnable()
3366 }
3367
3368
3369
3370
3371 if _g_.m.spinning {
3372 resetspinning()
3373 }
3374
3375 if sched.disable.user && !schedEnabled(gp) {
3376
3377
3378
3379 lock(&sched.lock)
3380 if schedEnabled(gp) {
3381
3382
3383 unlock(&sched.lock)
3384 } else {
3385 sched.disable.runnable.pushBack(gp)
3386 sched.disable.n++
3387 unlock(&sched.lock)
3388 goto top
3389 }
3390 }
3391
3392
3393
3394 if tryWakeP {
3395 wakep()
3396 }
3397 if gp.lockedm != 0 {
3398
3399
3400 startlockedm(gp)
3401 goto top
3402 }
3403
3404 execute(gp, inheritTime)
3405 }
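// dropg removes the association between the current M and its current
// goroutine (m.curg).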
3414 func dropg() {
3415 _g_ := getg()
3416
3417 setMNoWB(&_g_.m.curg.m, nil)
3418 setGNoWB(&_g_.m.curg, nil)
3419 }
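// checkTimers runs any of pp's timers that are ready. It returns the current
// time, the time when the next timer fires (0 if there is none), and whether
// any timer was run.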
3430 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
3431
3432
3433 next := int64(atomic.Load64(&pp.timer0When))
3434 nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
3435 if next == 0 || (nextAdj != 0 && nextAdj < next) {
3436 next = nextAdj
3437 }
3438
3439 if next == 0 {
3440
3441 return now, 0, false
3442 }
3443
3444 if now == 0 {
3445 now = nanotime()
3446 }
3447 if now < next {
3448
3449
3450
3451
3452 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) {
3453 return now, next, false
3454 }
3455 }
3456
3457 lock(&pp.timersLock)
3458
3459 if len(pp.timers) > 0 {
3460 adjusttimers(pp, now)
3461 for len(pp.timers) > 0 {
3462
3463
3464 if tw := runtimer(pp, now); tw != 0 {
3465 if tw > 0 {
3466 pollUntil = tw
3467 }
3468 break
3469 }
3470 ran = true
3471 }
3472 }
3473
3474
3475
3476
3477 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 {
3478 clearDeletedTimers(pp)
3479 }
3480
3481 unlock(&pp.timersLock)
3482
3483 return now, pollUntil, ran
3484 }
3485
3486 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
3487 unlock((*mutex)(lock))
3488 return true
3489 }
3490
3491
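// park_m is the continuation of gopark, running on g0: it moves gp to
// _Gwaiting, drops it from the m, runs the wait-unlock function if any
// (re-executing gp if that function returns false), and calls schedule.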
3492 func park_m(gp *g) {
3493 _g_ := getg()
3494
3495 if trace.enabled {
3496 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
3497 }
3498
3499 casgstatus(gp, _Grunning, _Gwaiting)
3500 dropg()
3501
3502 if fn := _g_.m.waitunlockf; fn != nil {
3503 ok := fn(gp, _g_.m.waitlock)
3504 _g_.m.waitunlockf = nil
3505 _g_.m.waitlock = nil
3506 if !ok {
3507 if trace.enabled {
3508 traceGoUnpark(gp, 2)
3509 }
3510 casgstatus(gp, _Gwaiting, _Grunnable)
3511 execute(gp, true)
3512 }
3513 }
3514 schedule()
3515 }
3516
3517 func goschedImpl(gp *g) {
3518 status := readgstatus(gp)
3519 if status&^_Gscan != _Grunning {
3520 dumpgstatus(gp)
3521 throw("bad g status")
3522 }
3523 casgstatus(gp, _Grunning, _Grunnable)
3524 dropg()
3525 lock(&sched.lock)
3526 globrunqput(gp)
3527 unlock(&sched.lock)
3528
3529 schedule()
3530 }
3531
3532
3533 func gosched_m(gp *g) {
3534 if trace.enabled {
3535 traceGoSched()
3536 }
3537 goschedImpl(gp)
3538 }
3539
3540
3541 func goschedguarded_m(gp *g) {
3542
3543 if !canPreemptM(gp.m) {
3544 gogo(&gp.sched)
3545 }
3546
3547 if trace.enabled {
3548 traceGoSched()
3549 }
3550 goschedImpl(gp)
3551 }
3552
3553 func gopreempt_m(gp *g) {
3554 if trace.enabled {
3555 traceGoPreempt()
3556 }
3557 goschedImpl(gp)
3558 }
3559
3560
3561
3562
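// preemptPark parks gp and puts it in _Gpreempted in response to a
// preemption request at an async safe point.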
3563 func preemptPark(gp *g) {
3564 if trace.enabled {
3565 traceGoPark(traceEvGoBlock, 0)
3566 }
3567 status := readgstatus(gp)
3568 if status&^_Gscan != _Grunning {
3569 dumpgstatus(gp)
3570 throw("bad g status")
3571 }
3572 gp.waitreason = waitReasonPreempted
3573
3574 if gp.asyncSafePoint {
3575
3576
3577
3578 f := findfunc(gp.sched.pc)
3579 if !f.valid() {
3580 throw("preempt at unknown pc")
3581 }
3582 if f.flag&funcFlag_SPWRITE != 0 {
3583 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
3584 throw("preempt SPWRITE")
3585 }
3586 }
3587
3588
3589
3590
3591
3592
3593
3594 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
3595 dropg()
3596 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
3597 schedule()
3598 }
3599
3600
3601
3602
3603 func goyield() {
3604 checkTimeouts()
3605 mcall(goyield_m)
3606 }
3607
3608 func goyield_m(gp *g) {
3609 if trace.enabled {
3610 traceGoPreempt()
3611 }
3612 pp := gp.m.p.ptr()
3613 casgstatus(gp, _Grunning, _Grunnable)
3614 dropg()
3615 runqput(pp, gp, false)
3616 schedule()
3617 }
3618
3619
3620 func goexit1() {
3621 if raceenabled {
3622 racegoend()
3623 }
3624 if trace.enabled {
3625 traceGoEnd()
3626 }
3627 mcall(goexit0)
3628 }
3629
3630
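// goexit0 runs on g0 after a goroutine finishes: it moves gp to _Gdead,
// clears its state, returns it to the free list, and calls schedule.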
3631 func goexit0(gp *g) {
3632 _g_ := getg()
3633
3634 casgstatus(gp, _Grunning, _Gdead)
3635 if isSystemGoroutine(gp, false) {
3636 atomic.Xadd(&sched.ngsys, -1)
3637 }
3638 gp.m = nil
3639 locked := gp.lockedm != 0
3640 gp.lockedm = 0
3641 _g_.m.lockedg = 0
3642 gp.preemptStop = false
3643 gp.paniconfault = false
3644 gp._defer = nil
3645 gp._panic = nil
3646 gp.writebuf = nil
3647 gp.waitreason = 0
3648 gp.param = nil
3649 gp.labels = nil
3650 gp.timer = nil
3651
3652 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
3653
3654
3655
3656 assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
3657 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
3658 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
3659 gp.gcAssistBytes = 0
3660 }
3661
3662 dropg()
3663
3664 if GOARCH == "wasm" {
3665 gfput(_g_.m.p.ptr(), gp)
3666 schedule()
3667 }
3668
3669 if _g_.m.lockedInt != 0 {
3670 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
3671 throw("internal lockOSThread error")
3672 }
3673 gfput(_g_.m.p.ptr(), gp)
3674 if locked {
3675
3676
3677
3678
3679
3680
3681 if GOOS != "plan9" {
3682 gogo(&_g_.m.g0.sched)
3683 } else {
3684
3685
3686 _g_.m.lockedExt = 0
3687 }
3688 }
3689 schedule()
3690 }
3691
3692
3693
3694
3695
3696
3697
3698
3699
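// save updates getg().sched to refer to pc and sp so that a following gogo
// will restore pc and sp. It must not be called on the g0 or gsignal stacks.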
3700 func save(pc, sp uintptr) {
3701 _g_ := getg()
3702
3703 if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
3704
3705
3706
3707
3708
3709 throw("save on system g not allowed")
3710 }
3711
3712 _g_.sched.pc = pc
3713 _g_.sched.sp = sp
3714 _g_.sched.lr = 0
3715 _g_.sched.ret = 0
3716
3717
3718
3719 if _g_.sched.ctxt != nil {
3720 badctxt()
3721 }
3722 }
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
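// reentersyscall is the common entry path for system calls. It saves the
// caller's pc/sp, moves the goroutine to _Gsyscall, and marks its P as
// _Psyscall so that sysmon can retake the P if the syscall blocks.
// save is repeated after calls that run on the system stack because those
// calls can clobber g.sched.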
3761 func reentersyscall(pc, sp uintptr) {
3762 _g_ := getg()
3763
3764
3765
3766 _g_.m.locks++
3767
3768
3769
3770
3771
3772 _g_.stackguard0 = stackPreempt
3773 _g_.throwsplit = true
3774
3775
3776 save(pc, sp)
3777 _g_.syscallsp = sp
3778 _g_.syscallpc = pc
3779 casgstatus(_g_, _Grunning, _Gsyscall)
3780 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3781 systemstack(func() {
3782 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3783 throw("entersyscall")
3784 })
3785 }
3786
3787 if trace.enabled {
3788 systemstack(traceGoSysCall)
3789
3790
3791
3792 save(pc, sp)
3793 }
3794
3795 if atomic.Load(&sched.sysmonwait) != 0 {
3796 systemstack(entersyscall_sysmon)
3797 save(pc, sp)
3798 }
3799
3800 if _g_.m.p.ptr().runSafePointFn != 0 {
3801
3802 systemstack(runSafePointFn)
3803 save(pc, sp)
3804 }
3805
3806 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3807 _g_.sysblocktraced = true
3808 pp := _g_.m.p.ptr()
3809 pp.m = 0
3810 _g_.m.oldp.set(pp)
3811 _g_.m.p = 0
3812 atomic.Store(&pp.status, _Psyscall)
3813 if sched.gcwaiting != 0 {
3814 systemstack(entersyscall_gcwait)
3815 save(pc, sp)
3816 }
3817
3818 _g_.m.locks--
3819 }
3820
3821
3822
3823
3824
3825
3826
3827 func entersyscall() {
3828 reentersyscall(getcallerpc(), getcallersp())
3829 }
3830
3831 func entersyscall_sysmon() {
3832 lock(&sched.lock)
3833 if atomic.Load(&sched.sysmonwait) != 0 {
3834 atomic.Store(&sched.sysmonwait, 0)
3835 notewakeup(&sched.sysmonnote)
3836 }
3837 unlock(&sched.lock)
3838 }
3839
3840 func entersyscall_gcwait() {
3841 _g_ := getg()
3842 _p_ := _g_.m.oldp.ptr()
3843
3844 lock(&sched.lock)
3845 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
3846 if trace.enabled {
3847 traceGoSysBlock(_p_)
3848 traceProcStop(_p_)
3849 }
3850 _p_.syscalltick++
3851 if sched.stopwait--; sched.stopwait == 0 {
3852 notewakeup(&sched.stopnote)
3853 }
3854 }
3855 unlock(&sched.lock)
3856 }
3857
3858
3859
3860 func entersyscallblock() {
3861 _g_ := getg()
3862
3863 _g_.m.locks++
3864 _g_.throwsplit = true
3865 _g_.stackguard0 = stackPreempt
3866 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
3867 _g_.sysblocktraced = true
3868 _g_.m.p.ptr().syscalltick++
3869
3870
3871 pc := getcallerpc()
3872 sp := getcallersp()
3873 save(pc, sp)
3874 _g_.syscallsp = _g_.sched.sp
3875 _g_.syscallpc = _g_.sched.pc
3876 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3877 sp1 := sp
3878 sp2 := _g_.sched.sp
3879 sp3 := _g_.syscallsp
3880 systemstack(func() {
3881 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3882 throw("entersyscallblock")
3883 })
3884 }
3885 casgstatus(_g_, _Grunning, _Gsyscall)
3886 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
3887 systemstack(func() {
3888 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
3889 throw("entersyscallblock")
3890 })
3891 }
3892
3893 systemstack(entersyscallblock_handoff)
3894
3895
3896 save(getcallerpc(), getcallersp())
3897
3898 _g_.m.locks--
3899 }
3900
3901 func entersyscallblock_handoff() {
3902 if trace.enabled {
3903 traceGoSysCall()
3904 traceGoSysBlock(getg().m.p.ptr())
3905 }
3906 handoffp(releasep())
3907 }
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
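// exitsyscall is called when a goroutine returns from a system call.
// It tries the fast path of re-acquiring a P directly; failing that it
// switches to g0 and queues the goroutine via exitsyscall0.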
3921 func exitsyscall() {
3922 _g_ := getg()
3923
3924 _g_.m.locks++
3925 if getcallersp() > _g_.syscallsp {
3926 throw("exitsyscall: syscall frame is no longer valid")
3927 }
3928
3929 _g_.waitsince = 0
3930 oldp := _g_.m.oldp.ptr()
3931 _g_.m.oldp = 0
3932 if exitsyscallfast(oldp) {
3933 if trace.enabled {
3934 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3935 systemstack(traceGoStart)
3936 }
3937 }
3938
3939 _g_.m.p.ptr().syscalltick++
3940
3941 casgstatus(_g_, _Gsyscall, _Grunning)
3942
3943
3944
3945 _g_.syscallsp = 0
3946 _g_.m.locks--
3947 if _g_.preempt {
3948
3949 _g_.stackguard0 = stackPreempt
3950 } else {
3951
3952 _g_.stackguard0 = _g_.stack.lo + _StackGuard
3953 }
3954 _g_.throwsplit = false
3955
3956 if sched.disable.user && !schedEnabled(_g_) {
3957
3958 Gosched()
3959 }
3960
3961 return
3962 }
3963
3964 _g_.sysexitticks = 0
3965 if trace.enabled {
3966
3967
3968 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
3969 osyield()
3970 }
3971
3972
3973
3974
3975 _g_.sysexitticks = cputicks()
3976 }
3977
3978 _g_.m.locks--
3979
3980
3981 mcall(exitsyscall0)
3982
3983
3984
3985
3986
3987
3988
3989 _g_.syscallsp = 0
3990 _g_.m.p.ptr().syscalltick++
3991 _g_.throwsplit = false
3992 }
3993
3994
3995 func exitsyscallfast(oldp *p) bool {
3996 _g_ := getg()
3997
3998
3999 if sched.stopwait == freezeStopWait {
4000 return false
4001 }
4002
4003
4004 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4005
4006 wirep(oldp)
4007 exitsyscallfast_reacquired()
4008 return true
4009 }
4010
4011
4012 if sched.pidle != 0 {
4013 var ok bool
4014 systemstack(func() {
4015 ok = exitsyscallfast_pidle()
4016 if ok && trace.enabled {
4017 if oldp != nil {
4018
4019
4020 for oldp.syscalltick == _g_.m.syscalltick {
4021 osyield()
4022 }
4023 }
4024 traceGoSysExit(0)
4025 }
4026 })
4027 if ok {
4028 return true
4029 }
4030 }
4031 return false
4032 }
4033
4034
4035
4036
4037
4038
4039 func exitsyscallfast_reacquired() {
4040 _g_ := getg()
4041 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
4042 if trace.enabled {
4043
4044
4045
4046 systemstack(func() {
4047
4048 traceGoSysBlock(_g_.m.p.ptr())
4049
4050 traceGoSysExit(0)
4051 })
4052 }
4053 _g_.m.p.ptr().syscalltick++
4054 }
4055 }
4056
4057 func exitsyscallfast_pidle() bool {
4058 lock(&sched.lock)
4059 _p_ := pidleget()
4060 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
4061 atomic.Store(&sched.sysmonwait, 0)
4062 notewakeup(&sched.sysmonnote)
4063 }
4064 unlock(&sched.lock)
4065 if _p_ != nil {
4066 acquirep(_p_)
4067 return true
4068 }
4069 return false
4070 }
4071
4072
4073
4074
4075
4076
4077
4078 func exitsyscall0(gp *g) {
4079 casgstatus(gp, _Gsyscall, _Grunnable)
4080 dropg()
4081 lock(&sched.lock)
4082 var _p_ *p
4083 if schedEnabled(gp) {
4084 _p_ = pidleget()
4085 }
4086 var locked bool
4087 if _p_ == nil {
4088 globrunqput(gp)
4089
4090
4091
4092
4093
4094
4095 locked = gp.lockedm != 0
4096 } else if atomic.Load(&sched.sysmonwait) != 0 {
4097 atomic.Store(&sched.sysmonwait, 0)
4098 notewakeup(&sched.sysmonnote)
4099 }
4100 unlock(&sched.lock)
4101 if _p_ != nil {
4102 acquirep(_p_)
4103 execute(gp, false)
4104 }
4105 if locked {
4106
4107
4108
4109
4110 stoplockedm()
4111 execute(gp, false)
4112 }
4113 stopm()
4114 schedule()
4115 }
4116
4117 func beforefork() {
4118 gp := getg().m.curg
4119
4120
4121
4122
4123 gp.m.locks++
4124 sigsave(&gp.m.sigmask)
4125 sigblock(false)
4126
4127
4128
4129
4130
4131 gp.stackguard0 = stackFork
4132 }
4133
4134
4135
4136
4137 func syscall_runtime_BeforeFork() {
4138 systemstack(beforefork)
4139 }
4140
4141 func afterfork() {
4142 gp := getg().m.curg
4143
4144
4145 gp.stackguard0 = gp.stack.lo + _StackGuard
4146
4147 msigrestore(gp.m.sigmask)
4148
4149 gp.m.locks--
4150 }
4151
4152
4153
4154
4155 func syscall_runtime_AfterFork() {
4156 systemstack(afterfork)
4157 }
4158
4159
4160
4161 var inForkedChild bool
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174 func syscall_runtime_AfterForkInChild() {
4175
4176
4177
4178
4179 inForkedChild = true
4180
4181 clearSignalHandlers()
4182
4183
4184
4185 msigrestore(getg().m.sigmask)
4186
4187 inForkedChild = false
4188 }
4189
4190
4191
4192
4193 var pendingPreemptSignals uint32
4194
4195
4196
4197 func syscall_runtime_BeforeExec() {
4198
4199 execLock.lock()
4200
4201
4202
4203 if GOOS == "darwin" || GOOS == "ios" {
4204 for int32(atomic.Load(&pendingPreemptSignals)) > 0 {
4205 osyield()
4206 }
4207 }
4208 }
4209
4210
4211
4212 func syscall_runtime_AfterExec() {
4213 execLock.unlock()
4214 }
4215
4216
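// malg allocates a new g with a stack big enough for stacksize bytes.
// A negative stacksize means no stack is allocated.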
4217 func malg(stacksize int32) *g {
4218 newg := new(g)
4219 if stacksize >= 0 {
4220 stacksize = round2(_StackSystem + stacksize)
4221 systemstack(func() {
4222 newg.stack = stackalloc(uint32(stacksize))
4223 })
4224 newg.stackguard0 = newg.stack.lo + _StackGuard
4225 newg.stackguard1 = ^uintptr(0)
4226
4227
4228 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4229 }
4230 return newg
4231 }
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
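// newproc creates a new g running fn with siz bytes of arguments and puts
// it on the run queue of the current P. The compiler turns a go statement
// into a call to this function.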
4248 func newproc(siz int32, fn *funcval) {
4249 argp := add(unsafe.Pointer(&fn), sys.PtrSize)
4250 gp := getg()
4251 pc := getcallerpc()
4252 systemstack(func() {
4253 newg := newproc1(fn, argp, siz, gp, pc)
4254
4255 _p_ := getg().m.p.ptr()
4256 runqput(_p_, newg, true)
4257
4258 if mainStarted {
4259 wakep()
4260 }
4261 })
4262 }
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
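// newproc1 creates a new g in state _Grunnable, starting at fn, with narg
// bytes of arguments starting at argp. callerpc is the address of the go
// statement that created this. The caller is responsible for adding the new
// g to the scheduler.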
4273 func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g {
4274 if goexperiment.RegabiDefer && narg != 0 {
4275
4276
4277
4278
4279 throw("go with non-empty frame")
4280 }
4281
4282 _g_ := getg()
4283
4284 if fn == nil {
4285 _g_.m.throwing = -1
4286 throw("go of nil func value")
4287 }
4288 acquirem()
4289 siz := narg
4290 siz = (siz + 7) &^ 7
4291
4292
4293
4294
4295
4296 if siz >= _StackMin-4*sys.PtrSize-sys.PtrSize {
4297 throw("newproc: function arguments too large for new goroutine")
4298 }
4299
4300 _p_ := _g_.m.p.ptr()
4301 newg := gfget(_p_)
4302 if newg == nil {
4303 newg = malg(_StackMin)
4304 casgstatus(newg, _Gidle, _Gdead)
4305 allgadd(newg)
4306 }
4307 if newg.stack.hi == 0 {
4308 throw("newproc1: newg missing stack")
4309 }
4310
4311 if readgstatus(newg) != _Gdead {
4312 throw("newproc1: new g is not Gdead")
4313 }
4314
4315 totalSize := 4*sys.PtrSize + uintptr(siz) + sys.MinFrameSize
4316 totalSize += -totalSize & (sys.StackAlign - 1)
4317 sp := newg.stack.hi - totalSize
4318 spArg := sp
4319 if usesLR {
4320
4321 *(*uintptr)(unsafe.Pointer(sp)) = 0
4322 prepGoExitFrame(sp)
4323 spArg += sys.MinFrameSize
4324 }
4325 if narg > 0 {
4326 memmove(unsafe.Pointer(spArg), argp, uintptr(narg))
4327
4328
4329
4330
4331
4332
4333 if writeBarrier.needed && !_g_.m.curg.gcscandone {
4334 f := findfunc(fn.fn)
4335 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
4336 if stkmap.nbit > 0 {
4337
4338 bv := stackmapdata(stkmap, 0)
4339 bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
4340 }
4341 }
4342 }
4343
4344 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
4345 newg.sched.sp = sp
4346 newg.stktopsp = sp
4347 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
4348 newg.sched.g = guintptr(unsafe.Pointer(newg))
4349 gostartcallfn(&newg.sched, fn)
4350 newg.gopc = callerpc
4351 newg.ancestors = saveAncestors(callergp)
4352 newg.startpc = fn.fn
4353 if _g_.m.curg != nil {
4354 newg.labels = _g_.m.curg.labels
4355 }
4356 if isSystemGoroutine(newg, false) {
4357 atomic.Xadd(&sched.ngsys, +1)
4358 }
4359
4360 newg.trackingSeq = uint8(fastrand())
4361 if newg.trackingSeq%gTrackingPeriod == 0 {
4362 newg.tracking = true
4363 }
4364 casgstatus(newg, _Gdead, _Grunnable)
4365
4366 if _p_.goidcache == _p_.goidcacheend {
4367
4368
4369
4370 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
4371 _p_.goidcache -= _GoidCacheBatch - 1
4372 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
4373 }
4374 newg.goid = int64(_p_.goidcache)
4375 _p_.goidcache++
4376 if raceenabled {
4377 newg.racectx = racegostart(callerpc)
4378 }
4379 if trace.enabled {
4380 traceGoCreate(newg, newg.startpc)
4381 }
4382 releasem(_g_.m)
4383
4384 return newg
4385 }
4386
4387
4388
4389
4390 func saveAncestors(callergp *g) *[]ancestorInfo {
4391
4392 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
4393 return nil
4394 }
4395 var callerAncestors []ancestorInfo
4396 if callergp.ancestors != nil {
4397 callerAncestors = *callergp.ancestors
4398 }
4399 n := int32(len(callerAncestors)) + 1
4400 if n > debug.tracebackancestors {
4401 n = debug.tracebackancestors
4402 }
4403 ancestors := make([]ancestorInfo, n)
4404 copy(ancestors[1:], callerAncestors)
4405
4406 var pcs [_TracebackMaxFrames]uintptr
4407 npcs := gcallers(callergp, 0, pcs[:])
4408 ipcs := make([]uintptr, npcs)
4409 copy(ipcs, pcs[:])
4410 ancestors[0] = ancestorInfo{
4411 pcs: ipcs,
4412 goid: callergp.goid,
4413 gopc: callergp.gopc,
4414 }
4415
4416 ancestorsp := new([]ancestorInfo)
4417 *ancestorsp = ancestors
4418 return ancestorsp
4419 }
4420
4421
4422
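// gfput puts a dead goroutine on the per-P free list. If the local list is
// too long, a batch is transferred to the global list.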
4423 func gfput(_p_ *p, gp *g) {
4424 if readgstatus(gp) != _Gdead {
4425 throw("gfput: bad status (not Gdead)")
4426 }
4427
4428 stksize := gp.stack.hi - gp.stack.lo
4429
4430 if stksize != _FixedStack {
4431
4432 stackfree(gp.stack)
4433 gp.stack.lo = 0
4434 gp.stack.hi = 0
4435 gp.stackguard0 = 0
4436 }
4437
4438 _p_.gFree.push(gp)
4439 _p_.gFree.n++
4440 if _p_.gFree.n >= 64 {
4441 var (
4442 inc int32
4443 stackQ gQueue
4444 noStackQ gQueue
4445 )
4446 for _p_.gFree.n >= 32 {
4447 gp = _p_.gFree.pop()
4448 _p_.gFree.n--
4449 if gp.stack.lo == 0 {
4450 noStackQ.push(gp)
4451 } else {
4452 stackQ.push(gp)
4453 }
4454 inc++
4455 }
4456 lock(&sched.gFree.lock)
4457 sched.gFree.noStack.pushAll(noStackQ)
4458 sched.gFree.stack.pushAll(stackQ)
4459 sched.gFree.n += inc
4460 unlock(&sched.gFree.lock)
4461 }
4462 }
4463
4464
4465
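// gfget gets a goroutine from the per-P free list, refilling from the global
// list if the local list is empty. It returns nil if none is available.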
4466 func gfget(_p_ *p) *g {
4467 retry:
4468 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
4469 lock(&sched.gFree.lock)
4470
4471 for _p_.gFree.n < 32 {
4472
4473 gp := sched.gFree.stack.pop()
4474 if gp == nil {
4475 gp = sched.gFree.noStack.pop()
4476 if gp == nil {
4477 break
4478 }
4479 }
4480 sched.gFree.n--
4481 _p_.gFree.push(gp)
4482 _p_.gFree.n++
4483 }
4484 unlock(&sched.gFree.lock)
4485 goto retry
4486 }
4487 gp := _p_.gFree.pop()
4488 if gp == nil {
4489 return nil
4490 }
4491 _p_.gFree.n--
4492 if gp.stack.lo == 0 {
4493
4494 systemstack(func() {
4495 gp.stack = stackalloc(_FixedStack)
4496 })
4497 gp.stackguard0 = gp.stack.lo + _StackGuard
4498 } else {
4499 if raceenabled {
4500 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4501 }
4502 if msanenabled {
4503 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
4504 }
4505 }
4506 return gp
4507 }
4508
4509
4510 func gfpurge(_p_ *p) {
4511 var (
4512 inc int32
4513 stackQ gQueue
4514 noStackQ gQueue
4515 )
4516 for !_p_.gFree.empty() {
4517 gp := _p_.gFree.pop()
4518 _p_.gFree.n--
4519 if gp.stack.lo == 0 {
4520 noStackQ.push(gp)
4521 } else {
4522 stackQ.push(gp)
4523 }
4524 inc++
4525 }
4526 lock(&sched.gFree.lock)
4527 sched.gFree.noStack.pushAll(noStackQ)
4528 sched.gFree.stack.pushAll(stackQ)
4529 sched.gFree.n += inc
4530 unlock(&sched.gFree.lock)
4531 }
4532
4533
4534 func Breakpoint() {
4535 breakpoint()
4536 }
4537
4538
4539
4540
4541
4542 func dolockOSThread() {
4543 if GOARCH == "wasm" {
4544 return
4545 }
4546 _g_ := getg()
4547 _g_.m.lockedg.set(_g_)
4548 _g_.lockedm.set(_g_.m)
4549 }
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567 func LockOSThread() {
4568 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
4569
4570
4571
4572 startTemplateThread()
4573 }
4574 _g_ := getg()
4575 _g_.m.lockedExt++
4576 if _g_.m.lockedExt == 0 {
4577 _g_.m.lockedExt--
4578 panic("LockOSThread nesting overflow")
4579 }
4580 dolockOSThread()
4581 }
4582
4583
4584 func lockOSThread() {
4585 getg().m.lockedInt++
4586 dolockOSThread()
4587 }
4588
4589
4590
4591
4592
4593 func dounlockOSThread() {
4594 if GOARCH == "wasm" {
4595 return
4596 }
4597 _g_ := getg()
4598 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
4599 return
4600 }
4601 _g_.m.lockedg = 0
4602 _g_.lockedm = 0
4603 }
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619 func UnlockOSThread() {
4620 _g_ := getg()
4621 if _g_.m.lockedExt == 0 {
4622 return
4623 }
4624 _g_.m.lockedExt--
4625 dounlockOSThread()
4626 }
4627
4628
4629 func unlockOSThread() {
4630 _g_ := getg()
4631 if _g_.m.lockedInt == 0 {
4632 systemstack(badunlockosthread)
4633 }
4634 _g_.m.lockedInt--
4635 dounlockOSThread()
4636 }
4637
4638 func badunlockosthread() {
4639 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
4640 }
4641
4642 func gcount() int32 {
4643 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
4644 for _, _p_ := range allp {
4645 n -= _p_.gFree.n
4646 }
4647
4648
4649
4650 if n < 1 {
4651 n = 1
4652 }
4653 return n
4654 }
4655
4656 func mcount() int32 {
4657 return int32(sched.mnext - sched.nmfreed)
4658 }
4659
4660 var prof struct {
4661 signalLock uint32
4662 hz int32
4663 }
4664
4665 func _System() { _System() }
4666 func _ExternalCode() { _ExternalCode() }
4667 func _LostExternalCode() { _LostExternalCode() }
4668 func _GC() { _GC() }
4669 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
4670 func _VDSO() { _VDSO() }
4671
4672
4673
4674
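// sigprof records a CPU profile sample. It is called by the signal handler
// on SIGPROF and may run during a stop-the-world, so it must not allocate or
// acquire locks.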
4675 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
4676 if prof.hz == 0 {
4677 return
4678 }
4679
4680
4681
4682
4683 if mp != nil && mp.profilehz == 0 {
4684 return
4685 }
4686
4687
4688
4689
4690
4691
4692
4693 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
4694 if f := findfunc(pc); f.valid() {
4695 if hasPrefix(funcname(f), "runtime/internal/atomic") {
4696 cpuprof.lostAtomic++
4697 return
4698 }
4699 }
4700 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
4701
4702
4703
4704 cpuprof.lostAtomic++
4705 return
4706 }
4707 }
4708
4709
4710
4711
4712
4713
4714
4715 getg().m.mallocing++
4716
4717 var stk [maxCPUProfStack]uintptr
4718 n := 0
4719 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
4720 cgoOff := 0
4721
4722
4723
4724
4725
4726 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
4727 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
4728 cgoOff++
4729 }
4730 copy(stk[:], mp.cgoCallers[:cgoOff])
4731 mp.cgoCallers[0] = 0
4732 }
4733
4734
4735 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
4736 if n > 0 {
4737 n += cgoOff
4738 }
4739 } else {
4740 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4741 }
4742
4743 if n <= 0 {
4744
4745
4746 n = 0
4747 if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
4748
4749
4750 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
4751 }
4752 if n == 0 && mp != nil && mp.vdsoSP != 0 {
4753 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
4754 }
4755 if n == 0 {
4756
4757 n = 2
4758 if inVDSOPage(pc) {
4759 pc = funcPC(_VDSO) + sys.PCQuantum
4760 } else if pc > firstmoduledata.etext {
4761
4762 pc = funcPC(_ExternalCode) + sys.PCQuantum
4763 }
4764 stk[0] = pc
4765 if mp.preemptoff != "" {
4766 stk[1] = funcPC(_GC) + sys.PCQuantum
4767 } else {
4768 stk[1] = funcPC(_System) + sys.PCQuantum
4769 }
4770 }
4771 }
4772
4773 if prof.hz != 0 {
4774 cpuprof.add(gp, stk[:n])
4775 }
4776 getg().m.mallocing--
4777 }
4778
4779
4780
4781
4782 var sigprofCallers cgoCallers
4783 var sigprofCallersUse uint32
4784
4785
4786
4787
4788
4789
4790
4791 func sigprofNonGo() {
4792 if prof.hz != 0 {
4793 n := 0
4794 for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
4795 n++
4796 }
4797 cpuprof.addNonGo(sigprofCallers[:n])
4798 }
4799
4800 atomic.Store(&sigprofCallersUse, 0)
4801 }
4802
4803
4804
4805
4806
4807
4808 func sigprofNonGoPC(pc uintptr) {
4809 if prof.hz != 0 {
4810 stk := []uintptr{
4811 pc,
4812 funcPC(_ExternalCode) + sys.PCQuantum,
4813 }
4814 cpuprof.addNonGo(stk)
4815 }
4816 }
4817
4818
4819
4820 func setcpuprofilerate(hz int32) {
4821
4822 if hz < 0 {
4823 hz = 0
4824 }
4825
4826
4827
4828 _g_ := getg()
4829 _g_.m.locks++
4830
4831
4832
4833
4834 setThreadCPUProfiler(0)
4835
4836 for !atomic.Cas(&prof.signalLock, 0, 1) {
4837 osyield()
4838 }
4839 if prof.hz != hz {
4840 setProcessCPUProfiler(hz)
4841 prof.hz = hz
4842 }
4843 atomic.Store(&prof.signalLock, 0)
4844
4845 lock(&sched.lock)
4846 sched.profilehz = hz
4847 unlock(&sched.lock)
4848
4849 if hz != 0 {
4850 setThreadCPUProfiler(hz)
4851 }
4852
4853 _g_.m.locks--
4854 }
4855
4856
4857
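// init initializes pp, which may be a freshly allocated p or a p previously
// destroyed by a call to destroy.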
4858 func (pp *p) init(id int32) {
4859 pp.id = id
4860 pp.status = _Pgcstop
4861 pp.sudogcache = pp.sudogbuf[:0]
4862 for i := range pp.deferpool {
4863 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
4864 }
4865 pp.wbBuf.reset()
4866 if pp.mcache == nil {
4867 if id == 0 {
4868 if mcache0 == nil {
4869 throw("missing mcache?")
4870 }
4871
4872
4873 pp.mcache = mcache0
4874 } else {
4875 pp.mcache = allocmcache()
4876 }
4877 }
4878 if raceenabled && pp.raceprocctx == 0 {
4879 if id == 0 {
4880 pp.raceprocctx = raceprocctx0
4881 raceprocctx0 = 0
4882 } else {
4883 pp.raceprocctx = raceproccreate()
4884 }
4885 }
4886 lockInit(&pp.timersLock, lockRankTimers)
4887
4888
4889
4890 timerpMask.set(id)
4891
4892
4893 idlepMask.clear(id)
4894 }
4895
4896
4897
4898
4899
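// destroy releases all of the resources associated with pp and transitions
// it to status _Pdead. sched.lock must be held and the world must be stopped.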
4900 func (pp *p) destroy() {
4901 assertLockHeld(&sched.lock)
4902 assertWorldStopped()
4903
4904
4905 for pp.runqhead != pp.runqtail {
4906
4907 pp.runqtail--
4908 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
4909
4910 globrunqputhead(gp)
4911 }
4912 if pp.runnext != 0 {
4913 globrunqputhead(pp.runnext.ptr())
4914 pp.runnext = 0
4915 }
4916 if len(pp.timers) > 0 {
4917 plocal := getg().m.p.ptr()
4918
4919
4920
4921
4922 lock(&plocal.timersLock)
4923 lock(&pp.timersLock)
4924 moveTimers(plocal, pp.timers)
4925 pp.timers = nil
4926 pp.numTimers = 0
4927 pp.deletedTimers = 0
4928 atomic.Store64(&pp.timer0When, 0)
4929 unlock(&pp.timersLock)
4930 unlock(&plocal.timersLock)
4931 }
4932
4933 if gcphase != _GCoff {
4934 wbBufFlush1(pp)
4935 pp.gcw.dispose()
4936 }
4937 for i := range pp.sudogbuf {
4938 pp.sudogbuf[i] = nil
4939 }
4940 pp.sudogcache = pp.sudogbuf[:0]
4941 for i := range pp.deferpool {
4942 for j := range pp.deferpoolbuf[i] {
4943 pp.deferpoolbuf[i][j] = nil
4944 }
4945 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
4946 }
4947 systemstack(func() {
4948 for i := 0; i < pp.mspancache.len; i++ {
4949
4950 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
4951 }
4952 pp.mspancache.len = 0
4953 lock(&mheap_.lock)
4954 pp.pcache.flush(&mheap_.pages)
4955 unlock(&mheap_.lock)
4956 })
4957 freemcache(pp.mcache)
4958 pp.mcache = nil
4959 gfpurge(pp)
4960 traceProcFree(pp)
4961 if raceenabled {
4962 if pp.timerRaceCtx != 0 {
4963
4964
4965
4966
4967
4968 mp := getg().m
4969 phold := mp.p.ptr()
4970 mp.p.set(pp)
4971
4972 racectxend(pp.timerRaceCtx)
4973 pp.timerRaceCtx = 0
4974
4975 mp.p.set(phold)
4976 }
4977 raceprocdestroy(pp.raceprocctx)
4978 pp.raceprocctx = 0
4979 }
4980 pp.gcAssistTime = 0
4981 pp.status = _Pdead
4982 }
4983
4984
4985
4986
4987
4988
4989
4990
4991
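// procresize changes the number of processors to nprocs. sched.lock must be
// held and the world must be stopped. It returns the list of Ps with local
// work; the caller needs to schedule them.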
4992 func procresize(nprocs int32) *p {
4993 assertLockHeld(&sched.lock)
4994 assertWorldStopped()
4995
4996 old := gomaxprocs
4997 if old < 0 || nprocs <= 0 {
4998 throw("procresize: invalid arg")
4999 }
5000 if trace.enabled {
5001 traceGomaxprocs(nprocs)
5002 }
5003
5004
5005 now := nanotime()
5006 if sched.procresizetime != 0 {
5007 sched.totaltime += int64(old) * (now - sched.procresizetime)
5008 }
5009 sched.procresizetime = now
5010
5011 maskWords := (nprocs + 31) / 32
5012
5013
5014 if nprocs > int32(len(allp)) {
5015
5016
5017 lock(&allpLock)
5018 if nprocs <= int32(cap(allp)) {
5019 allp = allp[:nprocs]
5020 } else {
5021 nallp := make([]*p, nprocs)
5022
5023
5024 copy(nallp, allp[:cap(allp)])
5025 allp = nallp
5026 }
5027
5028 if maskWords <= int32(cap(idlepMask)) {
5029 idlepMask = idlepMask[:maskWords]
5030 timerpMask = timerpMask[:maskWords]
5031 } else {
5032 nidlepMask := make([]uint32, maskWords)
5033
5034 copy(nidlepMask, idlepMask)
5035 idlepMask = nidlepMask
5036
5037 ntimerpMask := make([]uint32, maskWords)
5038 copy(ntimerpMask, timerpMask)
5039 timerpMask = ntimerpMask
5040 }
5041 unlock(&allpLock)
5042 }
5043
5044
5045 for i := old; i < nprocs; i++ {
5046 pp := allp[i]
5047 if pp == nil {
5048 pp = new(p)
5049 }
5050 pp.init(i)
5051 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5052 }
5053
5054 _g_ := getg()
5055 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
5056
5057 _g_.m.p.ptr().status = _Prunning
5058 _g_.m.p.ptr().mcache.prepareForSweep()
5059 } else {
5060
5061
5062
5063
5064
5065 if _g_.m.p != 0 {
5066 if trace.enabled {
5067
5068
5069
5070 traceGoSched()
5071 traceProcStop(_g_.m.p.ptr())
5072 }
5073 _g_.m.p.ptr().m = 0
5074 }
5075 _g_.m.p = 0
5076 p := allp[0]
5077 p.m = 0
5078 p.status = _Pidle
5079 acquirep(p)
5080 if trace.enabled {
5081 traceGoStart()
5082 }
5083 }
5084
5085
5086 mcache0 = nil
5087
5088
5089 for i := nprocs; i < old; i++ {
5090 p := allp[i]
5091 p.destroy()
5092
5093 }
5094
5095
5096 if int32(len(allp)) != nprocs {
5097 lock(&allpLock)
5098 allp = allp[:nprocs]
5099 idlepMask = idlepMask[:maskWords]
5100 timerpMask = timerpMask[:maskWords]
5101 unlock(&allpLock)
5102 }
5103
5104 var runnablePs *p
5105 for i := nprocs - 1; i >= 0; i-- {
5106 p := allp[i]
5107 if _g_.m.p.ptr() == p {
5108 continue
5109 }
5110 p.status = _Pidle
5111 if runqempty(p) {
5112 pidleput(p)
5113 } else {
5114 p.m.set(mget())
5115 p.link.set(runnablePs)
5116 runnablePs = p
5117 }
5118 }
5119 stealOrder.reset(uint32(nprocs))
5120 var int32p *int32 = &gomaxprocs
5121 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5122 return runnablePs
5123 }
5124
5125
5126
5127
5128
5129
5130
5131 func acquirep(_p_ *p) {
5132
5133 wirep(_p_)
5134
5135
5136
5137
5138
5139 _p_.mcache.prepareForSweep()
5140
5141 if trace.enabled {
5142 traceProcStart()
5143 }
5144 }
5145
5146
5147
5148
5149
5150
5151
5152 func wirep(_p_ *p) {
5153 _g_ := getg()
5154
5155 if _g_.m.p != 0 {
5156 throw("wirep: already in go")
5157 }
5158 if _p_.m != 0 || _p_.status != _Pidle {
5159 id := int64(0)
5160 if _p_.m != 0 {
5161 id = _p_.m.ptr().id
5162 }
5163 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
5164 throw("wirep: invalid p state")
5165 }
5166 _g_.m.p.set(_p_)
5167 _p_.m.set(_g_.m)
5168 _p_.status = _Prunning
5169 }
5170
5171
5172 func releasep() *p {
5173 _g_ := getg()
5174
5175 if _g_.m.p == 0 {
5176 throw("releasep: invalid arg")
5177 }
5178 _p_ := _g_.m.p.ptr()
5179 if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
5180 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
5181 throw("releasep: invalid p state")
5182 }
5183 if trace.enabled {
5184 traceProcStop(_g_.m.p.ptr())
5185 }
5186 _g_.m.p = 0
5187 _p_.m = 0
5188 _p_.status = _Pidle
5189 return _p_
5190 }
5191
5192 func incidlelocked(v int32) {
5193 lock(&sched.lock)
5194 sched.nmidlelocked += v
5195 if v > 0 {
5196 checkdead()
5197 }
5198 unlock(&sched.lock)
5199 }
5200
5201
5202
5203
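// checkdead checks for a deadlock: if no M is running user code and nothing
// can wake a goroutine up (no pending timers), it throws.
// sched.lock must be held.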
5204 func checkdead() {
5205 assertLockHeld(&sched.lock)
5206
5207
5208
5209
5210 if islibrary || isarchive {
5211 return
5212 }
5213
5214
5215
5216
5217
5218 if panicking > 0 {
5219 return
5220 }
5221
5222
5223
5224
5225
5226 var run0 int32
5227 if !iscgo && cgoHasExtraM {
5228 mp := lockextra(true)
5229 haveExtraM := extraMCount > 0
5230 unlockextra(mp)
5231 if haveExtraM {
5232 run0 = 1
5233 }
5234 }
5235
5236 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
5237 if run > run0 {
5238 return
5239 }
5240 if run < 0 {
5241 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5242 throw("checkdead: inconsistent counts")
5243 }
5244
5245 grunning := 0
5246 forEachG(func(gp *g) {
5247 if isSystemGoroutine(gp, false) {
5248 return
5249 }
5250 s := readgstatus(gp)
5251 switch s &^ _Gscan {
5252 case _Gwaiting,
5253 _Gpreempted:
5254 grunning++
5255 case _Grunnable,
5256 _Grunning,
5257 _Gsyscall:
5258 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5259 throw("checkdead: runnable g")
5260 }
5261 })
5262 if grunning == 0 {
5263 unlock(&sched.lock)
5264 throw("no goroutines (main called runtime.Goexit) - deadlock!")
5265 }
5266
5267
5268 if faketime != 0 {
5269 when, _p_ := timeSleepUntil()
5270 if _p_ != nil {
5271 faketime = when
5272 for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
5273 if (*pp).ptr() == _p_ {
5274 *pp = _p_.link
5275 break
5276 }
5277 }
5278 mp := mget()
5279 if mp == nil {
5280
5281
5282 throw("checkdead: no m for timer")
5283 }
5284 mp.nextp.set(_p_)
5285 notewakeup(&mp.park)
5286 return
5287 }
5288 }
5289
5290
5291 for _, _p_ := range allp {
5292 if len(_p_.timers) > 0 {
5293 return
5294 }
5295 }
5296
5297 getg().m.throwing = -1
5298 unlock(&sched.lock)
5299 throw("all goroutines are asleep - deadlock!")
5300 }
5301
5302
5303
5304
5305
5306
5307 var forcegcperiod int64 = 2 * 60 * 1e9
5308
5309
5310
5311
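// sysmon runs on a dedicated M without a P. It retakes Ps blocked in
// syscalls, preempts long-running goroutines, polls the network when the
// scheduler has not, and triggers forced GCs and scavenging.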
5312 func sysmon() {
5313 lock(&sched.lock)
5314 sched.nmsys++
5315 checkdead()
5316 unlock(&sched.lock)
5317
5318
5319
5320 atomic.Store(&sched.sysmonStarting, 0)
5321
5322 lasttrace := int64(0)
5323 idle := 0
5324 delay := uint32(0)
5325
5326 for {
5327 if idle == 0 {
5328 delay = 20
5329 } else if idle > 50 {
5330 delay *= 2
5331 }
5332 if delay > 10*1000 {
5333 delay = 10 * 1000
5334 }
5335 usleep(delay)
5336 mDoFixup()
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353 now := nanotime()
5354 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
5355 lock(&sched.lock)
5356 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
5357 syscallWake := false
5358 next, _ := timeSleepUntil()
5359 if next > now {
5360 atomic.Store(&sched.sysmonwait, 1)
5361 unlock(&sched.lock)
5362
5363
5364 sleep := forcegcperiod / 2
5365 if next-now < sleep {
5366 sleep = next - now
5367 }
5368 shouldRelax := sleep >= osRelaxMinNS
5369 if shouldRelax {
5370 osRelax(true)
5371 }
5372 syscallWake = notetsleep(&sched.sysmonnote, sleep)
5373 mDoFixup()
5374 if shouldRelax {
5375 osRelax(false)
5376 }
5377 lock(&sched.lock)
5378 atomic.Store(&sched.sysmonwait, 0)
5379 noteclear(&sched.sysmonnote)
5380 }
5381 if syscallWake {
5382 idle = 0
5383 delay = 20
5384 }
5385 }
5386 unlock(&sched.lock)
5387 }
5388
5389 lock(&sched.sysmonlock)
5390
5391
5392 now = nanotime()
5393
5394
5395 if *cgo_yield != nil {
5396 asmcgocall(*cgo_yield, nil)
5397 }
5398
5399 lastpoll := int64(atomic.Load64(&sched.lastpoll))
5400 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
5401 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
5402 list := netpoll(0)
5403 if !list.empty() {
5404
5405
5406
5407
5408
5409
5410
5411 incidlelocked(-1)
5412 injectglist(&list)
5413 incidlelocked(1)
5414 }
5415 }
5416 mDoFixup()
5417 if GOOS == "netbsd" {
5418
5419
5420
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430
5431
5432
5433 if next, _ := timeSleepUntil(); next < now {
5434 startm(nil, false)
5435 }
5436 }
5437 if atomic.Load(&scavenge.sysmonWake) != 0 {
5438
5439 wakeScavenger()
5440 }
5441
5442
5443 if retake(now) != 0 {
5444 idle = 0
5445 } else {
5446 idle++
5447 }
5448
5449 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
5450 lock(&forcegc.lock)
5451 forcegc.idle = 0
5452 var list gList
5453 list.push(forcegc.g)
5454 injectglist(&list)
5455 unlock(&forcegc.lock)
5456 }
5457 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
5458 lasttrace = now
5459 schedtrace(debug.scheddetail > 0)
5460 }
5461 unlock(&sched.sysmonlock)
5462 }
5463 }
5464
5465 type sysmontick struct {
5466 schedtick uint32
5467 schedwhen int64
5468 syscalltick uint32
5469 syscallwhen int64
5470 }
5471
5472
5473
5474 const forcePreemptNS = 10 * 1000 * 1000
5475
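// retake retakes Ps that have been blocked in a syscall and requests
// preemption of goroutines that have been running for longer than
// forcePreemptNS. It returns the number of Ps it handed off.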
5476 func retake(now int64) uint32 {
5477 n := 0
5478
5479
5480 lock(&allpLock)
5481
5482
5483
5484 for i := 0; i < len(allp); i++ {
5485 _p_ := allp[i]
5486 if _p_ == nil {
5487
5488
5489 continue
5490 }
5491 pd := &_p_.sysmontick
5492 s := _p_.status
5493 sysretake := false
5494 if s == _Prunning || s == _Psyscall {
5495
5496 t := int64(_p_.schedtick)
5497 if int64(pd.schedtick) != t {
5498 pd.schedtick = uint32(t)
5499 pd.schedwhen = now
5500 } else if pd.schedwhen+forcePreemptNS <= now {
5501 preemptone(_p_)
5502
5503
5504 sysretake = true
5505 }
5506 }
5507 if s == _Psyscall {
5508
5509 t := int64(_p_.syscalltick)
5510 if !sysretake && int64(pd.syscalltick) != t {
5511 pd.syscalltick = uint32(t)
5512 pd.syscallwhen = now
5513 continue
5514 }
5515
5516
5517
5518 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
5519 continue
5520 }
5521
5522 unlock(&allpLock)
5523
5524
5525
5526
5527 incidlelocked(-1)
5528 if atomic.Cas(&_p_.status, s, _Pidle) {
5529 if trace.enabled {
5530 traceGoSysBlock(_p_)
5531 traceProcStop(_p_)
5532 }
5533 n++
5534 _p_.syscalltick++
5535 handoffp(_p_)
5536 }
5537 incidlelocked(1)
5538 lock(&allpLock)
5539 }
5540 }
5541 unlock(&allpLock)
5542 return uint32(n)
5543 }
5544
5545
5546
5547
5548
5549
5550 func preemptall() bool {
5551 res := false
5552 for _, _p_ := range allp {
5553 if _p_.status != _Prunning {
5554 continue
5555 }
5556 if preemptone(_p_) {
5557 res = true
5558 }
5559 }
5560 return res
5561 }
5562
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
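// preemptone requests preemption of the goroutine running on _p_. It is
// best-effort: it may fail to inform the goroutine, or inform the wrong one.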
5573 func preemptone(_p_ *p) bool {
5574 mp := _p_.m.ptr()
5575 if mp == nil || mp == getg().m {
5576 return false
5577 }
5578 gp := mp.curg
5579 if gp == nil || gp == mp.g0 {
5580 return false
5581 }
5582
5583 gp.preempt = true
5584
5585
5586
5587
5588
5589 gp.stackguard0 = stackPreempt
5590
5591
5592 if preemptMSupported && debug.asyncpreemptoff == 0 {
5593 _p_.preempt = true
5594 preemptM(mp)
5595 }
5596
5597 return true
5598 }
5599
5600 var starttime int64
5601
5602 func schedtrace(detailed bool) {
5603 now := nanotime()
5604 if starttime == 0 {
5605 starttime = now
5606 }
5607
5608 lock(&sched.lock)
5609 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
5610 if detailed {
5611 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
5612 }
5613
5614
5615
5616 for i, _p_ := range allp {
5617 mp := _p_.m.ptr()
5618 h := atomic.Load(&_p_.runqhead)
5619 t := atomic.Load(&_p_.runqtail)
5620 if detailed {
5621 id := int64(-1)
5622 if mp != nil {
5623 id = mp.id
5624 }
5625 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
5626 } else {
5627
5628
5629 print(" ")
5630 if i == 0 {
5631 print("[")
5632 }
5633 print(t - h)
5634 if i == len(allp)-1 {
5635 print("]\n")
5636 }
5637 }
5638 }
5639
5640 if !detailed {
5641 unlock(&sched.lock)
5642 return
5643 }
5644
5645 for mp := allm; mp != nil; mp = mp.alllink {
5646 _p_ := mp.p.ptr()
5647 gp := mp.curg
5648 lockedg := mp.lockedg.ptr()
5649 id1 := int32(-1)
5650 if _p_ != nil {
5651 id1 = _p_.id
5652 }
5653 id2 := int64(-1)
5654 if gp != nil {
5655 id2 = gp.goid
5656 }
5657 id3 := int64(-1)
5658 if lockedg != nil {
5659 id3 = lockedg.goid
5660 }
5661 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
5662 }
5663
5664 forEachG(func(gp *g) {
5665 mp := gp.m
5666 lockedm := gp.lockedm.ptr()
5667 id1 := int64(-1)
5668 if mp != nil {
5669 id1 = mp.id
5670 }
5671 id2 := int64(-1)
5672 if lockedm != nil {
5673 id2 = lockedm.id
5674 }
5675 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
5676 })
5677 unlock(&sched.lock)
5678 }
5679
5680
5681
5682
5683
5684
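// schedEnableUser enables or disables the scheduling of user goroutines.
// It does not stop already running user goroutines, so the caller should
// first stop the world when disabling them.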
5685 func schedEnableUser(enable bool) {
5686 lock(&sched.lock)
5687 if sched.disable.user == !enable {
5688 unlock(&sched.lock)
5689 return
5690 }
5691 sched.disable.user = !enable
5692 if enable {
5693 n := sched.disable.n
5694 sched.disable.n = 0
5695 globrunqputbatch(&sched.disable.runnable, n)
5696 unlock(&sched.lock)
5697 for ; n != 0 && sched.npidle != 0; n-- {
5698 startm(nil, false)
5699 }
5700 } else {
5701 unlock(&sched.lock)
5702 }
5703 }
5704
5705
5706
5707
5708
5709 func schedEnabled(gp *g) bool {
5710 assertLockHeld(&sched.lock)
5711
5712 if sched.disable.user {
5713 return isSystemGoroutine(gp, true)
5714 }
5715 return true
5716 }
5717
5718
5719
5720
5721
5722 func mput(mp *m) {
5723 assertLockHeld(&sched.lock)
5724
5725 mp.schedlink = sched.midle
5726 sched.midle.set(mp)
5727 sched.nmidle++
5728 checkdead()
5729 }
5730
5731
5732
5733
5734
5735 func mget() *m {
5736 assertLockHeld(&sched.lock)
5737
5738 mp := sched.midle.ptr()
5739 if mp != nil {
5740 sched.midle = mp.schedlink
5741 sched.nmidle--
5742 }
5743 return mp
5744 }
5745
5746
5747
5748
5749
5750 func globrunqput(gp *g) {
5751 assertLockHeld(&sched.lock)
5752
5753 sched.runq.pushBack(gp)
5754 sched.runqsize++
5755 }
5756
5757
5758
5759
5760
5761 func globrunqputhead(gp *g) {
5762 assertLockHeld(&sched.lock)
5763
5764 sched.runq.push(gp)
5765 sched.runqsize++
5766 }
5767
5768
5769
5770
5771
5772
5773 func globrunqputbatch(batch *gQueue, n int32) {
5774 assertLockHeld(&sched.lock)
5775
5776 sched.runq.pushBackAll(*batch)
5777 sched.runqsize += n
5778 *batch = gQueue{}
5779 }
5780
5781
5782
5783 func globrunqget(_p_ *p, max int32) *g {
5784 assertLockHeld(&sched.lock)
5785
5786 if sched.runqsize == 0 {
5787 return nil
5788 }
5789
5790 n := sched.runqsize/gomaxprocs + 1
5791 if n > sched.runqsize {
5792 n = sched.runqsize
5793 }
5794 if max > 0 && n > max {
5795 n = max
5796 }
5797 if n > int32(len(_p_.runq))/2 {
5798 n = int32(len(_p_.runq)) / 2
5799 }
5800
5801 sched.runqsize -= n
5802
5803 gp := sched.runq.pop()
5804 n--
5805 for ; n > 0; n-- {
5806 gp1 := sched.runq.pop()
5807 runqput(_p_, gp1, false)
5808 }
5809 return gp
5810 }
5811
5812
5813 type pMask []uint32
5814
5815
5816 func (p pMask) read(id uint32) bool {
5817 word := id / 32
5818 mask := uint32(1) << (id % 32)
5819 return (atomic.Load(&p[word]) & mask) != 0
5820 }
5821
5822
5823 func (p pMask) set(id int32) {
5824 word := id / 32
5825 mask := uint32(1) << (id % 32)
5826 atomic.Or(&p[word], mask)
5827 }
5828
5829
5830 func (p pMask) clear(id int32) {
5831 word := id / 32
5832 mask := uint32(1) << (id % 32)
5833 atomic.And(&p[word], ^mask)
5834 }
5835
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
5860
5861 func updateTimerPMask(pp *p) {
5862 if atomic.Load(&pp.numTimers) > 0 {
5863 return
5864 }
5865
5866
5867
5868
5869 lock(&pp.timersLock)
5870 if atomic.Load(&pp.numTimers) == 0 {
5871 timerpMask.clear(pp.id)
5872 }
5873 unlock(&pp.timersLock)
5874 }
5875
5876
5877
5878
5879
5880
5881
5882
5883
5884
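// pidleput puts _p_ on the idle P list. Its run queue must be empty.
// sched.lock must be held.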
5885 func pidleput(_p_ *p) {
5886 assertLockHeld(&sched.lock)
5887
5888 if !runqempty(_p_) {
5889 throw("pidleput: P has non-empty run queue")
5890 }
5891 updateTimerPMask(_p_)
5892 idlepMask.set(_p_.id)
5893 _p_.link = sched.pidle
5894 sched.pidle.set(_p_)
5895 atomic.Xadd(&sched.npidle, 1)
5896 }
5897
5898
5899
5900
5901
5902
5903
5904 func pidleget() *p {
5905 assertLockHeld(&sched.lock)
5906
5907 _p_ := sched.pidle.ptr()
5908 if _p_ != nil {
5909
5910 timerpMask.set(_p_.id)
5911 idlepMask.clear(_p_.id)
5912 sched.pidle = _p_.link
5913 atomic.Xadd(&sched.npidle, -1)
5914 }
5915 return _p_
5916 }
5917
5918
5919
5920 func runqempty(_p_ *p) bool {
5921
5922
5923
5924
5925 for {
5926 head := atomic.Load(&_p_.runqhead)
5927 tail := atomic.Load(&_p_.runqtail)
5928 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
5929 if tail == atomic.Load(&_p_.runqtail) {
5930 return head == tail && runnext == 0
5931 }
5932 }
5933 }
5934
5935
5936
5937
5938
5939
5940
5941
5942
5943
5944 const randomizeScheduler = raceenabled
5945
5946
5947
5948
5949
5950
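// runqput tries to put gp on the local runnable queue. If next is true, gp
// goes into the _p_.runnext slot; otherwise it is added to the tail. If the
// local queue is full, half of it is moved to the global queue.
// Executed only by the owner P.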
5951 func runqput(_p_ *p, gp *g, next bool) {
5952 if randomizeScheduler && next && fastrand()%2 == 0 {
5953 next = false
5954 }
5955
5956 if next {
5957 retryNext:
5958 oldnext := _p_.runnext
5959 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
5960 goto retryNext
5961 }
5962 if oldnext == 0 {
5963 return
5964 }
5965
5966 gp = oldnext.ptr()
5967 }
5968
5969 retry:
5970 h := atomic.LoadAcq(&_p_.runqhead)
5971 t := _p_.runqtail
5972 if t-h < uint32(len(_p_.runq)) {
5973 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
5974 atomic.StoreRel(&_p_.runqtail, t+1)
5975 return
5976 }
5977 if runqputslow(_p_, gp, h, t) {
5978 return
5979 }
5980
5981 goto retry
5982 }
5983
5984
5985
5986 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
5987 var batch [len(_p_.runq)/2 + 1]*g
5988
5989
5990 n := t - h
5991 n = n / 2
5992 if n != uint32(len(_p_.runq)/2) {
5993 throw("runqputslow: queue is not full")
5994 }
5995 for i := uint32(0); i < n; i++ {
5996 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5997 }
5998 if !atomic.CasRel(&_p_.runqhead, h, h+n) {
5999 return false
6000 }
6001 batch[n] = gp
6002
6003 if randomizeScheduler {
6004 for i := uint32(1); i <= n; i++ {
6005 j := fastrandn(i + 1)
6006 batch[i], batch[j] = batch[j], batch[i]
6007 }
6008 }
6009
6010
6011 for i := uint32(0); i < n; i++ {
6012 batch[i].schedlink.set(batch[i+1])
6013 }
6014 var q gQueue
6015 q.head.set(batch[0])
6016 q.tail.set(batch[n])
6017
6018
6019 lock(&sched.lock)
6020 globrunqputbatch(&q, int32(n+1))
6021 unlock(&sched.lock)
6022 return true
6023 }
6024
6025
6026
6027
6028
6029 func runqputbatch(pp *p, q *gQueue, qsize int) {
6030 h := atomic.LoadAcq(&pp.runqhead)
6031 t := pp.runqtail
6032 n := uint32(0)
6033 for !q.empty() && t-h < uint32(len(pp.runq)) {
6034 gp := q.pop()
6035 pp.runq[t%uint32(len(pp.runq))].set(gp)
6036 t++
6037 n++
6038 }
6039 qsize -= int(n)
6040
6041 if randomizeScheduler {
6042 off := func(o uint32) uint32 {
6043 return (pp.runqtail + o) % uint32(len(pp.runq))
6044 }
6045 for i := uint32(1); i < n; i++ {
6046 j := fastrandn(i + 1)
6047 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6048 }
6049 }
6050
6051 atomic.StoreRel(&pp.runqtail, t)
6052 if !q.empty() {
6053 lock(&sched.lock)
6054 globrunqputbatch(q, int32(qsize))
6055 unlock(&sched.lock)
6056 }
6057 }
6058
6059
6060
6061
6062
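// runqget gets a g from the local runnable queue, preferring the runnext
// slot. If inheritTime is true, the g should inherit the remaining time in
// the current time slice. Executed only by the owner P.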
6063 func runqget(_p_ *p) (gp *g, inheritTime bool) {
6064
6065 for {
6066 next := _p_.runnext
6067 if next == 0 {
6068 break
6069 }
6070 if _p_.runnext.cas(next, 0) {
6071 return next.ptr(), true
6072 }
6073 }
6074
6075 for {
6076 h := atomic.LoadAcq(&_p_.runqhead)
6077 t := _p_.runqtail
6078 if t == h {
6079 return nil, false
6080 }
6081 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
6082 if atomic.CasRel(&_p_.runqhead, h, h+1) {
6083 return gp, false
6084 }
6085 }
6086 }
6087
6088
6089
6090 func runqdrain(_p_ *p) (drainQ gQueue, n uint32) {
6091 oldNext := _p_.runnext
6092 if oldNext != 0 && _p_.runnext.cas(oldNext, 0) {
6093 drainQ.pushBack(oldNext.ptr())
6094 n++
6095 }
6096
6097 retry:
6098 h := atomic.LoadAcq(&_p_.runqhead)
6099 t := _p_.runqtail
6100 qn := t - h
6101 if qn == 0 {
6102 return
6103 }
6104 if qn > uint32(len(_p_.runq)) {
6105 goto retry
6106 }
6107
6108 if !atomic.CasRel(&_p_.runqhead, h, h+qn) {
6109 goto retry
6110 }
6111
6112
6113
6114
6115
6116
6117
6118
6119 for i := uint32(0); i < qn; i++ {
6120 gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
6121 drainQ.pushBack(gp)
6122 n++
6123 }
6124 return
6125 }
6126
6127
6128
6129
6130
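// runqgrab grabs a batch of goroutines from _p_'s runnable queue into batch,
// a ring buffer starting at batchHead, and returns the number grabbed.
// Can be executed by any P.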
6131 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6132 for {
6133 h := atomic.LoadAcq(&_p_.runqhead)
6134 t := atomic.LoadAcq(&_p_.runqtail)
6135 n := t - h
6136 n = n - n/2
6137 if n == 0 {
6138 if stealRunNextG {
6139
6140 if next := _p_.runnext; next != 0 {
6141 if _p_.status == _Prunning {
6142
6143
6144
6145
6146
6147
6148
6149
6150
6151
6152 if GOOS != "windows" {
6153 usleep(3)
6154 } else {
6155
6156
6157
6158 osyield()
6159 }
6160 }
6161 if !_p_.runnext.cas(next, 0) {
6162 continue
6163 }
6164 batch[batchHead%uint32(len(batch))] = next
6165 return 1
6166 }
6167 }
6168 return 0
6169 }
6170 if n > uint32(len(_p_.runq)/2) {
6171 continue
6172 }
6173 for i := uint32(0); i < n; i++ {
6174 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
6175 batch[(batchHead+i)%uint32(len(batch))] = g
6176 }
6177 if atomic.CasRel(&_p_.runqhead, h, h+n) {
6178 return n
6179 }
6180 }
6181 }
6182
6183
6184
6185
6186 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
6187 t := _p_.runqtail
6188 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
6189 if n == 0 {
6190 return nil
6191 }
6192 n--
6193 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
6194 if n == 0 {
6195 return gp
6196 }
6197 h := atomic.LoadAcq(&_p_.runqhead)
6198 if t-h+n >= uint32(len(_p_.runq)) {
6199 throw("runqsteal: runq overflow")
6200 }
6201 atomic.StoreRel(&_p_.runqtail, t+n)
6202 return gp
6203 }
6204
6205
6206
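// A gQueue is a queue of Gs linked through g.schedlink. A G can only be on
// one gQueue or gList at a time.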
6207 type gQueue struct {
6208 head guintptr
6209 tail guintptr
6210 }
6211
6212
6213 func (q *gQueue) empty() bool {
6214 return q.head == 0
6215 }
6216
6217
6218 func (q *gQueue) push(gp *g) {
6219 gp.schedlink = q.head
6220 q.head.set(gp)
6221 if q.tail == 0 {
6222 q.tail.set(gp)
6223 }
6224 }
6225
6226
6227 func (q *gQueue) pushBack(gp *g) {
6228 gp.schedlink = 0
6229 if q.tail != 0 {
6230 q.tail.ptr().schedlink.set(gp)
6231 } else {
6232 q.head.set(gp)
6233 }
6234 q.tail.set(gp)
6235 }
6236
6237
6238
6239 func (q *gQueue) pushBackAll(q2 gQueue) {
6240 if q2.tail == 0 {
6241 return
6242 }
6243 q2.tail.ptr().schedlink = 0
6244 if q.tail != 0 {
6245 q.tail.ptr().schedlink = q2.head
6246 } else {
6247 q.head = q2.head
6248 }
6249 q.tail = q2.tail
6250 }
6251
6252
6253
6254 func (q *gQueue) pop() *g {
6255 gp := q.head.ptr()
6256 if gp != nil {
6257 q.head = gp.schedlink
6258 if q.head == 0 {
6259 q.tail = 0
6260 }
6261 }
6262 return gp
6263 }
6264
6265
6266 func (q *gQueue) popList() gList {
6267 stack := gList{q.head}
6268 *q = gQueue{}
6269 return stack
6270 }
6271
6272
6273
6274 type gList struct {
6275 head guintptr
6276 }
6277
6278
6279 func (l *gList) empty() bool {
6280 return l.head == 0
6281 }
6282
6283
6284 func (l *gList) push(gp *g) {
6285 gp.schedlink = l.head
6286 l.head.set(gp)
6287 }
6288
6289
6290 func (l *gList) pushAll(q gQueue) {
6291 if !q.empty() {
6292 q.tail.ptr().schedlink = l.head
6293 l.head = q.head
6294 }
6295 }
6296
6297
6298 func (l *gList) pop() *g {
6299 gp := l.head.ptr()
6300 if gp != nil {
6301 l.head = gp.schedlink
6302 }
6303 return gp
6304 }
6305
6306
6307 func setMaxThreads(in int) (out int) {
6308 lock(&sched.lock)
6309 out = int(sched.maxmcount)
6310 if in > 0x7fffffff {
6311 sched.maxmcount = 0x7fffffff
6312 } else {
6313 sched.maxmcount = int32(in)
6314 }
6315 checkmcount()
6316 unlock(&sched.lock)
6317 return
6318 }
6319
6320
6321 func procPin() int {
6322 _g_ := getg()
6323 mp := _g_.m
6324
6325 mp.locks++
6326 return int(mp.p.ptr().id)
6327 }
6328
6329
6330 func procUnpin() {
6331 _g_ := getg()
6332 _g_.m.locks--
6333 }
6334
6335
6336
6337 func sync_runtime_procPin() int {
6338 return procPin()
6339 }
6340
6341
6342
6343 func sync_runtime_procUnpin() {
6344 procUnpin()
6345 }
6346
6347
6348
6349 func sync_atomic_runtime_procPin() int {
6350 return procPin()
6351 }
6352
6353
6354
6355 func sync_atomic_runtime_procUnpin() {
6356 procUnpin()
6357 }
6358
6359
6360
6361
6362 func sync_runtime_canSpin(i int) bool {
6363
6364
6365
6366
6367
6368 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
6369 return false
6370 }
6371 if p := getg().m.p.ptr(); !runqempty(p) {
6372 return false
6373 }
6374 return true
6375 }
6376
6377
6378
6379 func sync_runtime_doSpin() {
6380 procyield(active_spin_cnt)
6381 }
6382
6383 var stealOrder randomOrder
6384
6385
6386
6387
6388
6389 type randomOrder struct {
6390 count uint32
6391 coprimes []uint32
6392 }
6393
6394 type randomEnum struct {
6395 i uint32
6396 count uint32
6397 pos uint32
6398 inc uint32
6399 }
6400
6401 func (ord *randomOrder) reset(count uint32) {
6402 ord.count = count
6403 ord.coprimes = ord.coprimes[:0]
6404 for i := uint32(1); i <= count; i++ {
6405 if gcd(i, count) == 1 {
6406 ord.coprimes = append(ord.coprimes, i)
6407 }
6408 }
6409 }
6410
6411 func (ord *randomOrder) start(i uint32) randomEnum {
6412 return randomEnum{
6413 count: ord.count,
6414 pos: i % ord.count,
6415 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
6416 }
6417 }
6418
6419 func (enum *randomEnum) done() bool {
6420 return enum.i == enum.count
6421 }
6422
6423 func (enum *randomEnum) next() {
6424 enum.i++
6425 enum.pos = (enum.pos + enum.inc) % enum.count
6426 }
6427
6428 func (enum *randomEnum) position() uint32 {
6429 return enum.pos
6430 }
6431
6432 func gcd(a, b uint32) uint32 {
6433 for b != 0 {
6434 a, b = b, a%b
6435 }
6436 return a
6437 }
6438
6439
6440
6441 type initTask struct {
6442
6443 state uintptr
6444 ndeps uintptr
6445 nfns uintptr
6446
6447
6448 }
6449
6450
6451
6452 var inittrace tracestat
6453
6454 type tracestat struct {
6455 active bool
6456 id int64
6457 allocs uint64
6458 bytes uint64
6459 }
6460
6461 func doInit(t *initTask) {
6462 switch t.state {
6463 case 2:
6464 return
6465 case 1:
6466 throw("recursive call during initialization - linker skew")
6467 default:
6468 t.state = 1
6469
6470 for i := uintptr(0); i < t.ndeps; i++ {
6471 p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
6472 t2 := *(**initTask)(p)
6473 doInit(t2)
6474 }
6475
6476 if t.nfns == 0 {
6477 t.state = 2
6478 return
6479 }
6480
6481 var (
6482 start int64
6483 before tracestat
6484 )
6485
6486 if inittrace.active {
6487 start = nanotime()
6488
6489 before = inittrace
6490 }
6491
6492 firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize)
6493 for i := uintptr(0); i < t.nfns; i++ {
6494 p := add(firstFunc, i*sys.PtrSize)
6495 f := *(*func())(unsafe.Pointer(&p))
6496 f()
6497 }
6498
6499 if inittrace.active {
6500 end := nanotime()
6501
6502 after := inittrace
6503
6504 pkg := funcpkgpath(findfunc(funcPC(firstFunc)))
6505
6506 var sbuf [24]byte
6507 print("init ", pkg, " @")
6508 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
6509 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
6510 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
6511 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
6512 print("\n")
6513 }
6514
6515 t.state = 2
6516 }
6517 }
6518