src/runtime/stack.go
package runtime

import (
	"internal/abi"
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// _StackSystem is a number of additional bytes to add to each stack
	// below the usual guard area for OS-specific purposes like signal
	// handling. Used on Windows, Plan 9, and iOS because they do not use
	// a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024

	// The minimum size of stack used by Go code.
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds _StackMin+_StackSystem up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	//
	// The guard leaves enough room for one _StackSmall frame plus
	// a _StackLimit chain of NOSPLIT calls plus _StackSystem
	// bytes for the OS.
	_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
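
// Illustrative sketch (not part of the runtime): the _FixedStack0.._FixedStack6
// chain above is the usual "round up to a power of two" bit trick spelled out
// as untyped constants. Written as a hypothetical helper over uint32 values it
// looks like the function below. For example, on Plan 9 _StackMin+_StackSystem
// is 2560, which rounds up to _FixedStack = 4096, while on Linux 2048 is
// already a power of two and is left unchanged. (The sketch assumes x > 0.)
func roundUpPow2Sketch(x uint32) uint32 {
	x--
	x |= x >> 1
	x |= x >> 2
	x |= x >> 4
	x |= x >> 8
	x |= x >> 16
	return x + 1
}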

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stacks with bad value when copying (for debugging)
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// The values below can be stored to g.stackguard0 to force
	// the next stack split check to fail.
	// These are all larger than any real SP.

	// Goroutine preemption request.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking. Causes a split stack check failure.
	// 0xfffffb2e in hex.
	stackFork = uintptrMask & -1234

	// Force a stack movement. Used for debugging.
	// 0xfffffeed in hex.
	stackForceMove = uintptrMask & -275
)
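
// Worked example (illustrative): on a 64-bit target uintptrMask is 2^64-1, so
// the sentinels above evaluate to addresses just below the top of the address
// space, which no real stack pointer can equal:
//
//	stackPreempt   = 2^64 - 1314 = 0xffff_ffff_ffff_fade
//	stackFork      = 2^64 - 1234 = 0xffff_ffff_ffff_fb2e
//	stackForceMove = 2^64 -  275 = 0xffff_ffff_ffff_feed
//
// Because every stack bound check compares SP against g.stackguard0, storing
// one of these values into stackguard0 guarantees the next check fails, which
// routes control into morestack/newstack where the sentinel is recognized.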

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
var stackpool [_NumStackOrders]struct {
	item stackpoolItem
	_    [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
}

// stackpoolItem is the per-order pool: a lock and the list of spans
// with free stacks of that order.
type stackpoolItem struct {
	mu   mutex
	span mSpanList
}

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}
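
// Illustrative note: on a typical linux/amd64 build _FixedStack is 2 KiB and
// _NumStackOrders is 4, so stackpool holds four size classes:
//
//	order 0: 2 KiB    order 1: 4 KiB    order 2: 8 KiB    order 3: 16 KiB
//
// Stacks of 32 KiB and up (at least _StackCacheSize bytes, which on this
// configuration also equals _FixedStack<<_NumStackOrders) bypass stackpool
// entirely and are served from stackLarge, keyed by log2 of the span's page
// count.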

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].item.span.init()
		lockInit(&stackpool[i].item.mu, lockRankStackpool)
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
		lockInit(&stackLarge.lock, lockRankStackLarge)
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// stackpoolalloc allocates a stack from the free pool.
// Must be called with stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order].item.span
	s := list.first
	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// stackpoolfree adds a stack to the free pool.
// Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state.get() != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].item.span.insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].item.span.remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, spanAllocStack)
	}
}

// stackcacherefill/stackcacherelease implement a global pool
// of stack segments. The pool is required to prevent unlimited
// growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpool[order].item.mu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

// stackcacherelease returns stacks from c's local cache to the global
// pool until the cache is back down to half of its allowed capacity.
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpool[order].item.mu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpool[order].item.mu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
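
// Worked example (illustrative): _StackCacheSize is 32 KiB, so for order 0
// (2 KiB stacks) stackcacherefill pulls 8 stacks from the global pool and
// stackcacherelease trims the cache back down to 8. Filling and draining to
// half capacity, rather than all the way, keeps a P that alternates between
// allocating and freeing a single stack from bouncing spans back and forth
// between its local cache and the global pool on every operation.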

// stackcache_clear flushes every cached stack in c back to the global pools.
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	for order := uint8(0); order < _NumStackOrders; order++ {
		lock(&stackpool[order].item.mu)
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
		unlock(&stackpool[order].item.mu)
	}
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(alignUp(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
			// thisg.m.p == 0 can happen in the guts of exitsyscall
			// or procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpool[order].item.mu)
			x = stackpoolalloc(order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := thisg.m.p.ptr().mcache
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, spanAllocStack)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
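
// Worked example (illustrative): with _FixedStack = 2 KiB the order loop above
// maps the requested (power of two) size onto a stackpool class:
//
//	n = 2048  -> order 0      n = 4096  -> order 1
//	n = 8192  -> order 2      n = 16384 -> order 3
//
// A request of 32 KiB or more fails the small-stack test and takes the
// stackLarge path, where log2npage selects a free list by span size
// (with the runtime's 8 KiB pages, 32 KiB is 4 pages, so list 2).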

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
			lock(&stackpool[order].item.mu)
			stackpoolfree(x, order)
			unlock(&stackpool[order].item.mu)
		} else {
			c := gp.m.p.ptr().mcache
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state.get() != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var maxstackceiling = maxstacksize

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// adjustinfo carries the parameters of a stack move: the bounds of the old
// stack and the delta to add to every pointer that points into it.
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi marks the highest sudog.elem on the stack.
	sghi uintptr
}

// adjustpointer checks whether *vpp is in the old stack described by
// adjinfo. If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
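
// Worked example (illustrative, hypothetical addresses): suppose the old stack
// is [0xc000100000, 0xc000102000) and the new one is [0xc000200000,
// 0xc000204000). Then adjinfo.delta = new.hi - old.hi = 0x102000, and a slot
// holding 0xc000101f30 (inside the old range) is rewritten to 0xc000203f30,
// the same distance below the top of the new stack. Values outside the old
// range are left untouched, since they don't point into the moved stack.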

// Information from the compiler about the layout of stack frames.
// Note: this type must agree with reflect.bitVector.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)

	// If this frame might contain channel receive slots, use CAS
	// to avoid clobbering writes made by a concurrent channel send.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't copy
		// its frame. It is okay because we don't modify anything in this frame.
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	// TODO what about arm64 frame pointer adjustment?
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.PtrSize {
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				// (Happens when the stack bounds check fails and
				// we call into morestack.)
				continue
			}
			ptrdata := obj.ptrdata()
			gcdata := obj.gcdata
			var s *mspan
			if obj.useGCProg() {
				// See comments in mgcmark.go:scanstack.
				s = materializeGCProg(ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < ptrdata; i += sys.PtrSize {
				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust pointers in the Defer structs.
	// We need to do this first because we need to adjust the
	// defer.link fields so we always work on the new stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
	}

	// Adjust defer argument blocks the same way we adjust active stack frames.
	// Note: this code is after the loop above, so that if a defer record is
	// stack allocated, we work on the copy in the new stack.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			// There is a ranking cycle here between the gscan bit and
			// hchan locks: normally hchan locks are acquired before
			// taking a gscan bit. Here we already hold the gscan bit,
			// so acquiring the hchan lock is allowed as a special
			// leaf-rank case. No deadlock can result, because holding
			// the gscan bit means no other goroutine can be shrinking
			// or growing this goroutine's stack while holding this
			// channel's lock.
			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the channel locks.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if !gp.activeStackChans {
		if newsize < old.hi-old.lo && atomic.Load8(&gp.parkingOnChan) != 0 {
			// It's not safe for someone to shrink this stack while we're actively
			// parking on a channel, but it is safe to grow since we do that
			// ourselves and explicitly don't want to synchronize with channels
			// since we could self-deadlock.
			throw("racy sudog adjustment due to parking on channel")
		}
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs may be pointing in to the stack and gp has
		// released channel locks, so other goroutines could
		// be writing to gp's stack. Find the highest such
		// pointer so we can handle everything there and below
		// carefully. (This shouldn't be far from the bottom
		// of the stack, so there's little cost in handling
		// everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
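
// Worked example (illustrative, hypothetical sizes): growing an 8 KiB stack to
// 16 KiB with 3 KiB in use copies only those 3 KiB from just below old.hi to
// just below new.hi, then sets gp.sched.sp = new.hi - used and stackguard0 =
// new.lo + _StackGuard. Because stacks grow downward, everything keeps its
// offset from the high end of the stack, which is why adjinfo.delta is defined
// as new.hi - old.hi rather than as a difference of the low bounds.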

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the scheduler is trying to stop this g, then it will set preemptStop.
func newstack() {
	thisg := getg()

	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if !canPreemptM(thisg.m) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}

		if gp.preemptShrink {
			// We're at a synchronous safe point now, so
			// do the pending stack shrink.
			gp.preemptShrink = false
			shrinkstack(gp)
		}

		if gp.preemptStop {
			preemptPark(gp) // never returns
		}

		// Act like goroutine called runtime.Gosched.
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2

	// Make sure we grow at least as much as needed to fit the new frame.
	// (This is just an optimization - the caller of morestack will
	// recheck the bounds on return.)
	if f := findfunc(gp.sched.pc); f.valid() {
		max := uintptr(funcMaxSPDelta(f))
		needed := max + _StackGuard
		used := gp.stack.hi - gp.sched.sp
		for newsize-used < needed {
			newsize *= 2
		}
	}

	if gp.stackguard0 == stackForceMove {
		// Forced stack movement used for debugging.
		// Don't double the stack (or we may quickly run out
		// if this is done repeatedly).
		newsize = oldsize
	}

	if newsize > maxstacksize || newsize > maxstackceiling {
		if maxstacksize < maxstackceiling {
			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		} else {
			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
		}
		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy
	// since the gp is in a Gcopystack status.
	copystack(gp, newsize)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
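
// Worked example (illustrative): a goroutine that overflows the default 2 KiB
// stack normally gets a 4 KiB replacement. If the faulting function's maximum
// SP delta is unusually large, the doubling loop keeps going: with 1 KiB in
// use and a frame needing roughly 10 KiB plus _StackGuard, 4 KiB and 8 KiB are
// both rejected and the goroutine ends up with a 16 KiB stack. Growth stops
// with a "stack overflow" throw once newsize would exceed maxstacksize, which
// runtime.main raises from the placeholder above to about 1 GB on 64-bit
// systems.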

// nilfunc crashes on a nil dereference; gostartcallfn uses its PC when no
// function value is supplied.
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// isShrinkStackSafe returns whether it's safe to attempt to shrink
// gp's stack. Shrinking the stack is only safe when we have precise
// pointer maps for all frames on the stack.
func isShrinkStackSafe(gp *g) bool {
	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack and
	// often we don't have precise pointer maps for the innermost
	// frames.
	//
	// We also can't copy the stack if we're at an asynchronous
	// safe-point because we don't have precise pointer maps for
	// all frames.
	//
	// We also can't *shrink* the stack in the window between the
	// goroutine calling gopark to park on a channel and
	// gp.activeStackChans being set.
	return gp.syscallsp == 0 && !gp.asyncSafePoint && atomic.Load8(&gp.parkingOnChan) == 0
}

// Maybe shrink the stack being used by gp.
//
// gp must be stopped and we must own its stack. It may be in
// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if s := readgstatus(gp); s&_Gscan == 0 {
		// We don't own the stack via _Gscan. We could still
		// own it if this is our own user G and we're on the
		// system stack.
		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
			// We don't own the stack.
			throw("bad status in shrinkstack")
		}
	}
	if !isShrinkStackSafe(gp) {
		throw("shrinkstack at bad time")
	}
	// Check for self-shrinks while in a libcall. These may have
	// pointers into the stack disguised as uintptrs, but these
	// code paths should all be nosplit.
	if gp == getg().m.curg && gp.m.libcallsp != 0 {
		throw("shrinking stack in libcall")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize)
}
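
// Worked example (illustrative): a goroutine holding a 32 KiB stack is a
// candidate to shrink to 16 KiB. With the default StackGuardMultiplier,
// _StackLimit is 800 bytes, so the "used" figure is (hi - sp) plus that
// allowance and must stay under avail/4 = 8 KiB; in other words the shrink
// only happens if the goroutine is actually using a bit over 7 KiB or less of
// its 32 KiB stack, otherwise it keeps the larger allocation.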

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		lock(&stackpool[order].item.mu)
		list := &stackpool[order].item.span
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, spanAllocStack)
			}
			s = next
		}
		unlock(&stackpool[order].item.mu)
	}

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, spanAllocStack)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), hence don't use
		// the previous instruction.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.StackAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stackid := pcdata
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// If nbit == 0, there's no work to do.
		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print(" locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print(" no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / sys.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
	if GOARCH == "amd64" && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
		// argmap is set when the function is reflect.makeFuncStub or
		// reflect.methodValueCall. We don't actually use argmap in this
		// case, but we need to fake the stack object record for these
		// frames which contain an internal/abi.RegArgs at a hard-coded offset.
		objs = methodValueCallFrameObjs
	} else {
		p := funcdata(f, _FUNCDATA_StackObjects)
		if p != nil {
			n := *(*uintptr)(p)
			p = add(p, sys.PtrSize)
			*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
			// Note: the noescape above is needed to keep
			// getStackMap from "leaking param content:
			// frame". That leak propagates up to getgcmask, then
			// GCMask, then verifyGCInfo, which converts the stack
			// gcinfo tests into heap gcinfo tests :(
		}
	}

	return
}

var (
	abiRegArgsEface          interface{} = abi.RegArgs{}
	abiRegArgsType           *_type      = efaceOf(&abiRegArgsEface)._type
	methodValueCallFrameObjs             = []stackObjectRecord{
		{
			off:      -int32(alignUp(abiRegArgsType.size, 8)),
			size:     int32(abiRegArgsType.size),
			_ptrdata: int32(abiRegArgsType.ptrdata),
			gcdata:   abiRegArgsType.gcdata,
		},
	}
)

func init() {
	if abiRegArgsType.kind&kindGCProg != 0 {
		throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs")
	}
}

// A stackObjectRecord is generated by the compiler for each stack object
// in a stack frame. It must stay in sync with the record layout the
// compiler emits.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp
	// if non-negative, offset from argp
	off      int32
	size     int32
	_ptrdata int32 // ptrdata, or -ptrdata if a GC prog is used
	gcdata   *byte // pointer map or GC prog of the type
}

func (r *stackObjectRecord) useGCProg() bool {
	return r._ptrdata < 0
}

func (r *stackObjectRecord) ptrdata() uintptr {
	x := r._ptrdata
	if x < 0 {
		return uintptr(-x)
	}
	return uintptr(x)
}
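
// Illustrative note: the sign of _ptrdata is an encoding trick. A record with
// _ptrdata = 24 describes an object whose first 24 bytes may contain pointers,
// with gcdata pointing at an ordinary one-bit-per-word mask; a record with
// _ptrdata = -24 describes the same prefix length, but gcdata points at a GC
// program that adjustframe first expands into a temporary bitmap via
// materializeGCProg before scanning.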

// morestackc is called when code that must run on the system stack is
// executed on an ordinary goroutine stack; it just throws.
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}