src/runtime/gc_test.go
package runtime_test

import (
	"fmt"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

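// TestGcDeepNesting checks that the collector follows a pointer
// buried at the bottom of a deeply nested array type.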
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis:
	// printing a's address forces it to be heap-allocated.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

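// TestGcMapIndirection exercises maps whose large keys and values are
// stored indirectly in the map buckets, under an aggressive GC setting.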
func TestGcMapIndirection(t *testing.T) {
	// Run the collector aggressively (GOGC=1) for the duration of the test.
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

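// TestGcArraySlice builds a linked list in which each node's nextbuf
// slice aliases the previous node's one-byte buf array, then checks
// that no node was corrupted by the collections run along the way.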
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

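// TestGcRescan links the same nodes into two lists, one through the
// embedded X and one through the enclosing Y, so the collector reaches
// each node along two different pointer paths.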
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

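// TestGcLastTime checks that MemStats.LastGC falls inside the window
// of an explicit collection and sanity-checks the recorded pause.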
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, the recorded pause can be 0 on some
	// platforms, so only log (rather than fail) on suspicious values.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

// hugeSink is a global sink that keeps values reachable and prevents
// the compiler from optimizing allocations away.
var hugeSink interface{}

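// TestHugeGCInfo ensures the compiler can emit GC metadata for very
// large types. Nothing is allocated at run time; compiling the
// function is the test.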
func TestHugeGCInfo(t *testing.T) {
	// hugeSink is always nil here, so none of these allocations
	// execute; the compiler must nevertheless produce GC info for
	// each of the huge types below.
	if hugeSink != nil {
		// n is about 400M elements on 32-bit systems and about
		// 4<<40 on 64-bit systems.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// the goroutine that triggers the periodic GC can be delayed,
	// so poll for up to a second rather than expecting the GCs to
	// arrive on a precise schedule.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Check how many GCs have run since ms1 was read.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// The GCZombie test program keeps a pointer to an object after
	// the object has been freed; the runtime should detect this and
	// report it.
	got := runTestProg(t, "testprog", "GCZombie")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption before the next call. That shouldn't happen many
	// times in quick succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// adjustment that is specifically triggered by a function call.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// If the stack moved on the call into this function, the
	// pointer new was adjusted to the variable's new location,
	// while old, being a plain uintptr, kept the address from
	// before the move. Hence new2 differs from old exactly when
	// the stack actually moved.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Check that we didn't screw up the test's escape analysis.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// The stack did not move; the caller will retry.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure it's not doubling
	// every time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

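// TestGCTestIsReachable allocates 16 objects, keeps every other one
// reachable, and checks that GCTestIsReachable reports exactly the
// reachable half as a bitmask.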
func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// Create a heap pointer. Only the even-indexed ones are
		// kept reachable via the half slice below.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if want != got {
		t.Fatalf("did not get expected reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

var pointerClassSink *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Convert the pointer to a uintptr in the failure
			// message; passing p itself to Errorf would make it
			// escape and break the "stack" case below.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	pointerClassSink = &notOnStack
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(&notOnStack), "heap")
	check(unsafe.Pointer(&pointerClassSink), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

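// The BenchmarkSetType* benchmarks measure the cost of setting GC
// type metadata for freshly allocated objects of various sizes and
// pointer layouts.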
func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

// Node64Dead places the pointers first, so the trailing Value array
// is a pointer-free tail of the object.
type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

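// benchSetType reports the time to record GC type information for x,
// normalized per byte via b.SetBytes (element size for a pointer,
// element size times length for a slice).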
func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}

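// BenchmarkAllocation measures parallel allocation throughput:
// GOMAXPROCS goroutines drain a shared work queue, allocating 1000
// small pointer-bearing objects per work item.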
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

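// TestPrintGC runs print repeatedly while another goroutine performs
// back-to-back collections; the test passes if nothing deadlocks or
// crashes.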
func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// nothing to do; return nil below
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap by 64 MB in 64 KiB blocks, enough to exercise
	// the page allocator, then compare the runtime's running
	// pagesInUse counter against a direct count of pages in use.
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running counter agrees with a fresh count.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

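// TestReadMemStats cross-checks ReadMemStats against values computed
// directly from the runtime's internal state.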
func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

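// logDiff recursively compares got and want, logging the path and
// values of every leaf field that differs.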
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}
	hugeSink = x

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	hugeSink = nil
}

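// applyGCLoad starts allocation-heavy goroutines on all but one P to
// keep the garbage collector busy, and returns a function that stops
// them and waits for them to exit.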
func applyGCLoad(b *testing.B) func() {
	// Apply load with maxProcs-1 goroutines and leave the remaining
	// P for the benchmark itself. With GOMAXPROCS=1 there is no P
	// left over, so the benchmark would only be measuring the load
	// and the results would be meaningless.
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Set up the load: complete 16-ary trees of depth 5 generate
	// plenty of short-lived garbage.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Run the loaders until the returned stop function is called.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

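// BenchmarkReadMemStatsLatency measures the latency distribution of
// ReadMemStats calls while the collector is under heavy load.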
func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Record the latency of each ReadMemStats call.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously and measure.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit; otherwise we'd just be stopping the
		// world constantly and nothing else would get done.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}

	// Stop the timer before waiting for the load to wind down: the
	// load is heavyweight and could otherwise confuse the framework
	// for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics. ns/op is meaningless here
	// because of the sleep in the loop above, and the other metrics
	// are skewed for the same reason.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort the latencies and report percentiles.
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Disable the automatic GC so the only collection that runs
	// during the test is the explicit runtime.GC below.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

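// BenchmarkWriteBarrier measures the cost of a pointer write while the
// write barrier is continuously enabled, by walking a large binary
// tree and swapping each node's children.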
func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// The background GC goroutine needs its own P.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so the benchmark has a large set of pointers to manipulate.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Retain pointers to a cross-section of the tree
			// for the KeepAlive at the end of the benchmark.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // approximately 64 MB of nodes
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i += 2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops.
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

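// BenchmarkBulkWriteBarrier measures the bulk write barrier taken by
// copy when the slice elements contain pointers.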
func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// The background GC goroutine needs its own P.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large heap of pointer-dense objects.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate the block one element to the left.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

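// BenchmarkScanStackNoLocals measures full GCs while ten goroutines
// sit parked at the bottom of very deep call stacks, stressing stack
// scanning.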
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// stored in chunks of 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

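// countpwg recurses until *n reaches zero, then signals ready and
// parks on teardown, keeping its deep stack alive for the benchmark.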
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}