Source file
src/runtime/mgcsweep.go
// Garbage collector: sweeping
//
// The sweeper consists of two different algorithms:
//
//   - The object reclaimer finds and frees unmarked slots in spans.
//     It can free a whole span if none of the objects are marked, but
//     that isn't its goal. It can be driven either synchronously by
//     mcentral.cacheSpan for mcentral spans, or asynchronously by
//     sweepone, which looks at all the mcentral lists.
//
//   - The span reclaimer looks for spans that contain no marked
//     objects and frees whole spans, returning their pages to the
//     heap for reuse.
//
// Both algorithms ultimately call mspan.sweep, which sweeps a single
// heap span.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	nbgsweep    uint32
	npausesweep uint32

	// centralIndex is the current unswept span class.
	// It represents an index into the mcentral span
	// sets. Accessed and updated via its load and
	// update methods. Not protected by a lock.
	//
	// Reset at mark termination.
	// Used by mheap.nextSpanForSweep.
	centralIndex sweepClass
}

// sweepClass is a spanClass and one bit to represent whether we're
// currently sweeping partial or full spans.
type sweepClass uint32

const (
	numSweepClasses            = numSpanClasses * 2
	sweepClassDone  sweepClass = sweepClass(^uint32(0))
)

func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
}

func (s *sweepClass) update(sNew sweepClass) {
	// Only update *s if its current value is less than sNew,
	// since *s increases monotonically.
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
}

func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)
}

// split returns the underlying span class as well as
// whether we're interested in the full or partial
// unswept lists for that class, indicated as a boolean
// (true means "full").
func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
}

// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan {
	sg := h.sweepgen
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
		var s *mspan
		if full {
			s = c.fullUnswept(sg).pop()
		} else {
			s = c.partialUnswept(sg).pop()
		}
		if s != nil {
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
			return s
		}
	}
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)
	return nil
}

// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// progress.
//
//go:nowritebarrier
func finishsweep_m() {
	assertWorldStopped()

	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// Reset all the unswept buffers, which should be empty.
	// Do this in sweep termination as opposed to mark termination
	// so that we can catch unswept spans and reclaim blocks as
	// soon as possible.
	sg := mheap_.sweepgen
	for i := range mheap_.central {
		c := &mheap_.central[i].mcentral
		c.partialUnswept(sg).reset()
		c.fullUnswept(sg).reset()
	}

	// Sweeping is done, so if the scavenger isn't already awake,
	// wake it up. There's definitely work for it to do at this
	// point.
	wakeScavenger()

	nextMarkBitArenaEpoch()
}

func bgsweep() {
	sweep.g = getg()

	lockInit(&sweep.lock, lockRankSweep)
	lock(&sweep.lock)
	sweep.parked = true
	gcenable_setup <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)

	for {
		for sweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between
			// sweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	}
}

// sweepLocker acquires sweep ownership of spans and blocks sweep
// completion.
type sweepLocker struct {
	// sweepGen is the sweep generation of the heap.
	sweepGen uint32
	// blocking indicates that this tracker is blocking sweep
	// completion, usually as a result of acquiring sweep
	// ownership of at least one span.
	blocking bool
}

// sweepLocked represents sweep ownership of a span.
type sweepLocked struct {
	*mspan
}

func newSweepLocker() sweepLocker {
	return sweepLocker{
		sweepGen: mheap_.sweepgen,
	}
}

// tryAcquire attempts to acquire sweep ownership of span s. If it
// succeeds, it blocks sweep completion.
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
	// Check before attempting to CAS.
	if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
		return sweepLocked{}, false
	}
	// Add ourselves to sweepers before potentially taking
	// ownership.
	l.blockCompletion()
	// Attempt to acquire sweep ownership of s.
	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
		return sweepLocked{}, false
	}
	return sweepLocked{s}, true
}

// blockCompletion blocks sweep completion without acquiring any
// specific sweep ownership.
func (l *sweepLocker) blockCompletion() {
	if !l.blocking {
		atomic.Xadd(&mheap_.sweepers, +1)
		l.blocking = true
	}
}

func (l *sweepLocker) dispose() {
	if !l.blocking {
		return
	}
	// Decrement the number of active sweepers and if this is the
	// last one, mark sweep as complete.
	l.blocking = false
	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepDrained) != 0 {
		l.sweepIsDone()
	}
}

func (l *sweepLocker) sweepIsDone() {
	if debug.gcpacertrace > 0 {
		print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
	}
}

// sweepone sweeps some unswept heap span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	_g_ := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep thus leaving the span in an inconsistent state for next GC.
	_g_.m.locks++
	if atomic.Load(&mheap_.sweepDrained) != 0 {
		_g_.m.locks--
		return ^uintptr(0)
	}

	sl := newSweepLocker()

	// Find a span to sweep.
	npages := ^uintptr(0)
	var noMoreWork bool
	for {
		s := mheap_.nextSpanForSweep()
		if s == nil {
			noMoreWork = atomic.Cas(&mheap_.sweepDrained, 0, 1)
			break
		}
		if state := s.state.get(); state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s, ok := sl.tryAcquire(s); ok {
			// Sweep the span we found.
			npages = s.npages
			if s.sweep(false) {
				// Whole span was freed. Count it toward the
				// page reclaimer credit since these pages can
				// now be used for span allocation.
				atomic.Xadduintptr(&mheap_.reclaimCredit, npages)
			} else {
				// Span is still in-use, so this returned no
				// pages to the heap and the span needs to
				// move to the swept in-use list.
				npages = 0
			}
			break
		}
	}

	sl.dispose()

	if noMoreWork {
		// The sweep list is empty. There may still be concurrent
		// sweeps running, but we're at least very close to done
		// sweeping.

		// Move the scavenge gen forward (signalling that there's
		// new work to do) and wake the scavenger.
		//
		// The scavenger is signaled by the last sweeper because once
		// sweeping is done, we will definitely have useful work for
		// the scavenger to do, since the scavenger only runs over the
		// heap once per GC cycle. This update is not done during sweep
		// termination because in some cases there may be a long delay
		// between sweep done and sweep termination (e.g. not enough
		// allocations to trigger a GC) which would be nice to fill in
		// with scavenging work.
		systemstack(func() {
			lock(&mheap_.lock)
			mheap_.pages.scavengeStartGen()
			unlock(&mheap_.lock)
		})
		// Since we might sweep in an allocation path, it's not
		// possible for us to wake the scavenger directly via
		// wakeScavenger, since it could allocate. Ask sysmon to do
		// it for us instead.
		readyForScavenger()
	}

	_g_.m.locks--
	return npages
}

// isSweepDone reports whether all spans are swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
	// Check that all spans have at least begun sweeping and there
	// are no active sweepers. If both are true, then all spans
	// have finished sweeping.
	return atomic.Load(&mheap_.sweepDrained) != 0 && atomic.Load(&mheap_.sweepers) == 0
}

// Returns only when span s has been swept.
//
//go:nowritebarrier
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.ensureSwept: m is not locked")
	}

	sl := newSweepLocker()
	// The caller must be sure that the span is a mSpanInUse span.
	if s, ok := sl.tryAcquire(s); ok {
		s.sweep(false)
		sl.dispose()
		return
	}
	sl.dispose()

	// Somebody else acquired sweep ownership of the span first, so
	// spin until it finishes sweeping.
	for {
		spangen := atomic.Load(&s.sweepgen)
		if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
			break
		}
		osyield()
	}
}

// sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
func (sl *sweepLocked) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	}

	s := sl.mspan
	if !preserve {
		// We'll release ownership of this span. Nil it out to
		// prevent the caller from accidentally using it.
		sl.mspan = nil
	}

	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	spc := s.spanclass
	size := s.elemsize

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such case we need to queue the finalizer for execution,
	//    mark the object as live and preserve the profile special.
	// 2. A tiny object can have several finalizers setup for different offsets.
	//    If such object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	hadSpecials := s.specials != nil
	siter := newSpecialsIter(s)
	for siter.valid() {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(siter.s.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers and release other specials.
			for siter.valid() && uintptr(siter.s.offset) < endOffset {
				// Find the exact byte for which the special was setup
				// (as opposed to object beginning).
				special := siter.s
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					siter.unlinkAndNext()
					freeSpecial(special, unsafe.Pointer(p), size)
				} else {
					// The object has finalizers, so we keep it alive.
					// All other specials only apply when an object is
					// freed, so just keep the special record.
					siter.next()
				}
			}
		} else {
			// object is still live
			if siter.s.kind == _KindSpecialReachable {
				special := siter.unlinkAndNext()
				(*specialReachable)(unsafe.Pointer(special)).reachable = true
				freeSpecial(special, unsafe.Pointer(p), size)
			} else {
				// keep special record
				siter.next()
			}
		}
	}
	if hadSpecials && s.specials == nil {
		spanHasNoSpecials(s)
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead anyway.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Check for zombie objects: marked objects that are reported free
	// by the allocation bits.
	if s.freeindex < s.nelems {
		// Everything < freeindex is allocated and hence
		// cannot be zombies.
		//
		// Check the first bitmap byte, where we have to be
		// careful with freeindex.
		obj := s.freeindex
		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
			s.reportZombies()
		}
		// Check the remaining bytes.
		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
				s.reportZombies()
			}
		}
	}

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		// The zombie check above should have caught this in
		// more detail.
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// get a fresh cleared gcmarkBits in preparation for next GC
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// The span must be in our exclusive ownership until we update sweepgen,
	// check for potential races.
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state after sweep")
	}
	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
		throw("swept cached span")
	}

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	//
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// Serialization point.
	// At this point the mark bits are cleared and allocation ready
	// to go so release the span.
	atomic.Store(&s.sweepgen, sweepgen)

	if spc.sizeclass() != 0 {
		// Handle spans for small objects.
		if nfreed > 0 {
			// Only mark the span as needing zeroing if we've freed any
			// objects, because a fresh span that had been allocated into,
			// wasn't totally filled, but then swept, still has all of its
			// free slots zeroed.
			s.needzero = 1
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
			memstats.heapStats.release()
		}
		if !preserve {
			// The caller may not have removed this span from whatever
			// unswept set its on but taken ownership of the span for
			// sweeping by updating sweepgen. If this span still is in
			// an unswept set, then the mcentral will pop it off the
			// set, check its sweepgen, and ignore it.
			if nalloc == 0 {
				// Free totally free span directly back to the heap.
				mheap_.freeSpan(s)
				return true
			}
			// Return span back to the right mcentral list.
			if uintptr(nalloc) == s.nelems {
				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
			} else {
				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
			}
		}
	} else if !preserve {
		// Handle spans for large objects.
		if nfreed != 0 {
			// Free large object span to heap.

			// Under the efence debug mode the span is faulted rather
			// than freed: sysFault keeps the memory from ever being
			// reused, which avoids mysterious crashes from confused
			// memory reuse, at the cost of the program running out of
			// memory fairly quickly. Freeing it back to the OS instead
			// would leave the heap metadata (mspan structures, GC
			// bitmap) wrong if the memory ever came back.
			if debug.efence > 0 {
				s.limit = 0 // prevent mlookup from finding this span
				sysFault(unsafe.Pointer(s.base()), size)
			} else {
				mheap_.freeSpan(s)
			}
			stats := memstats.heapStats.acquire()
			atomic.Xadd64(&stats.largeFreeCount, 1)
			atomic.Xadd64(&stats.largeFree, int64(size))
			memstats.heapStats.release()
			return true
		}

		// Add a large span directly onto the full+swept list.
		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	}
	return false
}

// reportZombies reports any marked but free objects in s and throws.
//
// This generally means one of the following:
//
// 1. User code converted a pointer to a uintptr and then back
// unsafely, and a GC ran while the uintptr was the only reference to
// an object.
//
// 2. User code (or a compiler bug) constructed a bad pointer that
// points to a free slot, often a past-the-end pointer.
//
// 3. The GC freed a live object in a previous cycle (a GC bug), and a
// still-live pointer to it was found and marked this cycle.
func (s *mspan) reportZombies() {
	printlock()
	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
	mbits := s.markBitsForBase()
	abits := s.allocBitsForIndex(0)
	for i := uintptr(0); i < s.nelems; i++ {
		addr := s.base() + i*s.elemsize
		print(hex(addr))
		alloc := i < s.freeindex || abits.isMarked()
		if alloc {
			print(" alloc")
		} else {
			print(" free ")
		}
		if mbits.isMarked() {
			print(" marked ")
		} else {
			print(" unmarked")
		}
		zombie := mbits.isMarked() && !alloc
		if zombie {
			print(" zombie")
		}
		print("\n")
		if zombie {
			length := s.elemsize
			if length > 1024 {
				length = 1024
			}
			hexdumpWords(addr, addr+length, nil)
		}
		mbits.advance()
		abits.advance()
	}
	throw("found pointer to free object")
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going in to debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the eventually allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&gcController.heapLive)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}

// clobberfree sets the memory content at x to bad content, for debugging
// purposes.
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4.
	for i := uintptr(0); i < size; i += 4 {
		*(*uint32)(add(x, i)) = 0xdeadbeef
	}
}