Source file src/runtime/mgcsweep.go

		 1  // Copyright 2009 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  // Garbage collector: sweeping
		 6  
		 7  // The sweeper consists of two different algorithms:
		 8  //
		 9  // * The object reclaimer finds and frees unmarked slots in spans. It
		10  //   can free a whole span if none of the objects are marked, but that
		11  //   isn't its goal. This can be driven either synchronously by
		12  //   mcentral.cacheSpan for mcentral spans, or asynchronously by
		13  //   sweepone, which looks at all the mcentral lists.
		14  //
		15  // * The span reclaimer looks for spans that contain no marked objects
		16  //   and frees whole spans. This is a separate algorithm because
		17  //   freeing whole spans is the hardest task for the object reclaimer,
		18  //   but is critical when allocating new spans. The entry point for
		19  //   this is mheap_.reclaim and it's driven by a sequential scan of
		20  //   the page marks bitmap in the heap arenas.
		21  //
		22  // Both algorithms ultimately call mspan.sweep, which sweeps a single
		23  // heap span.
		24  
		25  package runtime
		26  
		27  import (
		28  	"runtime/internal/atomic"
		29  	"unsafe"
		30  )
		31  
		32  var sweep sweepdata
		33  
		34  // State of background sweep.
		35  type sweepdata struct {
		36  	lock    mutex
		37  	g       *g
		38  	parked  bool
		39  	started bool
		40  
		41  	nbgsweep    uint32
		42  	npausesweep uint32
		43  
		44  	// centralIndex is the current unswept span class.
		45  	// It represents an index into the mcentral span
		46  	// sets. Accessed and updated via its load and
		47  	// update methods. Not protected by a lock.
		48  	//
		49  	// Reset at mark termination.
		50  	// Used by mheap.nextSpanForSweep.
		51  	centralIndex sweepClass
		52  }
		53  
		54  // sweepClass is a spanClass and one bit to represent whether we're currently
		55  // sweeping partial or full spans.
		56  type sweepClass uint32
		57  
		58  const (
		59  	numSweepClasses            = numSpanClasses * 2
		60  	sweepClassDone  sweepClass = sweepClass(^uint32(0))
		61  )
		62  
		63  func (s *sweepClass) load() sweepClass {
		64  	return sweepClass(atomic.Load((*uint32)(s)))
		65  }
		66  
		67  func (s *sweepClass) update(sNew sweepClass) {
		68  	// Only update *s if its current value is less than sNew,
		69  	// since *s increases monotonically.
		70  	sOld := s.load()
		71  	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		72  		sOld = s.load()
		73  	}
		74  	// TODO(mknyszek): This isn't the only place we have
		75  	// an atomic monotonically increasing counter. It would
		76  	// be nice to have an "atomic max" which is just implemented
		77  	// as the above on most architectures. Some architectures
		78  	// like RISC-V however have native support for an atomic max.
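		    	// A sketch of such a helper (hypothetical, not part of the
		    	// runtime) might look like:
		    	//
		    	//	func atomicMax(addr *uint32, v uint32) {
		    	//		for old := atomic.Load(addr); old < v && !atomic.Cas(addr, old, v); old = atomic.Load(addr) {
		    	//		}
		    	//	}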
		79  }
		80  
		81  func (s *sweepClass) clear() {
		82  	atomic.Store((*uint32)(s), 0)
		83  }
		84  
		85  // split returns the underlying span class as well as
		86  // whether we're interested in the full or partial
		87  // unswept lists for that class, indicated as a boolean
		88  // (true means "full").
		89  func (s sweepClass) split() (spc spanClass, full bool) {
		90  	return spanClass(s >> 1), s&1 == 0
		91  }
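
		    // sweepClassFor is the inverse of split, shown for illustration
		    // only (a hypothetical helper, not part of the original file): it
		    // packs a span class and a full/partial flag back into a
		    // sweepClass index.
		    func sweepClassFor(spc spanClass, full bool) sweepClass {
		    	sc := sweepClass(spc) << 1
		    	if !full {
		    		sc |= 1
		    	}
		    	return sc
		    }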
		92  
		93  // nextSpanForSweep finds and pops the next span for sweeping from the
		94  // central sweep buffers. It returns ownership of the span to the caller.
		95  // Returns nil if no such span exists.
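		    // Spans are taken in sweepClass order: for each span class, the
		    // full unswept set is drained before the partial unswept set.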
		96  func (h *mheap) nextSpanForSweep() *mspan {
		97  	sg := h.sweepgen
		98  	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		99  		spc, full := sc.split()
	 100  		c := &h.central[spc].mcentral
	 101  		var s *mspan
	 102  		if full {
	 103  			s = c.fullUnswept(sg).pop()
	 104  		} else {
	 105  			s = c.partialUnswept(sg).pop()
	 106  		}
	 107  		if s != nil {
	 108  			// Write down that we found something so future sweepers
	 109  			// can start from here.
	 110  			sweep.centralIndex.update(sc)
	 111  			return s
	 112  		}
	 113  	}
	 114  	// Write down that we found nothing.
	 115  	sweep.centralIndex.update(sweepClassDone)
	 116  	return nil
	 117  }
	 118  
	 119  // finishsweep_m ensures that all spans are swept.
	 120  //
	 121  // The world must be stopped. This ensures there are no sweeps in
	 122  // progress.
	 123  //
	 124  //go:nowritebarrier
	 125  func finishsweep_m() {
	 126  	assertWorldStopped()
	 127  
	 128  	// Sweeping must be complete before marking commences, so
	 129  	// sweep any unswept spans. If this is a concurrent GC, there
	 130  	// shouldn't be any spans left to sweep, so this should finish
	 131  	// instantly. If GC was forced before the concurrent sweep
	 132  	// finished, there may be spans to sweep.
	 133  	for sweepone() != ^uintptr(0) {
	 134  		sweep.npausesweep++
	 135  	}
	 136  
	 137  	// Reset all the unswept buffers, which should be empty.
	 138  	// Do this in sweep termination as opposed to mark termination
	 139  	// so that we can catch unswept spans and reclaim blocks as
	 140  	// soon as possible.
	 141  	sg := mheap_.sweepgen
	 142  	for i := range mheap_.central {
	 143  		c := &mheap_.central[i].mcentral
	 144  		c.partialUnswept(sg).reset()
	 145  		c.fullUnswept(sg).reset()
	 146  	}
	 147  
	 148  	// Sweeping is done, so if the scavenger isn't already awake,
	 149  	// wake it up. There's definitely work for it to do at this
	 150  	// point.
	 151  	wakeScavenger()
	 152  
	 153  	nextMarkBitArenaEpoch()
	 154  }
	 155  
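	      // bgsweep is the background sweeper goroutine. It is started by
	      // gcenable, parks on sweep.lock, and is readied again at the end of
	      // each GC cycle so that it sweeps all remaining unswept spans (and
	      // frees workbufs) concurrently with the mutator, calling Gosched
	      // between spans to yield readily to other goroutines.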
	 156  func bgsweep() {
	 157  	sweep.g = getg()
	 158  
	 159  	lockInit(&sweep.lock, lockRankSweep)
	 160  	lock(&sweep.lock)
	 161  	sweep.parked = true
	 162  	gcenable_setup <- 1
	 163  	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	 164  
	 165  	for {
	 166  		for sweepone() != ^uintptr(0) {
	 167  			sweep.nbgsweep++
	 168  			Gosched()
	 169  		}
	 170  		for freeSomeWbufs(true) {
	 171  			Gosched()
	 172  		}
	 173  		lock(&sweep.lock)
	 174  		if !isSweepDone() {
	 175  			// This can happen if a GC runs between
	 176  			// sweepone returning ^0 above
	 177  			// and the lock being acquired.
	 178  			unlock(&sweep.lock)
	 179  			continue
	 180  		}
	 181  		sweep.parked = true
	 182  		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	 183  	}
	 184  }
	 185  
	 186  // sweepLocker acquires sweep ownership of spans and blocks sweep
	 187  // completion.
	 188  type sweepLocker struct {
	 189  	// sweepGen is the sweep generation of the heap.
	 190  	sweepGen uint32
	 191  	// blocking indicates that this tracker is blocking sweep
	 192  	// completion, usually as a result of acquiring sweep
	 193  	// ownership of at least one span.
	 194  	blocking bool
	 195  }
	 196  
	 197  // sweepLocked represents sweep ownership of a span.
	 198  type sweepLocked struct {
	 199  	*mspan
	 200  }
	 201  
	 202  func newSweepLocker() sweepLocker {
	 203  	return sweepLocker{
	 204  		sweepGen: mheap_.sweepgen,
	 205  	}
	 206  }
	 207  
	 208  // tryAcquire attempts to acquire sweep ownership of span s. If it
	 209  // successfully acquires ownership, it blocks sweep completion.
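	      //
	      // A span's sweep state is encoded in mspan.sweepgen relative to
	      // mheap_.sweepgen (see the mspan documentation in mheap.go):
	      //
	      //	sweepgen == h.sweepgen - 2: the span needs sweeping
	      //	sweepgen == h.sweepgen - 1: the span is currently being swept
	      //	sweepgen == h.sweepgen:     the span is swept and ready to use
	      //	sweepgen == h.sweepgen + 1: the span was cached before sweep began and still needs sweeping
	      //	sweepgen == h.sweepgen + 3: the span was swept and then cached and is still cached
	      //
	      // h.sweepgen is incremented by 2 after every GC; tryAcquire is the
	      // CAS from "needs sweeping" to "being swept".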
	 210  func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
	 211  	// Check before attempting to CAS.
	 212  	if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
	 213  		return sweepLocked{}, false
	 214  	}
	 215  	// Add ourselves to sweepers before potentially taking
	 216  	// ownership.
	 217  	l.blockCompletion()
	 218  	// Attempt to acquire sweep ownership of s.
	 219  	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
	 220  		return sweepLocked{}, false
	 221  	}
	 222  	return sweepLocked{s}, true
	 223  }
	 224  
	 225  // blockCompletion blocks sweep completion without acquiring any
	 226  // specific spans.
	 227  func (l *sweepLocker) blockCompletion() {
	 228  	if !l.blocking {
	 229  		atomic.Xadd(&mheap_.sweepers, +1)
	 230  		l.blocking = true
	 231  	}
	 232  }
	 233  
	 234  func (l *sweepLocker) dispose() {
	 235  	if !l.blocking {
	 236  		return
	 237  	}
	 238  	// Decrement the number of active sweepers and if this is the
	 239  	// last one, mark sweep as complete.
	 240  	l.blocking = false
	 241  	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepDrained) != 0 {
	 242  		l.sweepIsDone()
	 243  	}
	 244  }
	 245  
	 246  func (l *sweepLocker) sweepIsDone() {
	 247  	if debug.gcpacertrace > 0 {
	 248  		print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
	 249  	}
	 250  }
	 251  
	 252  // sweepone sweeps some unswept heap span and returns the number of pages returned
	 253  // to the heap, or ^uintptr(0) if there was nothing to sweep.
	 254  func sweepone() uintptr {
	 255  	_g_ := getg()
	 256  
	 257  	// Increment locks to ensure that the goroutine is not preempted in the
	 258  	// middle of sweep, which would leave the span in an inconsistent state for the next GC.
	 259  	_g_.m.locks++
	 260  	if atomic.Load(&mheap_.sweepDrained) != 0 {
	 261  		_g_.m.locks--
	 262  		return ^uintptr(0)
	 263  	}
	 264  	// TODO(austin): sweepone is almost always called in a loop;
	 265  	// lift the sweepLocker into its callers.
	 266  	sl := newSweepLocker()
	 267  
	 268  	// Find a span to sweep.
	 269  	npages := ^uintptr(0)
	 270  	var noMoreWork bool
	 271  	for {
	 272  		s := mheap_.nextSpanForSweep()
	 273  		if s == nil {
	 274  			noMoreWork = atomic.Cas(&mheap_.sweepDrained, 0, 1)
	 275  			break
	 276  		}
	 277  		if state := s.state.get(); state != mSpanInUse {
	 278  			// This can happen if direct sweeping already
	 279  			// swept this span, but in that case the sweep
	 280  			// generation should always be up-to-date.
	 281  			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) {
	 282  				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n")
	 283  				throw("non in-use span in unswept list")
	 284  			}
	 285  			continue
	 286  		}
	 287  		if s, ok := sl.tryAcquire(s); ok {
	 288  			// Sweep the span we found.
	 289  			npages = s.npages
	 290  			if s.sweep(false) {
	 291  				// Whole span was freed. Count it toward the
	 292  				// page reclaimer credit since these pages can
	 293  				// now be used for span allocation.
	 294  				atomic.Xadduintptr(&mheap_.reclaimCredit, npages)
	 295  			} else {
	 296  				// Span is still in-use, so this returned no
	 297  				// pages to the heap and the span needs to
	 298  				// move to the swept in-use list.
	 299  				npages = 0
	 300  			}
	 301  			break
	 302  		}
	 303  	}
	 304  
	 305  	sl.dispose()
	 306  
	 307  	if noMoreWork {
	 308  		// The sweep list is empty. There may still be
	 309  		// concurrent sweeps running, but we're at least very
	 310  		// close to done sweeping.
	 311  
	 312  		// Move the scavenge gen forward (signalling
	 313  		// that there's new work to do) and wake the scavenger.
	 314  		//
	 315  		// The scavenger is signaled by the last sweeper because once
	 316  		// sweeping is done, we will definitely have useful work for
	 317  		// the scavenger to do, since the scavenger only runs over the
	 318  		// heap once per GC cycle. This update is not done during sweep
	 319  		// termination because in some cases there may be a long delay
	 320  		// between sweep done and sweep termination (e.g. not enough
	 321  		// allocations to trigger a GC) which would be nice to fill in
	 322  		// with scavenging work.
	 323  		systemstack(func() {
	 324  			lock(&mheap_.lock)
	 325  			mheap_.pages.scavengeStartGen()
	 326  			unlock(&mheap_.lock)
	 327  		})
	 328  		// Since we might sweep in an allocation path, it's not possible
	 329  		// for us to wake the scavenger directly via wakeScavenger, since
	 330  		// it could allocate. Ask sysmon to do it for us instead.
	 331  		readyForScavenger()
	 332  	}
	 333  
	 334  	_g_.m.locks--
	 335  	return npages
	 336  }
	 337  
	 338  // isSweepDone reports whether all spans are swept.
	 339  //
	 340  // Note that this condition may transition from false to true at any
	 341  // time as the sweeper runs. It may transition from true to false if a
	 342  // GC runs; to prevent that the caller must be non-preemptible or must
	 343  // somehow block GC progress.
	 344  func isSweepDone() bool {
	 345  	// Check that all spans have at least begun sweeping and there
	 346  	// are no active sweepers. If both are true, then all spans
	 347  	// have finished sweeping.
	 348  	return atomic.Load(&mheap_.sweepDrained) != 0 && atomic.Load(&mheap_.sweepers) == 0
	 349  }
	 350  
	 351  // Returns only when span s has been swept.
	 352  //go:nowritebarrier
	 353  func (s *mspan) ensureSwept() {
	 354  	// Caller must disable preemption.
	 355  	// Otherwise when this function returns the span can become unswept again
	 356  	// (if GC is triggered on another goroutine).
	 357  	_g_ := getg()
	 358  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
	 359  		throw("mspan.ensureSwept: m is not locked")
	 360  	}
	 361  
	 362  	sl := newSweepLocker()
	 363  	// The caller must be sure that the span is a mSpanInUse span.
	 364  	if s, ok := sl.tryAcquire(s); ok {
	 365  		s.sweep(false)
	 366  		sl.dispose()
	 367  		return
	 368  	}
	 369  	sl.dispose()
	 370  
	 371  	// Someone else is sweeping the span (an unfortunate condition); we have no efficient means to wait, so spin.
	 372  	for {
	 373  		spangen := atomic.Load(&s.sweepgen)
	 374  		if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
	 375  			break
	 376  		}
	 377  		osyield()
	 378  	}
	 379  }
	 380  
	 381  // Sweep frees or collects finalizers for blocks not marked in the mark phase.
	 382  // It clears the mark bits in preparation for the next GC round.
	 383  // Returns true if the span was returned to heap.
	 384  // If preserve=true, don't return the span to the heap or relink it in
	 385  // mcentral lists; the caller takes care of it.
	 386  func (sl *sweepLocked) sweep(preserve bool) bool {
	 387  	// It's critical that we enter this function with preemption disabled;
	 388  	// GC must not start while we are in the middle of this function.
	 389  	_g_ := getg()
	 390  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
	 391  		throw("mspan.sweep: m is not locked")
	 392  	}
	 393  
	 394  	s := sl.mspan
	 395  	if !preserve {
	 396  		// We'll release ownership of this span. Nil it out to
	 397  		// prevent the caller from accidentally using it.
	 398  		sl.mspan = nil
	 399  	}
	 400  
	 401  	sweepgen := mheap_.sweepgen
	 402  	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
	 403  		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
	 404  		throw("mspan.sweep: bad span state")
	 405  	}
	 406  
	 407  	if trace.enabled {
	 408  		traceGCSweepSpan(s.npages * _PageSize)
	 409  	}
	 410  
	 411  	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
	 412  
	 413  	spc := s.spanclass
	 414  	size := s.elemsize
	 415  
	 416  	// The allocBits indicate which unmarked objects don't need to be
	 417  	// processed since they were free at the end of the last GC cycle
	 418  	// and were not allocated since then.
	 419  	// If the allocBits index is >= s.freeindex and the bit
	 420  	// is not marked then the object remains unallocated
	 421  	// since the last GC.
	 422  	// This situation is analogous to being on a freelist.
	 423  
	 424  	// Unlink & free special records for any objects we're about to free.
	 425  	// Two complications here:
	 426  	// 1. An object can have both finalizer and profile special records.
	 427  	//    In such a case we need to queue the finalizer for execution,
	 428  	//    mark the object as live, and preserve the profile special.
	 429  	// 2. A tiny object can have several finalizers set up for different offsets.
	 430  	//    If such an object is not marked, we need to queue all finalizers at once.
	 431  	// Both 1 and 2 are possible at the same time.
	 432  	hadSpecials := s.specials != nil
	 433  	siter := newSpecialsIter(s)
	 434  	for siter.valid() {
	 435  		// A finalizer can be set for an inner byte of an object; find the object's beginning.
	 436  		objIndex := uintptr(siter.s.offset) / size
	 437  		p := s.base() + objIndex*size
	 438  		mbits := s.markBitsForIndex(objIndex)
	 439  		if !mbits.isMarked() {
	 440  			// This object is not marked and has at least one special record.
	 441  			// Pass 1: see if it has at least one finalizer.
	 442  			hasFin := false
	 443  			endOffset := p - s.base() + size
	 444  			for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
	 445  				if tmp.kind == _KindSpecialFinalizer {
	 446  					// Stop freeing the object if it has a finalizer.
	 447  					mbits.setMarkedNonAtomic()
	 448  					hasFin = true
	 449  					break
	 450  				}
	 451  			}
	 452  			// Pass 2: queue all finalizers _or_ handle profile record.
	 453  			for siter.valid() && uintptr(siter.s.offset) < endOffset {
	 454  				// Find the exact byte for which the special was setup
	 455  				// (as opposed to object beginning).
	 456  				special := siter.s
	 457  				p := s.base() + uintptr(special.offset)
	 458  				if special.kind == _KindSpecialFinalizer || !hasFin {
	 459  					siter.unlinkAndNext()
	 460  					freeSpecial(special, unsafe.Pointer(p), size)
	 461  				} else {
	 462  					// The object has finalizers, so we're keeping it alive.
	 463  					// All other specials only apply when an object is freed,
	 464  					// so just keep the special record.
	 465  					siter.next()
	 466  				}
	 467  			}
	 468  		} else {
	 469  			// object is still live
	 470  			if siter.s.kind == _KindSpecialReachable {
	 471  				special := siter.unlinkAndNext()
	 472  				(*specialReachable)(unsafe.Pointer(special)).reachable = true
	 473  				freeSpecial(special, unsafe.Pointer(p), size)
	 474  			} else {
	 475  				// keep special record
	 476  				siter.next()
	 477  			}
	 478  		}
	 479  	}
	 480  	if hadSpecials && s.specials == nil {
	 481  		spanHasNoSpecials(s)
	 482  	}
	 483  
	 484  	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
	 485  		// Find all newly freed objects. This doesn't have to be
	 486  		// efficient; allocfreetrace has massive overhead.
	 487  		mbits := s.markBitsForBase()
	 488  		abits := s.allocBitsForIndex(0)
	 489  		for i := uintptr(0); i < s.nelems; i++ {
	 490  			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
	 491  				x := s.base() + i*s.elemsize
	 492  				if debug.allocfreetrace != 0 {
	 493  					tracefree(unsafe.Pointer(x), size)
	 494  				}
	 495  				if debug.clobberfree != 0 {
	 496  					clobberfree(unsafe.Pointer(x), size)
	 497  				}
	 498  				if raceenabled {
	 499  					racefree(unsafe.Pointer(x), size)
	 500  				}
	 501  				if msanenabled {
	 502  					msanfree(unsafe.Pointer(x), size)
	 503  				}
	 504  			}
	 505  			mbits.advance()
	 506  			abits.advance()
	 507  		}
	 508  	}
	 509  
	 510  	// Check for zombie objects.
	 511  	if s.freeindex < s.nelems {
	 512  		// Everything < freeindex is allocated and hence
	 513  		// cannot be a zombie.
	 514  		//
	 515  		// Check the first bitmap byte, where we have to be
	 516  		// careful with freeindex.
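	      		// A set bit in gcmarkBits &^ allocBits is an object that is
	      		// marked but not allocated: a zombie. Bits below freeindex in
	      		// this byte are shifted away because those slots are allocated
	      		// even if allocBits hasn't recorded them yet.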
	 517  		obj := s.freeindex
	 518  		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
	 519  			s.reportZombies()
	 520  		}
	 521  		// Check remaining bytes.
	 522  		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
	 523  			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
	 524  				s.reportZombies()
	 525  			}
	 526  		}
	 527  	}
	 528  
	 529  	// Count the number of free objects in this span.
	 530  	nalloc := uint16(s.countAlloc())
	 531  	nfreed := s.allocCount - nalloc
	 532  	if nalloc > s.allocCount {
	 533  		// The zombie check above should have caught this in
	 534  		// more detail.
	 535  		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
	 536  		throw("sweep increased allocation count")
	 537  	}
	 538  
	 539  	s.allocCount = nalloc
	 540  	s.freeindex = 0 // reset allocation index to start of span.
	 541  	if trace.enabled {
	 542  		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	 543  	}
	 544  
	 545  	// gcmarkBits becomes the allocBits.
	 546  	// Get a fresh, cleared gcmarkBits in preparation for the next GC.
	 547  	s.allocBits = s.gcmarkBits
	 548  	s.gcmarkBits = newMarkBits(s.nelems)
	 549  
	 550  	// Initialize alloc bits cache.
	 551  	s.refillAllocCache(0)
	 552  
	 553  	// The span must be in our exclusive ownership until we update sweepgen;
	 554  	// check for potential races.
	 555  	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
	 556  		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
	 557  		throw("mspan.sweep: bad span state after sweep")
	 558  	}
	 559  	if s.sweepgen == sweepgen+1 || s.sweepgen == sweepgen+3 {
	 560  		throw("swept cached span")
	 561  	}
	 562  
	 563  	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	 564  	// because of the potential for a concurrent free/SetFinalizer.
	 565  	//
	 566  	// But we need to set it before we make the span available for allocation
	 567  	// (return it to heap or mcentral), because allocation code assumes that a
	 568  	// span is already swept if available for allocation.
	 569  	//
	 570  	// Serialization point.
	 571  	// At this point the mark bits are cleared and allocation ready
	 572  	// to go so release the span.
	 573  	atomic.Store(&s.sweepgen, sweepgen)
	 574  
	 575  	if spc.sizeclass() != 0 {
	 576  		// Handle spans for small objects.
	 577  		if nfreed > 0 {
	 578  			// Only mark the span as needing zeroing if we've freed any
	 579  			// objects, because a fresh span that had been allocated into,
	 580  			// wasn't totally filled, but then swept, still has all of its
	 581  			// free slots zeroed.
	 582  			s.needzero = 1
	 583  			stats := memstats.heapStats.acquire()
	 584  			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed))
	 585  			memstats.heapStats.release()
	 586  		}
	 587  		if !preserve {
	 588  			// The caller may not have removed this span from whatever
	 589  			// unswept set it's on, but it has taken ownership of the span
	 590  			// for sweeping by updating sweepgen. If this span is still in
	 591  			// an unswept set, then the mcentral will pop it off that
	 592  			// set, check its sweepgen, and ignore it.
	 593  			if nalloc == 0 {
	 594  				// Free totally free span directly back to the heap.
	 595  				mheap_.freeSpan(s)
	 596  				return true
	 597  			}
	 598  			// Return span back to the right mcentral list.
	 599  			if uintptr(nalloc) == s.nelems {
	 600  				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	 601  			} else {
	 602  				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
	 603  			}
	 604  		}
	 605  	} else if !preserve {
	 606  		// Handle spans for large objects.
	 607  		if nfreed != 0 {
	 608  			// Free large object span to heap.
	 609  
	 610  			// NOTE(rsc,dvyukov): The original implementation of efence
	 611  			// in CL 22060046 used sysFree instead of sysFault, so that
	 612  			// the operating system would eventually give the memory
	 613  			// back to us again, so that an efence program could run
	 614  			// longer without running out of memory. Unfortunately,
	 615  			// calling sysFree here without any kind of adjustment of the
	 616  			// heap data structures means that when the memory does
	 617  			// come back to us, we have the wrong metadata for it, either in
	 618  			// the mspan structures or in the garbage collection bitmap.
	 619  			// Using sysFault here means that the program will run out of
	 620  			// memory fairly quickly in efence mode, but at least it won't
	 621  			// have mysterious crashes due to confused memory reuse.
	 622  			// It should be possible to switch back to sysFree if we also
	 623  			// implement and then call some kind of mheap.deleteSpan.
	 624  			if debug.efence > 0 {
	 625  				s.limit = 0 // prevent mlookup from finding this span
	 626  				sysFault(unsafe.Pointer(s.base()), size)
	 627  			} else {
	 628  				mheap_.freeSpan(s)
	 629  			}
	 630  			stats := memstats.heapStats.acquire()
	 631  			atomic.Xadd64(&stats.largeFreeCount, 1)
	 632  			atomic.Xadd64(&stats.largeFree, int64(size))
	 633  			memstats.heapStats.release()
	 634  			return true
	 635  		}
	 636  
	 637  		// Add a large span directly onto the full+swept list.
	 638  		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
	 639  	}
	 640  	return false
	 641  }
	 642  
	 643  // reportZombies reports any marked but free objects in s and throws.
	 644  //
	 645  // This generally means one of the following:
	 646  //
	 647  // 1. User code converted a pointer to a uintptr and then back
	 648  // unsafely, and a GC ran while the uintptr was the only reference to
	 649  // an object.
	 650  //
	 651  // 2. User code (or a compiler bug) constructed a bad pointer that
	 652  // points to a free slot, often a past-the-end pointer.
	 653  //
	 654  // 3. The GC two cycles ago missed a pointer and freed a live object,
	 655  // but it was still live in the last cycle, so this GC cycle found a
	 656  // pointer to that object and marked it.
	 657  func (s *mspan) reportZombies() {
	 658  	printlock()
	 659  	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
	 660  	mbits := s.markBitsForBase()
	 661  	abits := s.allocBitsForIndex(0)
	 662  	for i := uintptr(0); i < s.nelems; i++ {
	 663  		addr := s.base() + i*s.elemsize
	 664  		print(hex(addr))
	 665  		alloc := i < s.freeindex || abits.isMarked()
	 666  		if alloc {
	 667  			print(" alloc")
	 668  		} else {
	 669  			print(" free ")
	 670  		}
	 671  		if mbits.isMarked() {
	 672  			print(" marked  ")
	 673  		} else {
	 674  			print(" unmarked")
	 675  		}
	 676  		zombie := mbits.isMarked() && !alloc
	 677  		if zombie {
	 678  			print(" zombie")
	 679  		}
	 680  		print("\n")
	 681  		if zombie {
	 682  			length := s.elemsize
	 683  			if length > 1024 {
	 684  				length = 1024
	 685  			}
	 686  			hexdumpWords(addr, addr+length, nil)
	 687  		}
	 688  		mbits.advance()
	 689  		abits.advance()
	 690  	}
	 691  	throw("found pointer to free object")
	 692  }
	 693  
	 694  // deductSweepCredit deducts sweep credit for allocating a span of
	 695  // size spanBytes. This must be performed *before* the span is
	 696  // allocated to ensure the system has enough credit. If necessary, it
	 697  // performs sweeping to prevent going into debt. If the caller will
	 698  // also sweep pages (e.g., for a large allocation), it can pass a
	 699  // non-zero callerSweepPages to leave that many pages unswept.
	 700  //
	 701  // deductSweepCredit makes a worst-case assumption that all spanBytes
	 702  // bytes of the ultimately allocated span will be available for object
	 703  // allocation.
	 704  //
	 705  // deductSweepCredit is the core of the "proportional sweep" system.
	 706  // It uses statistics gathered by the garbage collector to perform
	 707  // enough sweeping so that all pages are swept during the concurrent
	 708  // sweep phase between GC cycles.
	 709  //
	 710  // mheap_ must NOT be locked.
	 711  func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	 712  	if mheap_.sweepPagesPerByte == 0 {
	 713  		// Proportional sweep is done or disabled.
	 714  		return
	 715  	}
	 716  
	 717  	if trace.enabled {
	 718  		traceGCSweepStart()
	 719  	}
	 720  
	 721  retry:
	 722  	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)
	 723  
	 724  	// Fix debt if necessary.
	 725  	newHeapLive := uintptr(atomic.Load64(&gcController.heapLive)-mheap_.sweepHeapLiveBasis) + spanBytes
	 726  	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
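	      	// Illustrative numbers (not from the source): with
	      	// sweepPagesPerByte = 0.001 and 1 MiB of heap growth since
	      	// sweepHeapLiveBasis, pagesTarget is about 1048 pages, so the loop
	      	// below keeps sweeping until pagesSwept has advanced at least that
	      	// far past pagesSweptBasis (or there is nothing left to sweep).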
	 727  	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
	 728  		if sweepone() == ^uintptr(0) {
	 729  			mheap_.sweepPagesPerByte = 0
	 730  			break
	 731  		}
	 732  		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
	 733  			// Sweep pacing changed. Recompute debt.
	 734  			goto retry
	 735  		}
	 736  	}
	 737  
	 738  	if trace.enabled {
	 739  		traceGCSweepDone()
	 740  	}
	 741  }
	 742  
	 743  // clobberfree sets the memory content at x to bad content, for debugging
	 744  // purposes.
	 745  func clobberfree(x unsafe.Pointer, size uintptr) {
	 746  	// size (span.elemsize) is always a multiple of 4.
	 747  	for i := uintptr(0); i < size; i += 4 {
	 748  		*(*uint32)(add(x, i)) = 0xdeadbeef
	 749  	}
	 750  }
	 751  
