Source file src/runtime/mcentral.go


// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral holds two sets of mspans: those with free objects (c.partial)
// and those that are completely allocated (c.full).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
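
// Worked example of the index arithmetic used by the four accessors above
// (illustrative only): with sweepgen == 4, sweepgen/2%2 == 0, so
// partialSwept(4) and fullSwept(4) return &c.partial[0] and &c.full[0],
// while partialUnswept(4) and fullUnswept(4) return &c.partial[1] and
// &c.full[1]. After the next GC cycle sweepgen is 6, 6/2%2 == 1, and the
// two indices trade roles, as described on the partial and full fields.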

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	sl := newSweepLocker()
	sg := sl.sweepGen

	// Try partial swept spans first.
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	// Now try partial unswept spans.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.partialUnswept(sg).pop()
		if s == nil {
			break
		}
		if s, ok := sl.tryAcquire(s); ok {
			// We got ownership of the span, so let's sweep it and use it.
			s.sweep(true)
			sl.dispose()
			goto havespan
		}
		// We failed to get ownership of the span, which means it's being or
		// has been swept by an asynchronous sweeper that just couldn't remove it
		// from the unswept list. That sweeper took ownership of the span and
		// responsibility for either freeing it to the heap or putting it on the
		// right swept list. Either way, we should just ignore it (and it's unsafe
		// for us to do anything else).
	}
	// Now try full unswept spans, sweeping them and putting them into the
	// right list if we fail to get a span.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.fullUnswept(sg).pop()
		if s == nil {
			break
		}
		if s, ok := sl.tryAcquire(s); ok {
			// We got ownership of the span, so let's sweep it.
			s.sweep(true)
			// Check if there's any free space.
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				sl.dispose()
				goto havespan
			}
			// Add it to the swept list, because sweeping didn't give us any free space.
			c.fullSwept(sg).push(s.mspan)
		}
		// See comment for partial unswept spans.
	}
	sl.dispose()
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
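
// Worked example for the allocCache adjustment at the end of cacheSpan
// (illustrative numbers): if the span chosen at havespan has
// s.freeindex == 70, then freeByteBase == 70 &^ 63 == 64 and
// whichByte == 64/8 == 8, so refillAllocCache loads the 64 allocation bits
// covering objects 64..127 into s.allocCache, and the shift by
// 70%64 == 6 leaves the bit for object 70 in the low bit of the cache.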

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}
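
// For reference, a summary of the sweepgen states relevant here (paraphrased
// from the mspan documentation in mheap.go): s.sweepgen == sg-2 means the
// span needs sweeping, sg-1 means it is currently being swept, sg means it is
// swept and ready to use, sg+1 means it was cached before the current sweep
// cycle began (the "stale" case above), and sg+3 means it was swept and then
// cached. uncacheSpan moves a stale span to sg-1 before sweeping it, and a
// non-stale cached span directly back to sg.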

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s, _ := mheap_.alloc(npages, c.spanclass, true)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := s.divideByElemSize(npages << _PageShift)
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
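
// Worked example for grow (the concrete numbers assume the standard
// size-class tables in sizeclasses.go): for the 48-byte size class,
// class_to_allocnpages gives 1 page, so npages << _PageShift == 8192 and
// n == 8192/48 == 170 objects; s.limit is then s.base() + 48*170, leaving
// 32 bytes of tail waste at the end of the span.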
