
Source file src/runtime/mpagecache.go


// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

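// pageCachePages is the number of pages a single pageCache covers:
// unsafe.Sizeof(pageCache{}.cache) is 8 bytes (a uint64), so the cache
// bitmap holds 8*8 = 64 page bits.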
const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)

// pageCache represents a per-p cache of pages the allocator can
// allocate from without a lock. More specifically, it represents
// a pageCachePages*pageSize chunk of memory with 0 or more free
// pages in it.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}
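// Bit i of cache corresponds to the page at base+i*pageSize. For example,
// a cache value of 0b0101 means pages 0 and 2 of the 64-page block starting
// at base are free; the scav bits record which of those free pages have
// already been returned to the OS.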

// empty returns true if the pageCache has no free pages, and false
// otherwise.
func (c *pageCache) empty() bool {
	return c.cache == 0
}

// alloc allocates npages from the page cache and is the main entry
// point for allocation.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
//
// Returns a base address of zero on failure, in which case the
// amount of scavenged memory should be ignored.
func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
	if c.cache == 0 {
		return 0, 0
	}
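	// Single-page fast path: take the lowest set (free) bit. For example,
	// if cache == 0b1100, TrailingZeros64 returns 2, so the page at
	// base+2*pageSize is handed out and bit 2 is cleared in both bitmaps.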
	if npages == 1 {
		i := uintptr(sys.TrailingZeros64(c.cache))
		scav := (c.scav >> i) & 1
		c.cache &^= 1 << i // set bit to mark in-use
		c.scav &^= 1 << i  // clear bit to mark unscavenged
		return c.base + i*pageSize, uintptr(scav) * pageSize
	}
	return c.allocN(npages)
}

// allocN is a helper which attempts to allocate npages worth of pages
// from the cache. It represents the general case for allocating from
// the page cache.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
	i := findBitRange64(c.cache, uint(npages))
	if i >= 64 {
		return 0, 0
	}
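	// Build a mask of npages consecutive 1 bits starting at bit i, e.g.
	// npages=3, i=2 yields 0b11100. The popcount of scav&mask is the number
	// of scavenged pages in the run being allocated.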
	mask := ((uint64(1) << npages) - 1) << i
	scav := sys.OnesCount64(c.scav & mask)
	c.cache &^= mask // mark in-use bits
	c.scav &^= mask  // clear scavenged bits
	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
}

// flush empties out unallocated free pages in the given cache
// into p. Then, it clears the cache, such that empty returns
// true.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (c *pageCache) flush(p *pageAlloc) {
	assertLockHeld(p.mheapLock)

	if c.empty() {
		return
	}
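	// base is always aligned to 64 pages and a chunk spans a multiple of 64
	// pages, so the whole cache lies within a single chunk; one chunk index
	// plus a starting page index addresses every bit below.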
	ci := chunkIndex(c.base)
	pi := chunkPageIndex(c.base)

	// This method is called very infrequently, so just do the
	// slower, safer thing by iterating over each bit individually.
	for i := uint(0); i < 64; i++ {
		if c.cache&(1<<i) != 0 {
			p.chunkOf(ci).free1(pi + i)
		}
		if c.scav&(1<<i) != 0 {
			p.chunkOf(ci).scavenged.setRange(pi+i, 1)
		}
	}
	// Since this is a lot like a free, we need to make sure
	// we update the searchAddr just like free does.
	if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	p.update(c.base, pageCachePages, false, false)
	*c = pageCache{}
}

// allocToCache acquires a pageCachePages-aligned chunk of free pages which
// may not be contiguous, and returns a pageCache structure which owns the
// chunk.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) allocToCache() pageCache {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return pageCache{}
	}
	c := pageCache{}
	ci := chunkIndex(p.searchAddr.addr()) // chunk index
	if p.summary[len(p.summary)-1][ci] != 0 {
		// Fast path: there are free pages at or near the searchAddr address.
		chunk := p.chunkOf(ci)
		j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
		if j == ^uint(0) {
			throw("bad summary data")
		}
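		// chunk.pages64(j) returns the 64-page aligned block of allocation
		// bits containing page j, with 1 meaning in-use, so its complement is
		// the free-page bitmap the cache wants. alignDown(j, 64) converts the
		// found page index back to the block's starting page for base.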
		c = pageCache{
			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
			cache: ^chunk.pages64(j),
			scav:  chunk.scavenged.block64(j),
		}
	} else {
		// Slow path: the searchAddr address had nothing there, so go find
		// the first free page the slow way.
		addr, _ := p.find(1)
		if addr == 0 {
			// We failed to find adequate free space, so mark the searchAddr as OoM
			// and return an empty pageCache.
			p.searchAddr = maxSearchAddr
			return pageCache{}
		}
		ci := chunkIndex(addr)
		chunk := p.chunkOf(ci)
		c = pageCache{
			base:  alignDown(addr, 64*pageSize),
			cache: ^chunk.pages64(chunkPageIndex(addr)),
			scav:  chunk.scavenged.block64(chunkPageIndex(addr)),
		}
	}

	// Set the bits as allocated and clear the scavenged bits.
	p.allocRange(c.base, pageCachePages)

	// Update as an allocation, but note that it's not contiguous.
	p.update(c.base, pageCachePages, false, true)

	// Set the search address to the last page represented by the cache.
	// Since all of the pages in this block are going to the cache, and we
	// searched for the first free page, we can confidently start at the
	// next page.
	//
	// However, p.searchAddr is not allowed to point into unmapped heap memory
	// unless it is maxSearchAddr, so make it the last page as opposed to
	// the page after.
	p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
	return c
}
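
The bitmap manipulation above does not depend on any other runtime state, so it can be exercised in isolation. The sketch below is a minimal, self-contained approximation of the single-page and multi-page allocation paths using only math/bits; the toyCache type, the alloc1 name, the illustrative pageSize value, and this simplified findBitRange64 are stand-ins invented for the example, not runtime code.

package main

import (
	"fmt"
	"math/bits"
)

const pageSize = 8192 // illustrative page size for the sketch only

// toyCache mirrors pageCache's bitmaps without any runtime dependencies.
type toyCache struct {
	base  uintptr
	cache uint64 // 1 means free
	scav  uint64 // 1 means scavenged
}

// alloc1 is the single-page fast path: take the lowest free page.
func (c *toyCache) alloc1() (uintptr, uintptr) {
	if c.cache == 0 {
		return 0, 0
	}
	i := uintptr(bits.TrailingZeros64(c.cache))
	scav := (c.scav >> i) & 1
	c.cache &^= 1 << i // mark in-use
	c.scav &^= 1 << i  // mark unscavenged
	return c.base + i*pageSize, uintptr(scav) * pageSize
}

// allocN takes npages contiguous free pages, returning a zero base on failure.
func (c *toyCache) allocN(npages uint) (uintptr, uintptr) {
	i := findBitRange64(c.cache, npages)
	if i >= 64 {
		return 0, 0
	}
	mask := ((uint64(1) << npages) - 1) << i
	scav := bits.OnesCount64(c.scav & mask)
	c.cache &^= mask
	c.scav &^= mask
	return c.base + uintptr(i)*pageSize, uintptr(scav) * pageSize
}

// findBitRange64 is a simple stand-in for the runtime helper: it returns the
// index of the first run of n consecutive 1 bits in m, or >= 64 if none exists.
func findBitRange64(m uint64, n uint) uint {
	for i := uint(0); i+n <= 64; i++ {
		run := ((uint64(1) << n) - 1) << i
		if m&run == run {
			return i
		}
	}
	return 64
}

func main() {
	c := toyCache{base: 0x100000, cache: 0b11110100, scav: 0b00000100}
	addr, scav := c.alloc1()
	fmt.Printf("alloc1: addr=%#x scavenged=%d bytes\n", addr, scav) // page 2, which was scavenged
	addr, scav = c.allocN(3)
	fmt.Printf("allocN(3): addr=%#x scavenged=%d bytes\n", addr, scav) // pages 4-6, none scavenged
}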
