...

Source file src/runtime/mpagealloc_64bit.go

Documentation: runtime

		 1  // Copyright 2019 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  //go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x
		 6  // +build amd64 arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x
		 7  
		 8  package runtime
		 9  
		10  import "unsafe"
		11  
const (
	// The number of levels in the radix tree.
	summaryLevels = 5

	// Constants for testing.
	pageAlloc32Bit = 0
	pageAlloc64Bit = 1

	// Number of bits needed to represent all indices into the L1 of the
	// chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this number change.
	pallocChunksL1Bits = 13
)
		27  
// levelBits is the number of bits in the radix for a given level in the super summary
// structure.
//
// The sum of all the entries of levelBits should equal heapAddrBits.
var levelBits = [summaryLevels]uint{
	// The root level gets its own size; every lower level uses the
	// common per-level radix width.
	summaryL0Bits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
}
		39  
// levelShift is the number of bits to shift to acquire the radix for a given level
// in the super summary structure.
//
// With levelShift, one can compute the index of the summary at level l related to a
// pointer p by doing:
//
//	p >> levelShift[l]
var levelShift = [summaryLevels]uint{
	// Each successive level consumes summaryLevelBits more of the
	// address, so its shift is that much smaller.
	heapAddrBits - summaryL0Bits,
	heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
}
		53  
// levelLogPages is log2 the maximum number of runtime pages in the address space
// a summary in the given level represents.
//
// The leaf level always represents exactly log2 of 1 chunk's worth of pages.
var levelLogPages = [summaryLevels]uint{
	// Each level above the leaf covers summaryLevelBits more address
	// bits, and therefore that many more doublings of pages.
	logPallocChunkPages + 4*summaryLevelBits,
	logPallocChunkPages + 3*summaryLevelBits,
	logPallocChunkPages + 2*summaryLevelBits,
	logPallocChunkPages + 1*summaryLevelBits,
	logPallocChunkPages,
}
		65  
		66  // sysInit performs architecture-dependent initialization of fields
		67  // in pageAlloc. pageAlloc should be uninitialized except for sysStat
		68  // if any runtime statistic should be updated.
		69  func (p *pageAlloc) sysInit() {
		70  	// Reserve memory for each level. This will get mapped in
		71  	// as R/W by setArenas.
		72  	for l, shift := range levelShift {
		73  		entries := 1 << (heapAddrBits - shift)
		74  
		75  		// Reserve b bytes of memory anywhere in the address space.
		76  		b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
		77  		r := sysReserve(nil, b)
		78  		if r == nil {
		79  			throw("failed to reserve page summary memory")
		80  		}
		81  
		82  		// Put this reservation into a slice.
		83  		sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
		84  		p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
		85  	}
		86  }
		87  
// sysGrow performs architecture-dependent operations on heap
// growth for the page allocator, such as mapping in new memory
// for summaries. It also updates the length of the slices in
// p.summary.
//
// base is the base of the newly-added heap memory and limit is
// the first address past the end of the newly-added heap memory.
// Both must be aligned to pallocChunkBytes.
//
// The caller must update p.start and p.end after calling sysGrow.
func (p *pageAlloc) sysGrow(base, limit uintptr) {
	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
		throw("sysGrow bounds not aligned to pallocChunkBytes")
	}

	// addrRangeToSummaryRange converts a range of addresses into a range
	// of summary indices which must be mapped to support those addresses
	// in the summary range.
	addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
		sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
		return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
	}

	// summaryRangeToSumAddrRange converts a range of indices in any
	// level of p.summary into page-aligned addresses which cover that
	// range of indices.
	summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
		baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
		base := unsafe.Pointer(&p.summary[level][0])
		return addrRange{
			offAddr{uintptr(add(base, baseOffset))},
			offAddr{uintptr(add(base, limitOffset))},
		}
	}

	// addrRangeToSumAddrRange is a convenience function that converts
	// an address range r to the address range of the given summary level
	// that stores the summaries for r.
	addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
		sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
		return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
	}

	// Find the first inUse index which is strictly greater than base.
	//
	// Because this function will never be asked to remap the same memory
	// twice, this index is effectively the index at which we would insert
	// this new growth, and base will never overlap/be contained within
	// any existing range.
	//
	// This will be used to look at what memory in the summary array is already
	// mapped before and after this new range.
	inUseIndex := p.inUse.findSucc(base)

	// Walk up the radix tree and map summaries in as needed.
	for l := range p.summary {
		// Figure out what part of the summary array this new address space needs.
		needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))

		// Update the summary slices with a new upper-bound. This ensures
		// we get tight bounds checks on at least the top bound.
		//
		// We must do this regardless of whether we map new memory.
		if needIdxLimit > len(p.summary[l]) {
			p.summary[l] = p.summary[l][:needIdxLimit]
		}

		// Compute the needed address range in the summary array for level l.
		need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)

		// Prune need down to what needs to be newly mapped. Some parts of it may
		// already be mapped by what inUse describes due to page alignment requirements
		// for mapping. prune's invariants are guaranteed by the fact that this
		// function will never be asked to remap the same memory twice.
		if inUseIndex > 0 {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
		}
		if inUseIndex < len(p.inUse.ranges) {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
		}
		// It's possible that after our pruning above, there's nothing new to map.
		if need.size() == 0 {
			continue
		}

		// Map and commit need.
		sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
		sysUsed(unsafe.Pointer(need.base.addr()), need.size())
	}
}
	 180  

View as plain text