...

Source file src/runtime/map.go

		 1  // Copyright 2014 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  package runtime
		 6  
		 7  // This file contains the implementation of Go's map type.
		 8  //
		 9  // A map is just a hash table. The data is arranged
		10  // into an array of buckets. Each bucket contains up to
		11  // 8 key/elem pairs. The low-order bits of the hash are
		12  // used to select a bucket. Each bucket contains a few
		13  // high-order bits of each hash to distinguish the entries
		14  // within a single bucket.
		15  //
		16  // If more than 8 keys hash to a bucket, we chain on
		17  // extra buckets.
		18  //
		19  // When the hashtable grows, we allocate a new array
		20  // of buckets twice as big. Buckets are incrementally
		21  // copied from the old bucket array to the new bucket array.
		22  //
		23  // Map iterators walk through the array of buckets and
		24  // return the keys in walk order (bucket #, then overflow
		25  // chain order, then bucket index). To maintain iteration
		26  // semantics, we never move keys within their bucket (if
		27  // we did, keys might be returned 0 or 2 times). When
		28  // growing the table, iterators remain iterating through the
		29  // old table and must check the new table if the bucket
		30  // they are iterating through has been moved ("evacuated")
		31  // to the new table.
		32  
		33  // Picking loadFactor: too large and we have lots of overflow
		34  // buckets, too small and we waste a lot of space. I wrote
		35  // a simple program to check some stats for different loads:
		36  // (64-bit, 8 byte keys and elems)
		37  //  loadFactor  %overflow  bytes/entry  hitprobe  missprobe
		38  //        4.00       2.13        20.77      3.00       4.00
		39  //        4.50       4.05        17.30      3.25       4.50
		40  //        5.00       6.85        14.77      3.50       5.00
		41  //        5.50      10.55        12.94      3.75       5.50
		42  //        6.00      15.27        11.67      4.00       6.00
		43  //        6.50      20.90        10.79      4.25       6.50
		44  //        7.00      27.14        10.15      4.50       7.00
		45  //        7.50      34.03         9.73      4.75       7.50
		46  //        8.00      41.10         9.40      5.00       8.00
		47  //
		48  // %overflow   = percentage of buckets which have an overflow bucket
		49  // bytes/entry = overhead bytes used per key/elem pair
		50  // hitprobe    = # of entries to check when looking up a present key
		51  // missprobe   = # of entries to check when looking up an absent key
		52  //
		53  // Keep in mind this data is for maximally loaded tables, i.e. just
		54  // before the table grows. Typical tables will be somewhat less loaded.
		55  
		56  import (
		57  	"runtime/internal/atomic"
		58  	"runtime/internal/math"
		59  	"runtime/internal/sys"
		60  	"unsafe"
		61  )
		62  
		63  const (
		64  	// Maximum number of key/elem pairs a bucket can hold.
		65  	bucketCntBits = 3
		66  	bucketCnt     = 1 << bucketCntBits
		67  
		68  	// Maximum average load of a bucket that triggers growth is 6.5.
		69  	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
		70  	loadFactorNum = 13
		71  	loadFactorDen = 2
		72  
		73  	// Maximum key or elem size to keep inline (instead of mallocing per element).
		74  	// Must fit in a uint8.
		75  	// Fast versions cannot handle big elems - the cutoff size for
		76  // fast versions in cmd/compile/internal/gc/walk.go must be at most this elem size.
		77  	maxKeySize	= 128
		78  	maxElemSize = 128
		79  
		80  	// data offset should be the size of the bmap struct, but needs to be
		81  	// aligned correctly. For amd64p32 this means 64-bit alignment
		82  	// even though pointers are 32 bit.
		83  	dataOffset = unsafe.Offsetof(struct {
		84  		b bmap
		85  		v int64
		86  	}{}.v)
		87  
		88  	// Possible tophash values. We reserve a few possibilities for special marks.
		89  	// Each bucket (including its overflow buckets, if any) will have either all or none of its
		90  	// entries in the evacuated* states (except during the evacuate() method, which only happens
		91  	// during map writes and thus no one else can observe the map during that time).
		92  	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
		93  	emptyOne       = 1 // this cell is empty
		94  	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
		95  	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
		96  	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
		97  	minTopHash     = 5 // minimum tophash for a normal filled cell.
		98  
		99  	// flags
	 100  	iterator     = 1 // there may be an iterator using buckets
	 101  	oldIterator  = 2 // there may be an iterator using oldbuckets
	 102  	hashWriting  = 4 // a goroutine is writing to the map
	 103  	sameSizeGrow = 8 // the current map growth is to a new map of the same size
	 104  
	 105  	// sentinel bucket ID for iterator checks
	 106  	noCheck = 1<<(8*sys.PtrSize) - 1
	 107  )
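
To make the integer load-factor arithmetic concrete, here is a small standalone sketch (not runtime code; growthThreshold is an invented name) that prints the count at which a map with 1<<B buckets grows, i.e. roughly 6.5 entries per bucket:

	package main

	import "fmt"

	// growthThreshold mirrors loadFactorNum*(bucketShift(B)/loadFactorDen):
	// with 1<<B buckets, growth triggers once the count exceeds ~6.5 * 2^B.
	func growthThreshold(B uint8) uint64 {
		return 13 * ((uint64(1) << B) / 2)
	}

	func main() {
		for B := uint8(1); B <= 5; B++ {
			fmt.Printf("B=%d: %d buckets, grow when count > %d\n", B, 1<<B, growthThreshold(B))
		}
	}

(Maps holding at most bucketCnt entries never grow via this check, regardless of B.)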
	 108  
	 109  // isEmpty reports whether the given tophash array entry represents an empty bucket entry.
	 110  func isEmpty(x uint8) bool {
	 111  	return x <= emptyOne
	 112  }
	 113  
	 114  // A header for a Go map.
	 115  type hmap struct {
	 116  	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	 117  	// Make sure this stays in sync with the compiler's definition.
	 118  	count     int // # live cells == size of map. Must be first (used by len() builtin)
	 119  	flags     uint8
	 120  	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	 121  	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	 122  	hash0     uint32 // hash seed
	 123  
	 124  	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	 125  	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	 126  	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
	 127  
	 128  	extra *mapextra // optional fields
	 129  }
	 130  
	 131  // mapextra holds fields that are not present on all maps.
	 132  type mapextra struct {
	 133  	// If both key and elem do not contain pointers and are inline, then we mark bucket
	 134  	// type as containing no pointers. This avoids scanning such maps.
	 135  	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	 136  	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	 137  	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	 138  	// overflow contains overflow buckets for hmap.buckets.
	 139  	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	 140  	// The indirection allows storing a pointer to the slice in hiter.
	 141  	overflow		*[]*bmap
	 142  	oldoverflow *[]*bmap
	 143  
	 144  	// nextOverflow holds a pointer to a free overflow bucket.
	 145  	nextOverflow *bmap
	 146  }
	 147  
	 148  // A bucket for a Go map.
	 149  type bmap struct {
	 150  	// tophash generally contains the top byte of the hash value
	 151  	// for each key in this bucket. If tophash[0] < minTopHash,
	 152  	// tophash[0] is a bucket evacuation state instead.
	 153  	tophash [bucketCnt]uint8
	 154  	// Followed by bucketCnt keys and then bucketCnt elems.
	 155  	// NOTE: packing all the keys together and then all the elems together makes the
	 156  	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	 157  	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	 158  	// Followed by an overflow pointer.
	 159  }
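
As an illustration of why keys and elems are grouped rather than interleaved, the sketch below hand-expands the layout of one bucket for a map[int64]int8 on a 64-bit platform (the type names are invented; the real bucket type is generated by the compiler):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// Hand-expanded bucket for map[int64]int8: all keys, then all elems,
	// then the overflow pointer. No per-entry padding is needed.
	type bucketInt64Int8 struct {
		tophash  [8]uint8
		keys     [8]int64
		elems    [8]int8
		overflow *bucketInt64Int8
	}

	// The rejected alternative: an interleaved key/elem pair is padded
	// to 16 bytes, wasting 7 bytes per entry.
	type interleavedPair struct {
		key  int64
		elem int8
	}

	func main() {
		// 88 bytes for the whole grouped bucket vs 128 bytes for just
		// eight padded pairs (before tophash and the overflow pointer).
		fmt.Println(unsafe.Sizeof(bucketInt64Int8{}), 8*unsafe.Sizeof(interleavedPair{}))
	}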
	 160  
	 161  // A hash iteration structure.
	 162  // If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go to indicate
	 163  // the layout of this structure.
	 164  type hiter struct {
	 165  	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
	 166  	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
	 167  	t           *maptype
	 168  	h           *hmap
	 169  	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	 170  	bptr        *bmap          // current bucket
	 171  	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	 172  	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	 173  	startBucket uintptr        // bucket iteration started at
	 174  	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	 175  	wrapped     bool           // already wrapped around from end of bucket array to beginning
	 176  	B           uint8
	 177  	i           uint8
	 178  	bucket      uintptr
	 179  	checkBucket uintptr
	 180  }
	 181  
	 182  // bucketShift returns 1<<b, optimized for code generation.
	 183  func bucketShift(b uint8) uintptr {
	 184  	// Masking the shift amount allows overflow checks to be elided.
	 185  	return uintptr(1) << (b & (sys.PtrSize*8 - 1))
	 186  }
	 187  
	 188  // bucketMask returns 1<<b - 1, optimized for code generation.
	 189  func bucketMask(b uint8) uintptr {
	 190  	return bucketShift(b) - 1
	 191  }
	 192  
	 193  // tophash calculates the tophash value for hash.
	 194  func tophash(hash uintptr) uint8 {
	 195  	top := uint8(hash >> (sys.PtrSize*8 - 8))
	 196  	if top < minTopHash {
	 197  		top += minTopHash
	 198  	}
	 199  	return top
	 200  }
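
A minimal standalone sketch (B and the hash value are arbitrary) of how one hash value is split between bucket selection and tophash, matching bucketMask and tophash above on a 64-bit platform:

	package main

	import "fmt"

	func main() {
		const B = 5                        // 32 buckets
		hash := uint64(0xdeadbeefcafef00d) // example hash value
		bucket := hash & (1<<B - 1)        // low-order bits select the bucket (bucketMask)
		top := uint8(hash >> (64 - 8))     // high-order byte distinguishes entries within the bucket
		if top < 5 {                       // minTopHash: stay clear of the marker values
			top += 5
		}
		fmt.Printf("bucket %d, tophash %#x\n", bucket, top)
	}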
	 201  
	 202  func evacuated(b *bmap) bool {
	 203  	h := b.tophash[0]
	 204  	return h > emptyOne && h < minTopHash
	 205  }
	 206  
	 207  func (b *bmap) overflow(t *maptype) *bmap {
	 208  	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
	 209  }
	 210  
	 211  func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	 212  	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
	 213  }
	 214  
	 215  func (b *bmap) keys() unsafe.Pointer {
	 216  	return add(unsafe.Pointer(b), dataOffset)
	 217  }
	 218  
	 219  // incrnoverflow increments h.noverflow.
	 220  // noverflow counts the number of overflow buckets.
	 221  // This is used to trigger same-size map growth.
	 222  // See also tooManyOverflowBuckets.
	 223  // To keep hmap small, noverflow is a uint16.
	 224  // When there are few buckets, noverflow is an exact count.
	 225  // When there are many buckets, noverflow is an approximate count.
	 226  func (h *hmap) incrnoverflow() {
	 227  	// We trigger same-size map growth if there are
	 228  	// as many overflow buckets as buckets.
	 229  	// We need to be able to count to 1<<h.B.
	 230  	if h.B < 16 {
	 231  		h.noverflow++
	 232  		return
	 233  	}
	 234  	// Increment with probability 1/(1<<(h.B-15)).
	 235  	// When we reach 1<<15 - 1, we will have approximately
	 236  	// as many overflow buckets as buckets.
	 237  	mask := uint32(1)<<(h.B-15) - 1
	 238  	// Example: if h.B == 18, then mask == 7,
	 239  	// and fastrand & 7 == 0 with probability 1/8.
	 240  	if fastrand()&mask == 0 {
	 241  		h.noverflow++
	 242  	}
	 243  }
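
The sampling can be checked with a rough standalone simulation (math/rand stands in for the runtime's fastrand): at B = 18 each new overflow bucket bumps the counter with probability 1/8, so about 2^18 overflow buckets drive noverflow to roughly 2^15, where tooManyOverflowBuckets fires:

	package main

	import (
		"fmt"
		"math/rand"
	)

	func main() {
		const B = 18
		mask := uint32(1)<<(B-15) - 1 // == 7, so increment with probability 1/8
		var noverflow uint16
		for i := 0; i < 1<<B; i++ { // simulate 2^18 overflow-bucket allocations
			if rand.Uint32()&mask == 0 {
				noverflow++
			}
		}
		fmt.Printf("sampled noverflow = %d (ideal 32768)\n", noverflow)
	}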
	 244  
	 245  func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	 246  	var ovf *bmap
	 247  	if h.extra != nil && h.extra.nextOverflow != nil {
	 248  		// We have preallocated overflow buckets available.
	 249  		// See makeBucketArray for more details.
	 250  		ovf = h.extra.nextOverflow
	 251  		if ovf.overflow(t) == nil {
	 252  			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
	 253  			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
	 254  		} else {
	 255  			// This is the last preallocated overflow bucket.
	 256  			// Reset the overflow pointer on this bucket,
	 257  			// which was set to a non-nil sentinel value.
	 258  			ovf.setoverflow(t, nil)
	 259  			h.extra.nextOverflow = nil
	 260  		}
	 261  	} else {
	 262  		ovf = (*bmap)(newobject(t.bucket))
	 263  	}
	 264  	h.incrnoverflow()
	 265  	if t.bucket.ptrdata == 0 {
	 266  		h.createOverflow()
	 267  		*h.extra.overflow = append(*h.extra.overflow, ovf)
	 268  	}
	 269  	b.setoverflow(t, ovf)
	 270  	return ovf
	 271  }
	 272  
	 273  func (h *hmap) createOverflow() {
	 274  	if h.extra == nil {
	 275  		h.extra = new(mapextra)
	 276  	}
	 277  	if h.extra.overflow == nil {
	 278  		h.extra.overflow = new([]*bmap)
	 279  	}
	 280  }
	 281  
	 282  func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	 283  	if int64(int(hint)) != hint {
	 284  		hint = 0
	 285  	}
	 286  	return makemap(t, int(hint), h)
	 287  }
	 288  
	 289  // makemap_small implements Go map creation for make(map[k]v) and
	 290  // make(map[k]v, hint) when hint is known to be at most bucketCnt
	 291  // at compile time and the map needs to be allocated on the heap.
	 292  func makemap_small() *hmap {
	 293  	h := new(hmap)
	 294  	h.hash0 = fastrand()
	 295  	return h
	 296  }
	 297  
	 298  // makemap implements Go map creation for make(map[k]v, hint).
	 299  // If the compiler has determined that the map or the first bucket
	 300  // can be created on the stack, h and/or bucket may be non-nil.
	 301  // If h != nil, the map can be created directly in h.
	 302  // If h.buckets != nil, bucket pointed to can be used as the first bucket.
	 303  func makemap(t *maptype, hint int, h *hmap) *hmap {
	 304  	mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
	 305  	if overflow || mem > maxAlloc {
	 306  		hint = 0
	 307  	}
	 308  
	 309  	// initialize Hmap
	 310  	if h == nil {
	 311  		h = new(hmap)
	 312  	}
	 313  	h.hash0 = fastrand()
	 314  
	 315  	// Find the size parameter B which will hold the requested # of elements.
	 316  	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	 317  	B := uint8(0)
	 318  	for overLoadFactor(hint, B) {
	 319  		B++
	 320  	}
	 321  	h.B = B
	 322  
	 323  	// allocate initial hash table
	 324  	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	 325  	// If hint is large zeroing this memory could take a while.
	 326  	if h.B != 0 {
	 327  		var nextOverflow *bmap
	 328  		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
	 329  		if nextOverflow != nil {
	 330  			h.extra = new(mapextra)
	 331  			h.extra.nextOverflow = nextOverflow
	 332  		}
	 333  	}
	 334  
	 335  	return h
	 336  }
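
As an out-of-runtime sketch of the B-selection loop above (bucketsFor is an invented name; it just restates the overLoadFactor arithmetic with plain integers), here is which B a few hints end up with:

	package main

	import "fmt"

	// bucketsFor picks the smallest B such that hint entries stay within
	// an average load of 6.5 per bucket, like the loop in makemap.
	func bucketsFor(hint int) uint8 {
		over := func(count int, B uint8) bool {
			return count > 8 && uint64(count) > 13*((uint64(1)<<B)/2)
		}
		B := uint8(0)
		for over(hint, B) {
			B++
		}
		return B
	}

	func main() {
		for _, hint := range []int{0, 8, 9, 100, 1000} {
			B := bucketsFor(hint)
			fmt.Printf("hint=%4d -> B=%d (%d buckets)\n", hint, B, 1<<B)
		}
	}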
	 337  
	 338  // makeBucketArray initializes a backing array for map buckets.
	 339  // 1<<b is the minimum number of buckets to allocate.
	 340  // dirtyalloc should either be nil or a bucket array previously
	 341  // allocated by makeBucketArray with the same t and b parameters.
	 342  // If dirtyalloc is nil a new backing array will be alloced and
	 343  // otherwise dirtyalloc will be cleared and reused as backing array.
	 344  func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	 345  	base := bucketShift(b)
	 346  	nbuckets := base
	 347  	// For small b, overflow buckets are unlikely.
	 348  	// Avoid the overhead of the calculation.
	 349  	if b >= 4 {
	 350  		// Add on the estimated number of overflow buckets
	 351  		// required to insert the median number of elements
	 352  		// used with this value of b.
	 353  		nbuckets += bucketShift(b - 4)
	 354  		sz := t.bucket.size * nbuckets
	 355  		up := roundupsize(sz)
	 356  		if up != sz {
	 357  			nbuckets = up / t.bucket.size
	 358  		}
	 359  	}
	 360  
	 361  	if dirtyalloc == nil {
	 362  		buckets = newarray(t.bucket, int(nbuckets))
	 363  	} else {
	 364  		// dirtyalloc was previously generated by
	 365  		// the above newarray(t.bucket, int(nbuckets))
	 366  		// but may not be empty.
	 367  		buckets = dirtyalloc
	 368  		size := t.bucket.size * nbuckets
	 369  		if t.bucket.ptrdata != 0 {
	 370  			memclrHasPointers(buckets, size)
	 371  		} else {
	 372  			memclrNoHeapPointers(buckets, size)
	 373  		}
	 374  	}
	 375  
	 376  	if base != nbuckets {
	 377  		// We preallocated some overflow buckets.
	 378  		// To keep the overhead of tracking these overflow buckets to a minimum,
	 379  		// we use the convention that if a preallocated overflow bucket's overflow
	 380  		// pointer is nil, then there are more available by bumping the pointer.
	 381  		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
	 382  		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
	 383  		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
	 384  		last.setoverflow(t, (*bmap)(buckets))
	 385  	}
	 386  	return buckets, nextOverflow
	 387  }
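
For example, with b = 7 the base is 128 buckets and the estimate adds bucketShift(7-4) = 8 preallocated overflow buckets; roundupsize may then raise nbuckets slightly further so the allocation fills out its malloc size class.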
	 388  
	 389  // mapaccess1 returns a pointer to h[key]. Never returns nil, instead
	 390  // it will return a reference to the zero object for the elem type if
	 391  // the key is not in the map.
	 392  // NOTE: The returned pointer may keep the whole map live, so don't
	 393  // hold onto it for very long.
	 394  func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	 395  	if raceenabled && h != nil {
	 396  		callerpc := getcallerpc()
	 397  		pc := funcPC(mapaccess1)
	 398  		racereadpc(unsafe.Pointer(h), callerpc, pc)
	 399  		raceReadObjectPC(t.key, key, callerpc, pc)
	 400  	}
	 401  	if msanenabled && h != nil {
	 402  		msanread(key, t.key.size)
	 403  	}
	 404  	if h == nil || h.count == 0 {
	 405  		if t.hashMightPanic() {
	 406  			t.hasher(key, 0) // see issue 23734
	 407  		}
	 408  		return unsafe.Pointer(&zeroVal[0])
	 409  	}
	 410  	if h.flags&hashWriting != 0 {
	 411  		throw("concurrent map read and map write")
	 412  	}
	 413  	hash := t.hasher(key, uintptr(h.hash0))
	 414  	m := bucketMask(h.B)
	 415  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	 416  	if c := h.oldbuckets; c != nil {
	 417  		if !h.sameSizeGrow() {
	 418  			// There used to be half as many buckets; mask down one more power of two.
	 419  			m >>= 1
	 420  		}
	 421  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
	 422  		if !evacuated(oldb) {
	 423  			b = oldb
	 424  		}
	 425  	}
	 426  	top := tophash(hash)
	 427  bucketloop:
	 428  	for ; b != nil; b = b.overflow(t) {
	 429  		for i := uintptr(0); i < bucketCnt; i++ {
	 430  			if b.tophash[i] != top {
	 431  				if b.tophash[i] == emptyRest {
	 432  					break bucketloop
	 433  				}
	 434  				continue
	 435  			}
	 436  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
	 437  			if t.indirectkey() {
	 438  				k = *((*unsafe.Pointer)(k))
	 439  			}
	 440  			if t.key.equal(key, k) {
	 441  				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
	 442  				if t.indirectelem() {
	 443  					e = *((*unsafe.Pointer)(e))
	 444  				}
	 445  				return e
	 446  			}
	 447  		}
	 448  	}
	 449  	return unsafe.Pointer(&zeroVal[0])
	 450  }
	 451  
	 452  func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	 453  	if raceenabled && h != nil {
	 454  		callerpc := getcallerpc()
	 455  		pc := funcPC(mapaccess2)
	 456  		racereadpc(unsafe.Pointer(h), callerpc, pc)
	 457  		raceReadObjectPC(t.key, key, callerpc, pc)
	 458  	}
	 459  	if msanenabled && h != nil {
	 460  		msanread(key, t.key.size)
	 461  	}
	 462  	if h == nil || h.count == 0 {
	 463  		if t.hashMightPanic() {
	 464  			t.hasher(key, 0) // see issue 23734
	 465  		}
	 466  		return unsafe.Pointer(&zeroVal[0]), false
	 467  	}
	 468  	if h.flags&hashWriting != 0 {
	 469  		throw("concurrent map read and map write")
	 470  	}
	 471  	hash := t.hasher(key, uintptr(h.hash0))
	 472  	m := bucketMask(h.B)
	 473  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	 474  	if c := h.oldbuckets; c != nil {
	 475  		if !h.sameSizeGrow() {
	 476  			// There used to be half as many buckets; mask down one more power of two.
	 477  			m >>= 1
	 478  		}
	 479  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
	 480  		if !evacuated(oldb) {
	 481  			b = oldb
	 482  		}
	 483  	}
	 484  	top := tophash(hash)
	 485  bucketloop:
	 486  	for ; b != nil; b = b.overflow(t) {
	 487  		for i := uintptr(0); i < bucketCnt; i++ {
	 488  			if b.tophash[i] != top {
	 489  				if b.tophash[i] == emptyRest {
	 490  					break bucketloop
	 491  				}
	 492  				continue
	 493  			}
	 494  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
	 495  			if t.indirectkey() {
	 496  				k = *((*unsafe.Pointer)(k))
	 497  			}
	 498  			if t.key.equal(key, k) {
	 499  				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
	 500  				if t.indirectelem() {
	 501  					e = *((*unsafe.Pointer)(e))
	 502  				}
	 503  				return e, true
	 504  			}
	 505  		}
	 506  	}
	 507  	return unsafe.Pointer(&zeroVal[0]), false
	 508  }
	 509  
	 510  // returns both key and elem. Used by map iterator
	 511  func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	 512  	if h == nil || h.count == 0 {
	 513  		return nil, nil
	 514  	}
	 515  	hash := t.hasher(key, uintptr(h.hash0))
	 516  	m := bucketMask(h.B)
	 517  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	 518  	if c := h.oldbuckets; c != nil {
	 519  		if !h.sameSizeGrow() {
	 520  			// There used to be half as many buckets; mask down one more power of two.
	 521  			m >>= 1
	 522  		}
	 523  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
	 524  		if !evacuated(oldb) {
	 525  			b = oldb
	 526  		}
	 527  	}
	 528  	top := tophash(hash)
	 529  bucketloop:
	 530  	for ; b != nil; b = b.overflow(t) {
	 531  		for i := uintptr(0); i < bucketCnt; i++ {
	 532  			if b.tophash[i] != top {
	 533  				if b.tophash[i] == emptyRest {
	 534  					break bucketloop
	 535  				}
	 536  				continue
	 537  			}
	 538  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
	 539  			if t.indirectkey() {
	 540  				k = *((*unsafe.Pointer)(k))
	 541  			}
	 542  			if t.key.equal(key, k) {
	 543  				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
	 544  				if t.indirectelem() {
	 545  					e = *((*unsafe.Pointer)(e))
	 546  				}
	 547  				return k, e
	 548  			}
	 549  		}
	 550  	}
	 551  	return nil, nil
	 552  }
	 553  
	 554  func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	 555  	e := mapaccess1(t, h, key)
	 556  	if e == unsafe.Pointer(&zeroVal[0]) {
	 557  		return zero
	 558  	}
	 559  	return e
	 560  }
	 561  
	 562  func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	 563  	e := mapaccess1(t, h, key)
	 564  	if e == unsafe.Pointer(&zeroVal[0]) {
	 565  		return zero, false
	 566  	}
	 567  	return e, true
	 568  }
	 569  
	 570  // Like mapaccess, but allocates a slot for the key if it is not present in the map.
	 571  func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	 572  	if h == nil {
	 573  		panic(plainError("assignment to entry in nil map"))
	 574  	}
	 575  	if raceenabled {
	 576  		callerpc := getcallerpc()
	 577  		pc := funcPC(mapassign)
	 578  		racewritepc(unsafe.Pointer(h), callerpc, pc)
	 579  		raceReadObjectPC(t.key, key, callerpc, pc)
	 580  	}
	 581  	if msanenabled {
	 582  		msanread(key, t.key.size)
	 583  	}
	 584  	if h.flags&hashWriting != 0 {
	 585  		throw("concurrent map writes")
	 586  	}
	 587  	hash := t.hasher(key, uintptr(h.hash0))
	 588  
	 589  	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	 590  	// in which case we have not actually done a write.
	 591  	h.flags ^= hashWriting
	 592  
	 593  	if h.buckets == nil {
	 594  		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	 595  	}
	 596  
	 597  again:
	 598  	bucket := hash & bucketMask(h.B)
	 599  	if h.growing() {
	 600  		growWork(t, h, bucket)
	 601  	}
	 602  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	 603  	top := tophash(hash)
	 604  
	 605  	var inserti *uint8
	 606  	var insertk unsafe.Pointer
	 607  	var elem unsafe.Pointer
	 608  bucketloop:
	 609  	for {
	 610  		for i := uintptr(0); i < bucketCnt; i++ {
	 611  			if b.tophash[i] != top {
	 612  				if isEmpty(b.tophash[i]) && inserti == nil {
	 613  					inserti = &b.tophash[i]
	 614  					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
	 615  					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
	 616  				}
	 617  				if b.tophash[i] == emptyRest {
	 618  					break bucketloop
	 619  				}
	 620  				continue
	 621  			}
	 622  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
	 623  			if t.indirectkey() {
	 624  				k = *((*unsafe.Pointer)(k))
	 625  			}
	 626  			if !t.key.equal(key, k) {
	 627  				continue
	 628  			}
	 629  			// already have a mapping for key. Update it.
	 630  			if t.needkeyupdate() {
	 631  				typedmemmove(t.key, k, key)
	 632  			}
	 633  			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
	 634  			goto done
	 635  		}
	 636  		ovf := b.overflow(t)
	 637  		if ovf == nil {
	 638  			break
	 639  		}
	 640  		b = ovf
	 641  	}
	 642  
	 643  	// Did not find mapping for key. Allocate new cell & add entry.
	 644  
	 645  	// If we hit the max load factor or we have too many overflow buckets,
	 646  	// and we're not already in the middle of growing, start growing.
	 647  	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
	 648  		hashGrow(t, h)
	 649  		goto again // Growing the table invalidates everything, so try again
	 650  	}
	 651  
	 652  	if inserti == nil {
	 653  		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
	 654  		newb := h.newoverflow(t, b)
	 655  		inserti = &newb.tophash[0]
	 656  		insertk = add(unsafe.Pointer(newb), dataOffset)
	 657  		elem = add(insertk, bucketCnt*uintptr(t.keysize))
	 658  	}
	 659  
	 660  	// store new key/elem at insert position
	 661  	if t.indirectkey() {
	 662  		kmem := newobject(t.key)
	 663  		*(*unsafe.Pointer)(insertk) = kmem
	 664  		insertk = kmem
	 665  	}
	 666  	if t.indirectelem() {
	 667  		vmem := newobject(t.elem)
	 668  		*(*unsafe.Pointer)(elem) = vmem
	 669  	}
	 670  	typedmemmove(t.key, insertk, key)
	 671  	*inserti = top
	 672  	h.count++
	 673  
	 674  done:
	 675  	if h.flags&hashWriting == 0 {
	 676  		throw("concurrent map writes")
	 677  	}
	 678  	h.flags &^= hashWriting
	 679  	if t.indirectelem() {
	 680  		elem = *((*unsafe.Pointer)(elem))
	 681  	}
	 682  	return elem
	 683  }
	 684  
	 685  func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	 686  	if raceenabled && h != nil {
	 687  		callerpc := getcallerpc()
	 688  		pc := funcPC(mapdelete)
	 689  		racewritepc(unsafe.Pointer(h), callerpc, pc)
	 690  		raceReadObjectPC(t.key, key, callerpc, pc)
	 691  	}
	 692  	if msanenabled && h != nil {
	 693  		msanread(key, t.key.size)
	 694  	}
	 695  	if h == nil || h.count == 0 {
	 696  		if t.hashMightPanic() {
	 697  			t.hasher(key, 0) // see issue 23734
	 698  		}
	 699  		return
	 700  	}
	 701  	if h.flags&hashWriting != 0 {
	 702  		throw("concurrent map writes")
	 703  	}
	 704  
	 705  	hash := t.hasher(key, uintptr(h.hash0))
	 706  
	 707  	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	 708  	// in which case we have not actually done a write (delete).
	 709  	h.flags ^= hashWriting
	 710  
	 711  	bucket := hash & bucketMask(h.B)
	 712  	if h.growing() {
	 713  		growWork(t, h, bucket)
	 714  	}
	 715  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	 716  	bOrig := b
	 717  	top := tophash(hash)
	 718  search:
	 719  	for ; b != nil; b = b.overflow(t) {
	 720  		for i := uintptr(0); i < bucketCnt; i++ {
	 721  			if b.tophash[i] != top {
	 722  				if b.tophash[i] == emptyRest {
	 723  					break search
	 724  				}
	 725  				continue
	 726  			}
	 727  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
	 728  			k2 := k
	 729  			if t.indirectkey() {
	 730  				k2 = *((*unsafe.Pointer)(k2))
	 731  			}
	 732  			if !t.key.equal(key, k2) {
	 733  				continue
	 734  			}
	 735  			// Only clear key if there are pointers in it.
	 736  			if t.indirectkey() {
	 737  				*(*unsafe.Pointer)(k) = nil
	 738  			} else if t.key.ptrdata != 0 {
	 739  				memclrHasPointers(k, t.key.size)
	 740  			}
	 741  			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
	 742  			if t.indirectelem() {
	 743  				*(*unsafe.Pointer)(e) = nil
	 744  			} else if t.elem.ptrdata != 0 {
	 745  				memclrHasPointers(e, t.elem.size)
	 746  			} else {
	 747  				memclrNoHeapPointers(e, t.elem.size)
	 748  			}
	 749  			b.tophash[i] = emptyOne
	 750  			// If the bucket now ends in a bunch of emptyOne states,
	 751  			// change those to emptyRest states.
	 752  			// It would be nice to make this a separate function, but
	 753  			// for loops are not currently inlineable.
	 754  			if i == bucketCnt-1 {
	 755  				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
	 756  					goto notLast
	 757  				}
	 758  			} else {
	 759  				if b.tophash[i+1] != emptyRest {
	 760  					goto notLast
	 761  				}
	 762  			}
	 763  			for {
	 764  				b.tophash[i] = emptyRest
	 765  				if i == 0 {
	 766  					if b == bOrig {
	 767  						break // beginning of initial bucket, we're done.
	 768  					}
	 769  					// Find previous bucket, continue at its last entry.
	 770  					c := b
	 771  					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
	 772  					}
	 773  					i = bucketCnt - 1
	 774  				} else {
	 775  					i--
	 776  				}
	 777  				if b.tophash[i] != emptyOne {
	 778  					break
	 779  				}
	 780  			}
	 781  		notLast:
	 782  			h.count--
	 783  			// Reset the hash seed to make it more difficult for attackers to
	 784  			// repeatedly trigger hash collisions. See issue 25237.
	 785  			if h.count == 0 {
	 786  				h.hash0 = fastrand()
	 787  			}
	 788  			break search
	 789  		}
	 790  	}
	 791  
	 792  	if h.flags&hashWriting == 0 {
	 793  		throw("concurrent map writes")
	 794  	}
	 795  	h.flags &^= hashWriting
	 796  }
	 797  
	 798  // mapiterinit initializes the hiter struct used for ranging over maps.
	 799  // The hiter struct pointed to by 'it' is allocated on the stack
	 800  // by the compiler's order pass or on the heap by reflect_mapiterinit.
	 801  // Both need a zeroed hiter since the struct contains pointers.
	 802  func mapiterinit(t *maptype, h *hmap, it *hiter) {
	 803  	if raceenabled && h != nil {
	 804  		callerpc := getcallerpc()
	 805  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	 806  	}
	 807  
	 808  	if h == nil || h.count == 0 {
	 809  		return
	 810  	}
	 811  
	 812  	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
	 813  		throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
	 814  	}
	 815  	it.t = t
	 816  	it.h = h
	 817  
	 818  	// grab snapshot of bucket state
	 819  	it.B = h.B
	 820  	it.buckets = h.buckets
	 821  	if t.bucket.ptrdata == 0 {
	 822  		// Allocate the current slice and remember pointers to both current and old.
	 823  		// This preserves all relevant overflow buckets alive even if
	 824  		// the table grows and/or overflow buckets are added to the table
	 825  		// while we are iterating.
	 826  		h.createOverflow()
	 827  		it.overflow = h.extra.overflow
	 828  		it.oldoverflow = h.extra.oldoverflow
	 829  	}
	 830  
	 831  	// decide where to start
	 832  	r := uintptr(fastrand())
	 833  	if h.B > 31-bucketCntBits {
	 834  		r += uintptr(fastrand()) << 31
	 835  	}
	 836  	it.startBucket = r & bucketMask(h.B)
	 837  	it.offset = uint8(r >> h.B & (bucketCnt - 1))
	 838  
	 839  	// iterator state
	 840  	it.bucket = it.startBucket
	 841  
	 842  	// Remember we have an iterator.
	 843  	// Can run concurrently with another mapiterinit().
	 844  	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
	 845  		atomic.Or8(&h.flags, iterator|oldIterator)
	 846  	}
	 847  
	 848  	mapiternext(it)
	 849  }
	 850  
	 851  func mapiternext(it *hiter) {
	 852  	h := it.h
	 853  	if raceenabled {
	 854  		callerpc := getcallerpc()
	 855  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	 856  	}
	 857  	if h.flags&hashWriting != 0 {
	 858  		throw("concurrent map iteration and map write")
	 859  	}
	 860  	t := it.t
	 861  	bucket := it.bucket
	 862  	b := it.bptr
	 863  	i := it.i
	 864  	checkBucket := it.checkBucket
	 865  
	 866  next:
	 867  	if b == nil {
	 868  		if bucket == it.startBucket && it.wrapped {
	 869  			// end of iteration
	 870  			it.key = nil
	 871  			it.elem = nil
	 872  			return
	 873  		}
	 874  		if h.growing() && it.B == h.B {
	 875  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
	 876  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
	 877  			// bucket hasn't been evacuated) then we need to iterate through the old
	 878  			// bucket and only return the ones that will be migrated to this bucket.
	 879  			oldbucket := bucket & it.h.oldbucketmask()
	 880  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	 881  			if !evacuated(b) {
	 882  				checkBucket = bucket
	 883  			} else {
	 884  				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
	 885  				checkBucket = noCheck
	 886  			}
	 887  		} else {
	 888  			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
	 889  			checkBucket = noCheck
	 890  		}
	 891  		bucket++
	 892  		if bucket == bucketShift(it.B) {
	 893  			bucket = 0
	 894  			it.wrapped = true
	 895  		}
	 896  		i = 0
	 897  	}
	 898  	for ; i < bucketCnt; i++ {
	 899  		offi := (i + it.offset) & (bucketCnt - 1)
	 900  		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
	 901  			// TODO: emptyRest is hard to use here, as we start iterating
	 902  			// in the middle of a bucket. It's feasible, just tricky.
	 903  			continue
	 904  		}
	 905  		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
	 906  		if t.indirectkey() {
	 907  			k = *((*unsafe.Pointer)(k))
	 908  		}
	 909  		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
	 910  		if checkBucket != noCheck && !h.sameSizeGrow() {
	 911  			// Special case: iterator was started during a grow to a larger size
	 912  			// and the grow is not done yet. We're working on a bucket whose
	 913  			// oldbucket has not been evacuated yet. Or at least, it wasn't
	 914  			// evacuated when we started the bucket. So we're iterating
	 915  			// through the oldbucket, skipping any keys that will go
	 916  			// to the other new bucket (each oldbucket expands to two
	 917  			// buckets during a grow).
	 918  			if t.reflexivekey() || t.key.equal(k, k) {
	 919  				// If the item in the oldbucket is not destined for
	 920  				// the current new bucket in the iteration, skip it.
	 921  				hash := t.hasher(k, uintptr(h.hash0))
	 922  				if hash&bucketMask(it.B) != checkBucket {
	 923  					continue
	 924  				}
	 925  			} else {
	 926  				// Hash isn't repeatable if k != k (NaNs).	We need a
	 927  				// repeatable and randomish choice of which direction
	 928  				// to send NaNs during evacuation. We'll use the low
	 929  				// bit of tophash to decide which way NaNs go.
	 930  				// NOTE: this case is why we need two evacuate tophash
	 931  				// values, evacuatedX and evacuatedY, that differ in
	 932  				// their low bit.
	 933  				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
	 934  					continue
	 935  				}
	 936  			}
	 937  		}
	 938  		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
	 939  			!(t.reflexivekey() || t.key.equal(k, k)) {
	 940  			// This is the golden data, we can return it.
	 941  			// OR
	 942  			// key!=key, so the entry can't be deleted or updated, so we can just return it.
	 943  			// That's lucky for us because when key!=key we can't look it up successfully.
	 944  			it.key = k
	 945  			if t.indirectelem() {
	 946  				e = *((*unsafe.Pointer)(e))
	 947  			}
	 948  			it.elem = e
	 949  		} else {
	 950  			// The hash table has grown since the iterator was started.
	 951  			// The golden data for this key is now somewhere else.
	 952  			// Check the current hash table for the data.
	 953  			// This code handles the case where the key
	 954  			// has been deleted, updated, or deleted and reinserted.
	 955  			// NOTE: we need to regrab the key as it has potentially been
	 956  			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
	 957  			rk, re := mapaccessK(t, h, k)
	 958  			if rk == nil {
	 959  				continue // key has been deleted
	 960  			}
	 961  			it.key = rk
	 962  			it.elem = re
	 963  		}
	 964  		it.bucket = bucket
	 965  		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
	 966  			it.bptr = b
	 967  		}
	 968  		it.i = i + 1
	 969  		it.checkBucket = checkBucket
	 970  		return
	 971  	}
	 972  	b = b.overflow(t)
	 973  	i = 0
	 974  	goto next
	 975  }
	 976  
	 977  // mapclear deletes all keys from a map.
	 978  func mapclear(t *maptype, h *hmap) {
	 979  	if raceenabled && h != nil {
	 980  		callerpc := getcallerpc()
	 981  		pc := funcPC(mapclear)
	 982  		racewritepc(unsafe.Pointer(h), callerpc, pc)
	 983  	}
	 984  
	 985  	if h == nil || h.count == 0 {
	 986  		return
	 987  	}
	 988  
	 989  	if h.flags&hashWriting != 0 {
	 990  		throw("concurrent map writes")
	 991  	}
	 992  
	 993  	h.flags ^= hashWriting
	 994  
	 995  	h.flags &^= sameSizeGrow
	 996  	h.oldbuckets = nil
	 997  	h.nevacuate = 0
	 998  	h.noverflow = 0
	 999  	h.count = 0
	1000  
	1001  	// Reset the hash seed to make it more difficult for attackers to
	1002  	// repeatedly trigger hash collisions. See issue 25237.
	1003  	h.hash0 = fastrand()
	1004  
	1005  	// Keep the mapextra allocation but clear any extra information.
	1006  	if h.extra != nil {
	1007  		*h.extra = mapextra{}
	1008  	}
	1009  
	1010  	// makeBucketArray clears the memory pointed to by h.buckets
	1011  	// and recovers any overflow buckets by generating them
	1012  	// as if h.buckets was newly alloced.
	1013  	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	1014  	if nextOverflow != nil {
	1015  		// If overflow buckets are created then h.extra
	1016  		// will have been allocated during initial bucket creation.
	1017  		h.extra.nextOverflow = nextOverflow
	1018  	}
	1019  
	1020  	if h.flags&hashWriting == 0 {
	1021  		throw("concurrent map writes")
	1022  	}
	1023  	h.flags &^= hashWriting
	1024  }
	1025  
	1026  func hashGrow(t *maptype, h *hmap) {
	1027  	// If we've hit the load factor, get bigger.
	1028  	// Otherwise, there are too many overflow buckets,
	1029  	// so keep the same number of buckets and "grow" laterally.
	1030  	bigger := uint8(1)
	1031  	if !overLoadFactor(h.count+1, h.B) {
	1032  		bigger = 0
	1033  		h.flags |= sameSizeGrow
	1034  	}
	1035  	oldbuckets := h.buckets
	1036  	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
	1037  
	1038  	flags := h.flags &^ (iterator | oldIterator)
	1039  	if h.flags&iterator != 0 {
	1040  		flags |= oldIterator
	1041  	}
	1042  	// commit the grow (atomic wrt gc)
	1043  	h.B += bigger
	1044  	h.flags = flags
	1045  	h.oldbuckets = oldbuckets
	1046  	h.buckets = newbuckets
	1047  	h.nevacuate = 0
	1048  	h.noverflow = 0
	1049  
	1050  	if h.extra != nil && h.extra.overflow != nil {
	1051  		// Promote current overflow buckets to the old generation.
	1052  		if h.extra.oldoverflow != nil {
	1053  			throw("oldoverflow is not nil")
	1054  		}
	1055  		h.extra.oldoverflow = h.extra.overflow
	1056  		h.extra.overflow = nil
	1057  	}
	1058  	if nextOverflow != nil {
	1059  		if h.extra == nil {
	1060  			h.extra = new(mapextra)
	1061  		}
	1062  		h.extra.nextOverflow = nextOverflow
	1063  	}
	1064  
	1065  	// the actual copying of the hash table data is done incrementally
	1066  	// by growWork() and evacuate().
	1067  }
	1068  
	1069  // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
	1070  func overLoadFactor(count int, B uint8) bool {
	1071  	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
	1072  }
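
For example, with B = 5 there are 32 buckets and the map grows once it would hold more than 13*(32/2) = 208 entries, i.e. an average load above 6.5 per bucket. For B = 0 the right-hand side is zero, but the count > bucketCnt clause already keeps tiny maps from growing.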
	1073  
	1074  // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
	1075  // Note that most of these overflow buckets must be in sparse use;
	1076  // if use was dense, then we'd have already triggered regular map growth.
	1077  func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	1078  	// If the threshold is too low, we do extraneous work.
	1079  	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	1080  	// "too many" means (approximately) as many overflow buckets as regular buckets.
	1081  	// See incrnoverflow for more details.
	1082  	if B > 15 {
	1083  		B = 15
	1084  	}
	1085  	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	1086  	return noverflow >= uint16(1)<<(B&15)
	1087  }
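
For example, at B = 18 the cap makes the threshold 1<<15; since incrnoverflow only counts roughly one in every 1<<(18-15) = 8 overflow buckets at that size, reaching the threshold corresponds to on the order of 1<<18 real overflow buckets, about one per regular bucket.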
	1088  
	1089  // growing reports whether h is growing. The growth may be to the same size or bigger.
	1090  func (h *hmap) growing() bool {
	1091  	return h.oldbuckets != nil
	1092  }
	1093  
	1094  // sameSizeGrow reports whether the current growth is to a map of the same size.
	1095  func (h *hmap) sameSizeGrow() bool {
	1096  	return h.flags&sameSizeGrow != 0
	1097  }
	1098  
	1099  // noldbuckets calculates the number of buckets prior to the current map growth.
	1100  func (h *hmap) noldbuckets() uintptr {
	1101  	oldB := h.B
	1102  	if !h.sameSizeGrow() {
	1103  		oldB--
	1104  	}
	1105  	return bucketShift(oldB)
	1106  }
	1107  
	1108  // oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
	1109  func (h *hmap) oldbucketmask() uintptr {
	1110  	return h.noldbuckets() - 1
	1111  }
	1112  
	1113  func growWork(t *maptype, h *hmap, bucket uintptr) {
	1114  	// make sure we evacuate the oldbucket corresponding
	1115  	// to the bucket we're about to use
	1116  	evacuate(t, h, bucket&h.oldbucketmask())
	1117  
	1118  	// evacuate one more oldbucket to make progress on growing
	1119  	if h.growing() {
	1120  		evacuate(t, h, h.nevacuate)
	1121  	}
	1122  }
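
Each map write during a grow therefore evacuates at most two old buckets (the one it touches plus the one at h.nevacuate), so the migration completes after roughly noldbuckets() writes even if no single bucket is written twice.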
	1123  
	1124  func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	1125  	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
	1126  	return evacuated(b)
	1127  }
	1128  
	1129  // evacDst is an evacuation destination.
	1130  type evacDst struct {
	1131  	b *bmap          // current destination bucket
	1132  	i int            // key/elem index into b
	1133  	k unsafe.Pointer // pointer to current key storage
	1134  	e unsafe.Pointer // pointer to current elem storage
	1135  }
	1136  
	1137  func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	1138  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	1139  	newbit := h.noldbuckets()
	1140  	if !evacuated(b) {
	1141  		// TODO: reuse overflow buckets instead of using new ones, if there
	1142  		// is no iterator using the old buckets.	(If !oldIterator.)
	1143  
	1144  		// xy contains the x and y (low and high) evacuation destinations.
	1145  		var xy [2]evacDst
	1146  		x := &xy[0]
	1147  		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
	1148  		x.k = add(unsafe.Pointer(x.b), dataOffset)
	1149  		x.e = add(x.k, bucketCnt*uintptr(t.keysize))
	1150  
	1151  		if !h.sameSizeGrow() {
	1152  			// Only calculate y pointers if we're growing bigger.
	1153  			// Otherwise GC can see bad pointers.
	1154  			y := &xy[1]
	1155  			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
	1156  			y.k = add(unsafe.Pointer(y.b), dataOffset)
	1157  			y.e = add(y.k, bucketCnt*uintptr(t.keysize))
	1158  		}
	1159  
	1160  		for ; b != nil; b = b.overflow(t) {
	1161  			k := add(unsafe.Pointer(b), dataOffset)
	1162  			e := add(k, bucketCnt*uintptr(t.keysize))
	1163  			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
	1164  				top := b.tophash[i]
	1165  				if isEmpty(top) {
	1166  					b.tophash[i] = evacuatedEmpty
	1167  					continue
	1168  				}
	1169  				if top < minTopHash {
	1170  					throw("bad map state")
	1171  				}
	1172  				k2 := k
	1173  				if t.indirectkey() {
	1174  					k2 = *((*unsafe.Pointer)(k2))
	1175  				}
	1176  				var useY uint8
	1177  				if !h.sameSizeGrow() {
	1178  					// Compute hash to make our evacuation decision (whether we need
	1179  					// to send this key/elem to bucket x or bucket y).
	1180  					hash := t.hasher(k2, uintptr(h.hash0))
	1181  					if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
	1182  						// If key != key (NaNs), then the hash could be (and probably
	1183  						// will be) entirely different from the old hash. Moreover,
	1184  						// it isn't reproducible. Reproducibility is required in the
	1185  						// presence of iterators, as our evacuation decision must
	1186  						// match whatever decision the iterator made.
	1187  						// Fortunately, we have the freedom to send these keys either
	1188  						// way. Also, tophash is meaningless for these kinds of keys.
	1189  						// We let the low bit of tophash drive the evacuation decision.
	1190  						// We recompute a new random tophash for the next level so
	1191  						// these keys will get evenly distributed across all buckets
	1192  						// after multiple grows.
	1193  						useY = top & 1
	1194  						top = tophash(hash)
	1195  					} else {
	1196  						if hash&newbit != 0 {
	1197  							useY = 1
	1198  						}
	1199  					}
	1200  				}
	1201  
	1202  				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
	1203  					throw("bad evacuatedN")
	1204  				}
	1205  
	1206  				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
	1207  				dst := &xy[useY]								 // evacuation destination
	1208  
	1209  				if dst.i == bucketCnt {
	1210  					dst.b = h.newoverflow(t, dst.b)
	1211  					dst.i = 0
	1212  					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
	1213  					dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
	1214  				}
	1215  				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
	1216  				if t.indirectkey() {
	1217  					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
	1218  				} else {
	1219  					typedmemmove(t.key, dst.k, k) // copy elem
	1220  				}
	1221  				if t.indirectelem() {
	1222  					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
	1223  				} else {
	1224  					typedmemmove(t.elem, dst.e, e)
	1225  				}
	1226  				dst.i++
	1227  				// These updates might push these pointers past the end of the
	1228  				// key or elem arrays.	That's ok, as we have the overflow pointer
	1229  				// at the end of the bucket to protect against pointing past the
	1230  				// end of the bucket.
	1231  				dst.k = add(dst.k, uintptr(t.keysize))
	1232  				dst.e = add(dst.e, uintptr(t.elemsize))
	1233  			}
	1234  		}
	1235  		// Unlink the overflow buckets & clear key/elem to help GC.
	1236  		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
	1237  			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
	1238  			// Preserve b.tophash because the evacuation
	1239  			// state is maintained there.
	1240  			ptr := add(b, dataOffset)
	1241  			n := uintptr(t.bucketsize) - dataOffset
	1242  			memclrHasPointers(ptr, n)
	1243  		}
	1244  	}
	1245  
	1246  	if oldbucket == h.nevacuate {
	1247  		advanceEvacuationMark(h, t, newbit)
	1248  	}
	1249  }
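
For example, when a map grows from 16 to 32 buckets (newbit = 16), old bucket 3 is split in two: entries whose hash has the newbit bit clear stay in new bucket 3 (the X destination) and entries with that bit set move to new bucket 19 (the Y destination, oldbucket+newbit).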
	1250  
	1251  func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	1252  	h.nevacuate++
	1253  	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	1254  	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	1255  	stop := h.nevacuate + 1024
	1256  	if stop > newbit {
	1257  		stop = newbit
	1258  	}
	1259  	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
	1260  		h.nevacuate++
	1261  	}
	1262  	if h.nevacuate == newbit { // newbit == # of oldbuckets
	1263  		// Growing is all done. Free old main bucket array.
	1264  		h.oldbuckets = nil
	1265  		// Can discard old overflow buckets as well.
	1266  		// If they are still referenced by an iterator,
	1267  		// then the iterator holds a pointer to the slice.
	1268  		if h.extra != nil {
	1269  			h.extra.oldoverflow = nil
	1270  		}
	1271  		h.flags &^= sameSizeGrow
	1272  	}
	1273  }
	1274  
	1275  // Reflect stubs. Called from ../reflect/asm_*.s
	1276  
	1277  //go:linkname reflect_makemap reflect.makemap
	1278  func reflect_makemap(t *maptype, cap int) *hmap {
	1279  	// Check invariants and reflect's math.
	1280  	if t.key.equal == nil {
	1281  		throw("runtime.reflect_makemap: unsupported map key type")
	1282  	}
	1283  	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
	1284  		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
	1285  		throw("key size wrong")
	1286  	}
	1287  	if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
	1288  		t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
	1289  		throw("elem size wrong")
	1290  	}
	1291  	if t.key.align > bucketCnt {
	1292  		throw("key align too big")
	1293  	}
	1294  	if t.elem.align > bucketCnt {
	1295  		throw("elem align too big")
	1296  	}
	1297  	if t.key.size%uintptr(t.key.align) != 0 {
	1298  		throw("key size not a multiple of key align")
	1299  	}
	1300  	if t.elem.size%uintptr(t.elem.align) != 0 {
	1301  		throw("elem size not a multiple of elem align")
	1302  	}
	1303  	if bucketCnt < 8 {
	1304  		throw("bucketsize too small for proper alignment")
	1305  	}
	1306  	if dataOffset%uintptr(t.key.align) != 0 {
	1307  		throw("need padding in bucket (key)")
	1308  	}
	1309  	if dataOffset%uintptr(t.elem.align) != 0 {
	1310  		throw("need padding in bucket (elem)")
	1311  	}
	1312  
	1313  	return makemap(t, cap, nil)
	1314  }
	1315  
	1316  //go:linkname reflect_mapaccess reflect.mapaccess
	1317  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	1318  	elem, ok := mapaccess2(t, h, key)
	1319  	if !ok {
	1320  		// reflect wants nil for a missing element
	1321  		elem = nil
	1322  	}
	1323  	return elem
	1324  }
	1325  
	1326  //go:linkname reflect_mapassign reflect.mapassign
	1327  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	1328  	p := mapassign(t, h, key)
	1329  	typedmemmove(t.elem, p, elem)
	1330  }
	1331  
	1332  //go:linkname reflect_mapdelete reflect.mapdelete
	1333  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	1334  	mapdelete(t, h, key)
	1335  }
	1336  
	1337  //go:linkname reflect_mapiterinit reflect.mapiterinit
	1338  func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	1339  	it := new(hiter)
	1340  	mapiterinit(t, h, it)
	1341  	return it
	1342  }
	1343  
	1344  //go:linkname reflect_mapiternext reflect.mapiternext
	1345  func reflect_mapiternext(it *hiter) {
	1346  	mapiternext(it)
	1347  }
	1348  
	1349  //go:linkname reflect_mapiterkey reflect.mapiterkey
	1350  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	1351  	return it.key
	1352  }
	1353  
	1354  //go:linkname reflect_mapiterelem reflect.mapiterelem
	1355  func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	1356  	return it.elem
	1357  }
	1358  
	1359  //go:linkname reflect_maplen reflect.maplen
	1360  func reflect_maplen(h *hmap) int {
	1361  	if h == nil {
	1362  		return 0
	1363  	}
	1364  	if raceenabled {
	1365  		callerpc := getcallerpc()
	1366  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	1367  	}
	1368  	return h.count
	1369  }
	1370  
	1371  //go:linkname reflectlite_maplen internal/reflectlite.maplen
	1372  func reflectlite_maplen(h *hmap) int {
	1373  	if h == nil {
	1374  		return 0
	1375  	}
	1376  	if raceenabled {
	1377  		callerpc := getcallerpc()
	1378  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	1379  	}
	1380  	return h.count
	1381  }
	1382  
	1383  const maxZero = 1024 // must match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize
	1384  var zeroVal [maxZero]byte
	1385  
