Source file src/runtime/map_fast64.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

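// mapaccess1_fast64 is the lookup fast path for maps keyed by a 64-bit
// value (for example map[uint64]T or map[int64]T). It returns a pointer to
// the element for key; if the key is not present it returns a pointer to
// the shared zero value, never nil.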
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
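			// The key bits are compared first (cheap for a fixed 8-byte key), but a
			// match only counts if the slot is in use: a deleted slot can still hold
			// its old key bits, so the tophash check is what validates the hit.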
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

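// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether the
// key was present in the map (the comma-ok form of a map lookup).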
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

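// mapassign_fast64 is the assignment fast path for maps keyed by a 64-bit
// value. It returns a pointer to the element slot for key, growing the map
// and allocating buckets as needed; the caller stores the element value
// through the returned pointer.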
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

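// mapassign_fast64ptr is the variant of mapassign_fast64 used when the
// 8-byte key is itself a pointer; the key is stored through
// *(*unsafe.Pointer) so that the store is treated as a pointer write and
// gets a write barrier when one is required.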
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

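// mapdelete_fast64 is the deletion fast path for maps keyed by a 64-bit
// value. Deleting from a nil or empty map is a no-op.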
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				if sys.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
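			// Walk backwards over the trailing run of emptyOne slots, converting
			// them to emptyRest; stepping past slot 0 continues at the last slot
			// of the previous bucket in the overflow chain.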
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

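// growWork_fast64 does a bounded amount of incremental growth work: it
// evacuates the old bucket corresponding to the bucket about to be used,
// plus one more old bucket if the map is still growing, so that growth
// keeps making progress and eventually completes.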
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

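// evacuate_fast64 copies the entries of old bucket oldbucket into the new
// bucket array. When the table is doubling in size, each old bucket splits
// between two destinations (x and y); during a same-size grow everything
// goes to x.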
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

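Below is a minimal sketch of the user-level code that the compiler lowers to these fast-path routines. It assumes a map whose key type is a plain 64-bit integer; a map whose key is a pointer type of the same size would route assignments through mapassign_fast64ptr instead (on 64-bit platforms).

	package main

	import "fmt"

	func main() {
		m := make(map[uint64]string)

		m[42] = "answer"      // assignment: lowered to mapassign_fast64
		v := m[42]            // one-result lookup: mapaccess1_fast64
		w, ok := m[7]         // comma-ok lookup: mapaccess2_fast64
		delete(m, 42)         // deletion: mapdelete_fast64

		fmt.Println(v, w, ok) // prints: answer  false
	}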