src/runtime/map_fast64.go
package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
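
// Example (illustrative; not part of this source file): for maps whose key
// type is a 64-bit scalar, the compiler lowers the one-result read form to
// mapaccess1_fast64:
//
//	m := map[uint64]string{1: "one"}
//	s := m[1] // becomes a call to mapaccess1_fast64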

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
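
// Example (illustrative; not part of this source file): the two-result
// "comma ok" read form is lowered to mapaccess2_fast64 instead:
//
//	if s, ok := m[1]; ok {
//		println(s)
//	}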

func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
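
// Example (illustrative; not part of this source file): a plain assignment to
// an element of a map[uint64]V is lowered to mapassign_fast64, which returns
// the address of the element slot; the compiler then emits the store:
//
//	m[42] = "forty-two" // mapassign_fast64 yields *elem, then the value is stored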

func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
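
// Example (illustrative; not part of this source file): on 64-bit platforms,
// maps with pointer keys such as map[*T]V take this variant so the key is
// written as a pointer (with a write barrier when needed):
//
//	p := new(int)
//	m2 := map[*int]string{}
//	m2[p] = "x" // becomes a call to mapassign_fast64ptr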

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				if sys.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
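
// Example (illustrative; not part of this source file): the delete builtin on
// a map with a 64-bit key is lowered to mapdelete_fast64:
//
//	delete(m, 42) // becomes a call to mapdelete_fast64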

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
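
// Note (illustrative; not part of this source file): because each write
// during a grow evacuates the bucket it is about to use plus at most one
// more, evacuation keeps pace with mutation and a grow from 2^B buckets
// finishes within O(2^B) writes, while each individual write stays O(1)
// amortized.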

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
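
// Example (illustrative; not part of this source file): when doubling from
// 2^B to 2^(B+1) buckets, old bucket i splits into new buckets i ("x") and
// i+2^B ("y"), and hash&newbit picks the destination. With B=2 (newbit=4), a
// key in old bucket 1 moves to new bucket 5 if hash&4 != 0, else it stays at
// index 1.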