Source file: src/runtime/map_fast32.go
package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
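
// mapaccess1_fast32 is the lookup fast path for maps with 4-byte keys, used by
// the compiler for expressions like v := m[k]. It returns a pointer to the
// element for key, or a pointer to the zero value if key is not present; it
// never returns nil.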
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
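
// mapaccess2_fast32 is like mapaccess1_fast32, used for v, ok := m[k]: it also
// reports whether the key was present in the map.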
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
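
// mapassign_fast32 is the assignment fast path for maps with 4-byte keys, used
// for m[k] = v: it finds or creates the slot for key and returns a pointer to
// the element, which the caller then fills in. The map must not be nil.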
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
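
// mapassign_fast32ptr is the assignment fast path for maps whose 4-byte key is
// a single pointer (only possible on 32-bit platforms). It matches
// mapassign_fast32 except that the key is stored as an unsafe.Pointer so the
// write goes through the write barrier.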
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
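
// mapdelete_fast32 is the deletion fast path for maps with 4-byte keys, used
// for delete(m, k): it clears the key and element slots if key is present and
// maintains the emptyOne/emptyRest tophash markers.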
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			// This can only happen if pointers are 32 bit
			// wide as 64 bit pointers do not fit into a 32 bit key.
			if sys.PtrSize == 4 && t.key.ptrdata != 0 {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
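
// growWork_fast32 does a bounded amount of evacuation work while the map is
// growing, so that the grow completes incrementally as the map is written to.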
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}
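
// evacuate_fast32 copies the entries of old bucket oldbucket into the new
// bucket array, splitting them between the x (same index) and y (index+newbit)
// destinations when the table is doubling in size.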
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if sys.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
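
These helpers are not called directly by user code: cmd/compile lowers the built-in map operations to them when the map's key is a 4-byte type such as uint32 or int32 (and the element type is small enough for the fast paths). A minimal sketch of ordinary Go code whose map operations end up routed through the functions above; the lowering happens in the compiler, not in this file:

package main

import "fmt"

func main() {
	m := make(map[uint32]string)

	m[42] = "answer" // assignment: lowered to mapassign_fast32
	v := m[42]       // one-result lookup: mapaccess1_fast32
	_, ok := m[7]    // comma-ok lookup: mapaccess2_fast32
	delete(m, 42)    // deletion: mapdelete_fast32

	fmt.Println(v, ok, len(m))
}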