// src/runtime/export_test.go

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

var Nanotime = nanotime
var NetpollBreak = netpollBreak
var Usleep = usleep

var PhysPageSize = physPageSize
var PhysHugePageSize = physHugePageSize

var NetpollGenericInit = netpollGenericInit

var Memmove = memmove
var MemclrNoHeapPointers = memclrNoHeapPointers

var LockPartialOrder = lockPartialOrder

type LockRank lockRank

func (l LockRank) String() string {
	return lockRank(l).String()
}

const PreemptMSupported = preemptMSupported

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func LFStackPush(head *uint64, node *LFNode) {
	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}

func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}

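// Illustrative sketch (not part of the original file): runtime tests
// drive the lock-free stack roughly like this; the real tests keep
// extra references to the nodes alive, since the stack hides them from
// the garbage collector.
//
//	var head uint64
//	node := &LFNode{Pushcnt: 1}
//	LFStackPush(&head, node)
//	if LFStackPop(&head) != node {
//		panic("lfstack: push/pop mismatch")
//	}
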
func Netpoll(delta int64) {
	systemstack(func() {
		netpoll(delta)
	})
}

func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

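// Illustrative sketch (not part of the original file): GCMask reports
// one byte per pointer-sized word of the object, nonzero where the GC
// expects a pointer. For a hypothetical two-word struct:
//
//	type T struct {
//		P *int // pointer word
//		N int  // scalar word
//	}
//	mask := GCMask(new(T)) // expect something like []byte{1, 0}
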
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

// Temporary to enable register ABI bringup.
// TODO(register args): convert back to local variables in
// RunSchedLocalQueueEmptyTest that get passed to the "go" stack below.
var RunSchedLocalQueueEmptyState struct {
	done  chan bool
	ready *uint32
	p     *p
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable GOMAXPROCS*4 goroutines
	// wait in the queue for a long time and per-P runq is empty).
	done := make(chan bool, 1)
	RunSchedLocalQueueEmptyState.done = done
	p := new(p)
	RunSchedLocalQueueEmptyState.p = p
	gs := make([]g, 2)
	ready := new(uint32)
	RunSchedLocalQueueEmptyState.ready = ready
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(RunSchedLocalQueueEmptyState.ready, 1); atomic.Load(RunSchedLocalQueueEmptyState.ready) != 2; {
			}
			if runqempty(RunSchedLocalQueueEmptyState.p) {
				//println("next:", next0, next1)
				throw("queue is empty")
			}
			RunSchedLocalQueueEmptyState.done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var (
	StringHash = stringHash
	BytesHash  = bytesHash
	Int32Hash  = int32Hash
	Int64Hash  = int64Hash
	MemHash    = memhash
	MemHash32  = memhash32
	MemHash64  = memhash64
	EfaceHash  = efaceHash
	IfaceHash  = ifaceHash
)

var UseAeshash = &useAeshash

func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// entry point for testing
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

var BigEndian = sys.BigEndian

// For benchmarking.

func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}

func Fastrand() uint32          { return fastrand() }
func Fastrandn(n uint32) uint32 { return fastrandn(n) }

type ProfBuf profBuf

func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
}

func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
	(*profBuf)(p).write(tag, now, hdr, stk)
}

const (
	ProfBufBlocking    = profBufBlocking
	ProfBufNonBlocking = profBufNonBlocking
)

func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
	return (*profBuf)(p).read(profBufReadMode(mode))
}

func (p *ProfBuf) Close() {
	(*profBuf)(p).close()
}

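// Illustrative sketch (not part of the original file): a profiling
// buffer round-trip in the style of the runtime's profbuf tests.
//
//	b := NewProfBuf(2, 1000, 10) // 2 header words, 1000 data words, 10 tag slots
//	var tag unsafe.Pointer
//	b.Write(&tag, Nanotime(), []uint64{0, 0}, []uintptr{123})
//	b.Close()
//	data, tags, eof := b.Read(ProfBufBlocking)
//	_, _, _ = data, tags, eof
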
func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
	stopTheWorld("ReadMetricsSlow")

	// Initialize the metrics beforehand because this could
	// allocate and skew the stats.
	metricsLock()
	initMetrics()
	metricsUnlock()

	systemstack(func() {
		// Read memstats first. It's going to flush
		// the mcaches which readMetrics does not do, so
		// going the other way around may result in
		// inconsistent statistics.
		readmemstats_m(memStats)
	})

	// Read all the supported metrics through the metrics package.
	readMetrics(samplesp, len, cap)

	startTheWorld()
}

// ReadMemStatsSlow returns both the runtime-computed MemStats and
// MemStats accumulated by scanning the heap.
func ReadMemStatsSlow() (base, slow MemStats) {
	stopTheWorld("ReadMemStatsSlow")

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		// Make sure stats don't change.
		getg().m.mallocing++

		readmemstats_m(&base)

		// Initialize slow from base and zero the fields we're
		// recomputing.
		slow = base
		slow.Alloc = 0
		slow.TotalAlloc = 0
		slow.Mallocs = 0
		slow.Frees = 0
		slow.HeapReleased = 0
		var bySize [_NumSizeClasses]struct {
			Mallocs, Frees uint64
		}

		// Add up current allocations in spans.
		for _, s := range mheap_.allspans {
			if s.state.get() != mSpanInUse {
				continue
			}
			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
				slow.Mallocs++
				slow.Alloc += uint64(s.elemsize)
			} else {
				slow.Mallocs += uint64(s.allocCount)
				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
				bySize[sizeclass].Mallocs += uint64(s.allocCount)
			}
		}

		// Add in frees by reading the free stats directly.
		var m heapStatsDelta
		memstats.heapStats.unsafeRead(&m)

		// Collect per-sizeclass free stats.
		var smallFree uint64
		for i := 0; i < _NumSizeClasses; i++ {
			slow.Frees += uint64(m.smallFreeCount[i])
			bySize[i].Frees += uint64(m.smallFreeCount[i])
			bySize[i].Mallocs += uint64(m.smallFreeCount[i])
			smallFree += uint64(m.smallFreeCount[i]) * uint64(class_to_size[i])
		}
		slow.Frees += uint64(m.tinyAllocCount) + uint64(m.largeFreeCount)
		slow.Mallocs += slow.Frees

		slow.TotalAlloc = slow.Alloc + uint64(m.largeFree) + smallFree

		for i := range slow.BySize {
			slow.BySize[i].Mallocs = bySize[i].Mallocs
			slow.BySize[i].Frees = bySize[i].Frees
		}

		// Add in released (scavenged) bytes from the page allocator's
		// bitmaps and from each P's page cache.
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
			slow.HeapReleased += uint64(pg) * pageSize
		}
		for _, p := range allp {
			pg := sys.OnesCount64(p.pcache.scav)
			slow.HeapReleased += uint64(pg) * pageSize
		}

		getg().m.mallocing--
	})

	startTheWorld()
	return
}

// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
func BlockOnSystemStack() {
	systemstack(blockOnSystemStackInternal)
}

func blockOnSystemStackInternal() {
	print("x\n")
	lock(&deadlock)
	lock(&deadlock)
}

type RWMutex struct {
	rw rwmutex
}

func (rw *RWMutex) RLock() {
	rw.rw.rlock()
}

func (rw *RWMutex) RUnlock() {
	rw.rw.runlock()
}

func (rw *RWMutex) Lock() {
	rw.rw.lock()
}

func (rw *RWMutex) Unlock() {
	rw.rw.unlock()
}

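// Illustrative sketch (not part of the original file): the wrapper
// exposes the runtime's internal rwmutex with the same shape as
// sync.RWMutex.
//
//	var rw RWMutex
//	rw.RLock()
//	// ... readers may proceed concurrently ...
//	rw.RUnlock()
//	rw.Lock()
//	// ... a writer has exclusive access ...
//	rw.Unlock()
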
const RuntimeHmapSize = unsafe.Sizeof(hmap{})

func MapBucketsCount(m map[int]int) int {
	h := *(**hmap)(unsafe.Pointer(&m))
	return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
	h := *(**hmap)(unsafe.Pointer(&m))
	return h.buckets == nil
}

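// Illustrative sketch (not part of the original file): the bucket count
// is always 2^h.B, so a test can watch it grow as keys are inserted.
//
//	m := make(map[int]int)
//	before := MapBucketsCount(m)
//	for i := 0; i < 1000; i++ {
//		m[i] = i
//	}
//	after := MapBucketsCount(m) // expect after > before
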
func LockOSCounts() (external, internal uint32) {
	g := getg()
	if g.m.lockedExt+g.m.lockedInt == 0 {
		if g.lockedm != 0 {
			panic("lockedm on non-locked goroutine")
		}
	} else {
		if g.lockedm == 0 {
			panic("nil lockedm on locked goroutine")
		}
	}
	return g.m.lockedExt, g.m.lockedInt
}

//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
	if i == 0 {
		pc, sp := getcallerpc(), getcallersp()
		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
	}
	n := 0
	systemstack(func() {
		n = TracebackSystemstack(stk, i-1)
	})
	return n
}

func KeepNArenaHints(n int) {
	hint := mheap_.arenaHints
	for i := 1; i < n; i++ {
		hint = hint.next
		if hint == nil {
			return
		}
	}
	hint.next = nil
}

// MapNextArenaHint reserves a page at the next arena growth hint,
// preventing the arena from growing there, and returns the range of
// addresses that are no longer viable.
func MapNextArenaHint() (start, end uintptr) {
	hint := mheap_.arenaHints
	addr := hint.addr
	if hint.down {
		start, end = addr-heapArenaBytes, addr
		addr -= physPageSize
	} else {
		start, end = addr, addr+heapArenaBytes
	}
	sysReserve(unsafe.Pointer(addr), physPageSize)
	return
}

func GetNextArenaHint() uintptr {
	return mheap_.arenaHints.addr
}

type G = g

type Sudog = sudog

func Getg() *G {
	return getg()
}

//go:noinline
func PanicForTesting(b []byte, i int) byte {
	return unexportedPanicForTesting(b, i)
}

//go:noinline
func unexportedPanicForTesting(b []byte, i int) byte {
	return b[i]
}

func G0StackOverflow() {
	systemstack(func() {
		stackOverflow(nil)
	})
}

// stackOverflow recurses unconditionally, consuming stack until the
// overflow check fires; used to test g0 stack overflow handling.
func stackOverflow(x *byte) {
	var buf [256]byte
	stackOverflow(&buf[0])
}

func MapTombstoneCheck(m map[int]int) {
	// Make sure emptyOne and emptyRest are distributed correctly.
	// We should have a series of filled and emptyOne cells, followed by
	// a series of emptyRest cells.
	h := *(**hmap)(unsafe.Pointer(&m))
	i := interface{}(m)
	t := *(**maptype)(unsafe.Pointer(&i))

	for x := 0; x < 1<<h.B; x++ {
		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
		n := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if b.tophash[i] != emptyRest {
					n++
				}
			}
		}
		k := 0
		for b := b0; b != nil; b = b.overflow(t) {
			for i := 0; i < bucketCnt; i++ {
				if k < n && b.tophash[i] == emptyRest {
					panic("early emptyRest")
				}
				if k >= n && b.tophash[i] != emptyRest {
					panic("late non-emptyRest")
				}
				if k == n-1 && b.tophash[i] == emptyOne {
					panic("last non-emptyRest entry is emptyOne")
				}
				k++
			}
		}
	}
}

func RunGetgThreadSwitchTest() {
	// Test that getg works correctly with thread switch.
	// With gccgo, if we generate getg inlined, the backend
	// may cache the address of the TLS variable, which
	// will become invalid after a thread switch. This test
	// checks that the bad caching doesn't happen.

	ch := make(chan int)
	go func(ch chan int) {
		ch <- 5
		LockOSThread()
	}(ch)

	g1 := getg()

	// Block on a receive. This is likely to get us a thread
	// switch. If we yield to the sender goroutine, it will
	// lock the thread, forcing us to resume on a different
	// thread.
	<-ch

	g2 := getg()
	if g1 != g2 {
		panic("g1 != g2")
	}

	// Also test getg after some control flow, as the
	// backend is sensitive to control flow.
	g3 := getg()
	if g1 != g3 {
		panic("g1 != g3")
	}
}

const (
	PageSize         = pageSize
	PallocChunkPages = pallocChunkPages
	PageAlloc64Bit   = pageAlloc64Bit
	PallocSumBytes   = pallocSumBytes
)

// Expose pallocSum for testing.
type PallocSum pallocSum

func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
func (m PallocSum) End() uint                      { return pallocSum(m).end() }

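// Illustrative sketch (not part of the original file): a pallocSum is a
// packed (start, max, end) triple, so packing should round-trip through
// the accessors.
//
//	s := PackPallocSum(1, 2, 3)
//	// s.Start() == 1, s.Max() == 2, s.End() == 3
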
// Expose pallocBits for testing.
type PallocBits pallocBits

func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
	return (*pallocBits)(b).find(npages, searchIdx)
}
func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }

// SummarizeSlow is a slow but more obviously correct implementation
// of (*pallocBits).summarize. Used for testing.
func SummarizeSlow(b *PallocBits) PallocSum {
	var start, max, end uint

	const N = uint(len(b)) * 64
	for start < N && (*pageBits)(b).get(start) == 0 {
		start++
	}
	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
		end++
	}
	run := uint(0)
	for i := uint(0); i < N; i++ {
		if (*pageBits)(b).get(i) == 0 {
			run++
		} else {
			run = 0
		}
		if run > max {
			max = run
		}
	}
	return PackPallocSum(start, max, end)
}

// Expose findBitRange64 for testing.
func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }

// Given two PallocBits, returns a set of bit ranges where
// they differ.
func DiffPallocBits(a, b *PallocBits) []BitRange {
	ba := (*pageBits)(a)
	bb := (*pageBits)(b)

	var d []BitRange
	base, size := uint(0), uint(0)
	for i := uint(0); i < uint(len(ba))*64; i++ {
		if ba.get(i) != bb.get(i) {
			if size == 0 {
				base = i
			}
			size++
		} else {
			if size != 0 {
				d = append(d, BitRange{base, size})
			}
			size = 0
		}
	}
	if size != 0 {
		d = append(d, BitRange{base, size})
	}
	return d
}

// StringifyPallocBits gets the bits in the bit range r from b,
// and returns a string containing the bits as ASCII 0 and 1
// characters.
func StringifyPallocBits(b *PallocBits, r BitRange) string {
	str := ""
	for j := r.I; j < r.I+r.N; j++ {
		if (*pageBits)(b).get(j) != 0 {
			str += "1"
		} else {
			str += "0"
		}
	}
	return str
}

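// Illustrative sketch (not part of the original file): Summarize and
// SummarizeSlow should agree on any bitmap; the runtime's tests check
// this on hand-built and random patterns.
//
//	var b PallocBits
//	b.AllocRange(0, 1) // allocate only the first page
//	fast, slow := b.Summarize(), SummarizeSlow(&b)
//	// fast and slow agree: Start() == 0, End() == PallocChunkPages-1, etc.
//	_ = StringifyPallocBits(&b, BitRange{0, 8}) // "10000000"
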
// Expose pallocData for testing.
type PallocData pallocData

func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
}
func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
func (d *PallocData) ScavengedSetRange(i, n uint) {
	(*pallocData)(d).scavenged.setRange(i, n)
}
func (d *PallocData) PallocBits() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).pallocBits)
}
func (d *PallocData) Scavenged() *PallocBits {
	return (*PallocBits)(&(*pallocData)(d).scavenged)
}

// Expose fillAligned for testing.
func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }

// Expose pageCache for testing.
type PageCache pageCache

const PageCachePages = pageCachePages

func NewPageCache(base uintptr, cache, scav uint64) PageCache {
	return PageCache(pageCache{base: base, cache: cache, scav: scav})
}
func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
	return (*pageCache)(c).alloc(npages)
}
func (c *PageCache) Flush(s *PageAlloc) {
	cp := (*pageCache)(c)
	sp := (*pageAlloc)(s)

	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(sp.mheapLock)
		cp.flush(sp)
		unlock(sp.mheapLock)
	})
}

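// Illustrative sketch (not part of the original file): a cache word
// with all 64 bits set has every page available, and Alloc hands out
// the lowest free page first.
//
//	c := NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0), 0)
//	addr, scav := c.Alloc(1)
//	// addr == c.Base(); scav == 0 because no page was marked scavenged
//	_, _ = addr, scav
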
// Expose chunk index type.
type ChunkIdx chunkIdx

// Expose pageAlloc for testing. Note that because pageAlloc is
// not in the heap, so is PageAlloc.
type PageAlloc pageAlloc

func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
	pp := (*pageAlloc)(p)

	var addr, scav uintptr
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		addr, scav = pp.alloc(npages)
		unlock(pp.mheapLock)
	})
	return addr, scav
}
func (p *PageAlloc) AllocToCache() PageCache {
	pp := (*pageAlloc)(p)

	var c PageCache
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		c = PageCache(pp.allocToCache())
		unlock(pp.mheapLock)
	})
	return c
}
func (p *PageAlloc) Free(base, npages uintptr) {
	pp := (*pageAlloc)(p)

	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		pp.free(base, npages)
		unlock(pp.mheapLock)
	})
}
func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
}
func (p *PageAlloc) Scavenge(nbytes uintptr, mayUnlock bool) (r uintptr) {
	pp := (*pageAlloc)(p)
	systemstack(func() {
		// None of the tests need any higher-level locking, so we just
		// take the lock internally.
		lock(pp.mheapLock)
		r = pp.scavenge(nbytes, mayUnlock)
		unlock(pp.mheapLock)
	})
	return
}
func (p *PageAlloc) InUse() []AddrRange {
	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
	for _, r := range p.inUse.ranges {
		ranges = append(ranges, AddrRange{r})
	}
	return ranges
}

// Returns nil if the PallocData's L2 is missing.
func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
	ci := chunkIdx(i)
	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
}

// AddrRange is a wrapper around addrRange for testing.
type AddrRange struct {
	addrRange
}

// MakeAddrRange creates a new address range.
func MakeAddrRange(base, limit uintptr) AddrRange {
	return AddrRange{makeAddrRange(base, limit)}
}

// Base returns the virtual base address of the address range.
func (a AddrRange) Base() uintptr {
	return a.addrRange.base.addr()
}

// Limit returns the virtual address of the limit of the address range.
func (a AddrRange) Limit() uintptr {
	return a.addrRange.limit.addr()
}

// Equals returns true if the two address ranges are exactly equal.
func (a AddrRange) Equals(b AddrRange) bool {
	return a == b
}

// Size returns the size in bytes of the address range.
func (a AddrRange) Size() uintptr {
	return a.addrRange.size()
}

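// Illustrative sketch (not part of the original file): ranges are
// half-open intervals [base, limit).
//
//	r := MakeAddrRange(0x8000, 0xc000)
//	// r.Base() == 0x8000, r.Limit() == 0xc000, r.Size() == 0x4000
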
// AddrRanges is a wrapper around addrRanges for testing.
type AddrRanges struct {
	addrRanges
	mutable bool
}

// NewAddrRanges creates a new empty addrRanges.
//
// Note that this initializes addrRanges just like in the
// runtime, so its memory is persistentalloc'd. Call this
// function sparingly since the memory it allocates is
// leaked.
//
// This AddrRanges is mutable, so we can test methods like
// Add.
func NewAddrRanges() AddrRanges {
	r := addrRanges{}
	r.init(new(sysMemStat))
	return AddrRanges{r, true}
}

// MakeAddrRanges creates a new addrRanges populated with
// the ranges in a.
//
// The returned AddrRanges is immutable, so methods like
// Add will fail.
func MakeAddrRanges(a ...AddrRange) AddrRanges {
	// Methods that manipulate the backing store of addrRanges.ranges should
	// not be used on the result from this function (e.g. add) since they may
	// trigger reallocation. That would normally be fine, except the new
	// memory must be mapped as a system allocation. We mimic stats accounting
	// this way.
	ranges := make([]addrRange, 0, len(a))
	total := uintptr(0)
	for _, r := range a {
		ranges = append(ranges, r.addrRange)
		total += r.Size()
	}
	return AddrRanges{addrRanges{
		ranges:     ranges,
		totalBytes: total,
		sysStat:    new(sysMemStat),
	}, false}
}

// Ranges returns a copy of the ranges described by the
// addrRanges.
func (a *AddrRanges) Ranges() []AddrRange {
	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
	for _, r := range a.addrRanges.ranges {
		result = append(result, AddrRange{r})
	}
	return result
}

// FindSucc returns the successor to base. See addrRanges.findSucc
// for more details.
func (a *AddrRanges) FindSucc(base uintptr) int {
	return a.findSucc(base)
}

// Add adds a new AddrRange to the AddrRanges.
//
// The AddrRange must be mutable (i.e. created by NewAddrRanges),
// otherwise this method will throw.
func (a *AddrRanges) Add(r AddrRange) {
	if !a.mutable {
		throw("attempt to mutate immutable AddrRanges")
	}
	a.add(r.addrRange)
}

// TotalBytes returns the totalBytes field of the addrRanges.
func (a *AddrRanges) TotalBytes() uintptr {
	return a.addrRanges.totalBytes
}

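// Illustrative sketch (not part of the original file): a mutable
// AddrRanges coalesces ranges that abut, which tests can observe via
// Ranges and TotalBytes.
//
//	rs := NewAddrRanges()
//	rs.Add(MakeAddrRange(0x1000, 0x2000))
//	rs.Add(MakeAddrRange(0x2000, 0x3000)) // abuts the first range
//	// expect len(rs.Ranges()) == 1 and rs.TotalBytes() == 0x2000
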
// BitRange represents a range over a bitmap.
type BitRange struct {
	I, N uint // bit index and length in bits
}

// NewPageAlloc creates a new page allocator for testing and
// initializes it with the scav and chunks maps. Each key in these maps
// represents a chunk index and each value is a series of bit ranges to
// set within each bitmap's chunk.
//
// The initialization of the pageAlloc preserves the invariant that if a
// scavenged bit is set the alloc bit is necessarily unset, so some
// of the bits described by scav may be cleared in the final bitmap if
// ranges in chunks overlap with them.
//
// scav is optional, and if nil, the scavenged bitmap will be cleared
// (as opposed to all 1s, which it usually is). Furthermore, every
// chunk index in scav must appear in chunks; ones that do not are
// ignored.
func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
	p := new(pageAlloc)

	// We've got an entry, so initialize the pageAlloc.
	p.init(new(mutex), nil)
	lockInit(p.mheapLock, lockRankMheap)
	p.test = true

	for i, init := range chunks {
		addr := chunkBase(chunkIdx(i))

		// Mark the chunk's existence within the pageAlloc.
		systemstack(func() {
			lock(p.mheapLock)
			p.grow(addr, pallocChunkBytes)
			unlock(p.mheapLock)
		})

		// Initialize the bitmap and update pageAlloc metadata.
		chunk := p.chunkOf(chunkIndex(addr))

		// Clear all the scavenged bits which grow set.
		chunk.scavenged.clearRange(0, pallocChunkPages)

		// Apply scavenge state if applicable.
		if scav != nil {
			if scvg, ok := scav[i]; ok {
				for _, s := range scvg {
					// Ignore the case of s.N == 0. setRange doesn't handle
					// it and it's a no-op anyway.
					if s.N != 0 {
						chunk.scavenged.setRange(s.I, s.N)
					}
				}
			}
		}

		// Apply alloc state.
		for _, s := range init {
			// Ignore the case of s.N == 0. allocRange doesn't handle
			// it and it's a no-op anyway.
			if s.N != 0 {
				chunk.allocRange(s.I, s.N)
			}
		}

		// Update heap metadata for the allocRange calls above.
		systemstack(func() {
			lock(p.mheapLock)
			p.update(addr, pallocChunkPages, false, false)
			unlock(p.mheapLock)
		})
	}

	systemstack(func() {
		lock(p.mheapLock)
		p.scavengeStartGen()
		unlock(p.mheapLock)
	})

	return (*PageAlloc)(p)
}

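// Illustrative sketch (not part of the original file): tests build
// small allocators this way, e.g. one chunk whose first 64 pages are
// already allocated.
//
//	p := NewPageAlloc(map[ChunkIdx][]BitRange{
//		BaseChunkIdx: {{0, 64}},
//	}, nil)
//	defer FreePageAlloc(p)
//	addr, _ := p.Alloc(1) // expect PageBase(BaseChunkIdx, 64)
//	_ = addr
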
// FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
// is called, pp is no longer usable.
func FreePageAlloc(pp *PageAlloc) {
	p := (*pageAlloc)(pp)

	// Free all the mapped space for the summary levels.
	if pageAlloc64Bit != 0 {
		for l := 0; l < summaryLevels; l++ {
			sysFree(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes, nil)
		}
	} else {
		resSize := uintptr(0)
		for _, s := range p.summary {
			resSize += uintptr(cap(s)) * pallocSumBytes
		}
		sysFree(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize), nil)
	}

	// Free the mapped space for chunks.
	for i := range p.chunks {
		if x := p.chunks[i]; x != nil {
			p.chunks[i] = nil
			// This memory comes from sysAlloc and will always be page-aligned.
			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), nil)
		}
	}
}

// BaseChunkIdx is a convenient chunkIdx value which works on both
// 64 bit and 32 bit platforms, allowing the tests to share code
// between the two.
//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
var BaseChunkIdx = func() ChunkIdx {
	var prefix uintptr
	if pageAlloc64Bit != 0 {
		prefix = 0xc000
	} else {
		prefix = 0x100
	}
	baseAddr := prefix * pallocChunkBytes
	if sys.GoosAix != 0 {
		baseAddr += arenaBaseOffset
	}
	return ChunkIdx(chunkIndex(baseAddr))
}()

// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
func PageBase(c ChunkIdx, pageIdx uint) uintptr {
	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
}

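// Illustrative sketch (not part of the original file): the translation
// is plain arithmetic, so page 5 of a chunk lives 5*PageSize bytes
// above the chunk's base:
//
//	// PageBase(BaseChunkIdx, 5) == PageBase(BaseChunkIdx, 0) + 5*PageSize
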
type BitsMismatch struct {
	Base      uintptr
	Got, Want uint64
}

func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
	ok = true

	// Run on the system stack to avoid stack growth allocation.
	systemstack(func() {
		getg().m.mallocing++

		// Lock so that we can safely access the bitmap.
		lock(&mheap_.lock)
	chunkLoop:
		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
			chunk := mheap_.pages.tryChunkOf(i)
			if chunk == nil {
				continue
			}
			for j := 0; j < pallocChunkPages/64; j++ {
				// Run over each 64-bit bitmap section and ensure
				// scavenged is being cleared properly on allocation.
				// If a used bit and scavenged bit are both set, that's
				// an error, and could indicate a larger problem, or
				// a stale chunk.
				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
				got := chunk.scavenged[j]
				if want != got {
					ok = false
					if n >= len(mismatches) {
						break chunkLoop
					}
					mismatches[n] = BitsMismatch{
						Base: chunkBase(i) + uintptr(j)*64*pageSize,
						Got:  got,
						Want: want,
					}
					n++
				}
			}
		}
		unlock(&mheap_.lock)

		getg().m.mallocing--
	})
	return
}

func PageCachePagesLeaked() (leaked uintptr) {
	stopTheWorld("PageCachePagesLeaked")

	// Walk over destroyed Ps and look for unflushed caches.
	deadp := allp[len(allp):cap(allp)]
	for _, p := range deadp {
		// Since we're going past len(allp) we may see nil Ps.
		// Just ignore them.
		if p != nil {
			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
		}
	}

	startTheWorld()
	return
}

var Semacquire = semacquire
var Semrelease1 = semrelease1

func SemNwait(addr *uint32) uint32 {
	root := semroot(addr)
	return atomic.Load(&root.nwait)
}

// mspan wrapper for testing.
//go:notinheap
type MSpan mspan

// Allocate an mspan for testing.
func AllocMSpan() *MSpan {
	var s *mspan
	systemstack(func() {
		lock(&mheap_.lock)
		s = (*mspan)(mheap_.spanalloc.alloc())
		unlock(&mheap_.lock)
	})
	return (*MSpan)(s)
}

// Free an allocated mspan.
func FreeMSpan(s *MSpan) {
	systemstack(func() {
		lock(&mheap_.lock)
		mheap_.spanalloc.free(unsafe.Pointer(s))
		unlock(&mheap_.lock)
	})
}

// MSpanCountAlloc treats bits as a GC mark bitmap for the span and
// returns the number of set (allocated) bits.
func MSpanCountAlloc(ms *MSpan, bits []byte) int {
	s := (*mspan)(ms)
	s.nelems = uintptr(len(bits) * 8)
	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
	result := s.countAlloc()
	s.gcmarkBits = nil
	return result
}

const (
	TimeHistSubBucketBits   = timeHistSubBucketBits
	TimeHistNumSubBuckets   = timeHistNumSubBuckets
	TimeHistNumSuperBuckets = timeHistNumSuperBuckets
)

type TimeHistogram timeHistogram

// Count returns the counts for the given bucket, subBucket indices.
// Returns true if the bucket was valid, otherwise returns the counts
// for the underflow bucket and false.
func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
	t := (*timeHistogram)(th)
	i := bucket*TimeHistNumSubBuckets + subBucket
	if i >= uint(len(t.counts)) {
		return t.underflow, false
	}
	return t.counts[i], true
}

func (th *TimeHistogram) Record(duration int64) {
	(*timeHistogram)(th).record(duration)
}

var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets

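// Illustrative sketch (not part of the original file): record a
// duration, then query buckets; an out-of-range index reports the
// underflow count and false.
//
//	var h TimeHistogram
//	h.Record(1000000) // 1ms, in nanoseconds
//	if _, ok := h.Count(10000, 0); ok {
//		panic("expected out-of-range bucket to report false")
//	}
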
func SetIntArgRegs(a int) int {
	lock(&finlock)
	old := intArgRegs
	if a >= 0 {
		intArgRegs = a
	}
	unlock(&finlock)
	return old
}

func FinalizerGAsleep() bool {
	lock(&finlock)
	result := fingwait
	unlock(&finlock)
	return result
}

// For GCTestMoveStackOnNextCall, it's important not to introduce an
// extra layer of call, since then there's a return before the "real"
// next call.
var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall

// For GCTestIsReachable, it's important that we do this as a call so
// escape analysis can see through it.
func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
	return gcTestIsReachable(ptrs...)
}

// For GCTestPointerClass, it's important that we do this as a call so
// escape analysis can see through it.
//
// This is nosplit because gcTestPointerClass is.
//
//go:nosplit
func GCTestPointerClass(p unsafe.Pointer) string {
	return gcTestPointerClass(p)
}

const Raceenabled = raceenabled