Source file src/runtime/trace.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for every event.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // gcController.heapLive change [timestamp, heap_alloc]
	traceEvHeapGoal          = 34 // gcController.heapGoal change [timestamp, heap goal in bytes]
	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// Byte is used but only 6 bits are available for event type.
	// The remaining bits are used to specify the number of arguments.
	// That means, the max event type value is 63.
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine);
	// 16 is used on all other architectures.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)

	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128

	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1

	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10

	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6

	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for end user.
	traceFutileWakeup byte = 128
)
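
// Illustration (not part of the runtime): the first byte of every event packs
// the event type into the low 6 bits and the inline argument count into the
// top 2 bits (see traceEventLocked below). A count of 3 means "3 or more
// arguments" and is followed by an explicit payload length. A hypothetical
// decoder for that header byte b:
//
//	ev := b & (1<<traceArgCountShift - 1) // event type, 0..63
//	narg := b >> traceArgCountShift       // inline argument count, 0..3
//	if narg == 3 {
//		// The next varint is the event payload length in bytes.
//	}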

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like datastructure
	stringsLock mutex
	strings     map[string]uint64 // interning of string payloads
	stringSeq   uint64            // sequence number for string payloads

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.pos
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	// Do not stop the world during GC so we ensure we always see
	// a consistent view of GC-related events (e.g. a start is always
	// paired with an end).
	stopTheWorldGC("start tracing")

	// Prevent sysmon from running any code that could generate events.
	lock(&sched.sysmonlock)

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	// World is stopped, so a racy iteration over allgs is safe here.
	forEachGRace(func(gp *g) {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	})
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()
	return nil
}
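
// Illustration (not part of the runtime): most programs do not call
// StartTrace/StopTrace/ReadTrace directly; the runtime/trace package wraps
// them and runs the reader goroutine for you. A minimal, self-contained
// sketch:
//
//	package main
//
//	import (
//		"log"
//		"os"
//		"runtime/trace"
//	)
//
//	func main() {
//		f, err := os.Create("trace.out")
//		if err != nil {
//			log.Fatal(err)
//		}
//		defer f.Close()
//		if err := trace.Start(f); err != nil { // calls runtime.StartTrace under the hood
//			log.Fatal(err)
//		}
//		defer trace.Stop() // calls runtime.StopTrace and drains runtime.ReadTrace
//		// ... workload to be traced ...
//	}
//
// The resulting file can then be inspected with `go tool trace trace.out`.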

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorldGC("stop tracing")

	// See the comment in StartTrace.
	lock(&sched.sysmonlock)

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		unlock(&sched.sysmonlock)
		startTheWorldGC()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	unlock(&sched.sysmonlock)

	startTheWorldGC()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}

	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}

	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also trace.enabled is already reset and new tracing can't start.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
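
// Illustration (not part of the runtime): the contract above implies a simple
// reader loop, essentially what runtime/trace.Start runs in a background
// goroutine (sketch, assuming w is an io.Writer):
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break // StopTrace was called and the trace is fully drained
//		}
//		w.Write(data) // data must be consumed before the next ReadTrace call
//	}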

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to 0,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}

func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		gp = mp.curg
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to the trace's string dictionary, returning the
// string's ID and the buffer with a traceEvString event possibly appended.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// memory allocation in above may trigger tracing and
	// cause *bufp changes. Following code now works with *bufp,
	// so there must be no memory allocation or calls to functions
	// tracing memory allocations.
	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
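
// Illustration (not part of the runtime): in this LEB128-style base-128
// encoding each byte carries 7 payload bits, low group first, and the high
// bit marks continuation. For example, 300 = 0b100101100 encodes to two
// bytes:
//
//	traceAppend(nil, 300) // => []byte{0xac, 0x02}
//	// 0xac = 0x80 | (300 & 0x7f): continuation bit + low 7 bits (0b0101100)
//	// 0x02 = 300 >> 7:            remaining high bits (0b10)
//
// Decoding reverses this: acc |= uint64(b&0x7f) << shift; shift += 7, until
// a byte with the high bit clear is seen.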

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}
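
// Illustration (not part of the runtime): the same "optimistic lock-free
// find, then double-checked insert under a mutex" pattern, sketched in
// ordinary Go with sync/atomic (Go 1.19+ for atomic.Pointer; hypothetical
// types, single bucket for brevity):
//
//	type node struct {
//		next *node
//		key  string
//		id   uint32
//	}
//
//	type internTable struct {
//		mu   sync.Mutex
//		seq  uint32
//		head atomic.Pointer[node]
//	}
//
//	func (t *internTable) find(key string) uint32 {
//		for n := t.head.Load(); n != nil; n = n.next {
//			if n.key == key {
//				return n.id
//			}
//		}
//		return 0
//	}
//
//	func (t *internTable) put(key string) uint32 {
//		if id := t.find(key); id != 0 { // fast path, no lock
//			return id
//		}
//		t.mu.Lock()
//		defer t.mu.Unlock()
//		if id := t.find(key); id != 0 { // double check under the lock
//			return id
//		}
//		t.seq++
//		n := &node{next: t.head.Load(), key: key, id: t.seq}
//		t.head.Store(n) // publish; readers see a fully initialized node
//		return t.seq
//	}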

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
	lockInit(&((*tab).lock), lockRankTraceStackTab)
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = alignUp(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}
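
// Illustration (not part of the runtime): the same bump-pointer region
// allocator sketched with ordinary Go slices (hypothetical types).
// Allocation is just a pointer increment; freeing happens all at once,
// as in drop above:
//
//	type region struct {
//		blocks [][]byte
//		off    int
//	}
//
//	const blockSize = 64 << 10
//
//	func (r *region) alloc(n int) []byte {
//		if len(r.blocks) == 0 || r.off+n > blockSize {
//			if n > blockSize {
//				panic("region: alloc too large")
//			}
//			r.blocks = append(r.blocks, make([]byte, blockSize))
//			r.off = 0
//		}
//		b := r.blocks[len(r.blocks)-1][r.off : r.off+n]
//		r.off += n
//		return b
//	}
//
//	func (r *region) drop() { *r = region{} } // release everything at once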

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called at least once.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, gcController.heapLive)
}

func traceHeapGoal() {
	if heapGoal := atomic.Load64(&gcController.heapGoal); heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvHeapGoal, -1, 0)
	} else {
		traceEvent(traceEvHeapGoal, -1, heapGoal)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for the value and its length
	// in the buffer, so the writes below cannot overflow.
	buf := bufp.ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
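
// Illustration (not part of the runtime): the trace_user* functions above are
// reached from the public runtime/trace annotation API via go:linkname. A
// minimal, self-contained usage sketch:
//
//	package main
//
//	import (
//		"context"
//		"os"
//		"runtime/trace"
//	)
//
//	func main() {
//		f, _ := os.Create("trace.out") // error handling elided for brevity
//		defer f.Close()
//		trace.Start(f)
//		defer trace.Stop()
//
//		ctx, task := trace.NewTask(context.Background(), "request") // traceEvUserTaskCreate
//		defer task.End()                                            // traceEvUserTaskEnd
//
//		trace.WithRegion(ctx, "compute", func() { // traceEvUserRegion (start/end)
//			trace.Log(ctx, "phase", "inner loop") // traceEvUserLog
//		})
//	}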