...

Source file src/runtime/debuglog.go

Documentation: runtime

		 1  // Copyright 2019 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  // This file provides an internal debug logging facility. The debug
		 6  // log is a lightweight, in-memory, per-M ring buffer. By default, the
		 7  // runtime prints the debug log on panic.
		 8  //
		 9  // To print something to the debug log, call dlog to obtain a dlogger
		10  // and use the methods on that to add values. The values will be
		11  // space-separated in the output (much like println).
		12  //
		13  // This facility can be enabled by passing -tags debuglog when
		14  // building. Without this tag, dlog calls compile to nothing.
		15  
		16  package runtime
		17  
		18  import (
		19  	"runtime/internal/atomic"
		20  	"unsafe"
		21  )
		22  
// debugLogBytes is the size of each per-M ring buffer (16 KiB). This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
// (see dlogger.s and the debugLogStringOverflow tag).
const debugLogStringLimit = debugLogBytes / 8
		31  
		32  // dlog returns a debug logger. The caller can use methods on the
		33  // returned logger to add values, which will be space-separated in the
		34  // final output, much like println. The caller must call end() to
		35  // finish the message.
		36  //
		37  // dlog can be used from highly-constrained corners of the runtime: it
		38  // is safe to use in the signal handler, from within the write
		39  // barrier, from within the stack implementation, and in places that
		40  // must be recursively nosplit.
		41  //
		42  // This will be compiled away if built without the debuglog build tag.
		43  // However, argument construction may not be. If any of the arguments
		44  // are not literals or trivial expressions, consider protecting the
		45  // call with "if dlogEnabled".
		46  //
//go:nosplit
//go:nowritebarrierrec
func dlog() *dlogger {
	if !dlogEnabled {
		return nil
	}

	// Get the time.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool.
	if l == nil {
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		for l1 := all; l1 != nil; l1 = l1.allLink {
			// Cheap atomic load first so we only attempt the
			// CAS on loggers that look free.
			if atomic.Load(&l1.owned) == 0 && atomic.Cas(&l1.owned, 0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger.
	if l == nil {
		// Allocate off-heap; dlogger is //go:notinheap (see the
		// debugLogBytes comment).
		l = (*dlogger)(sysAlloc(unsafe.Sizeof(dlogger{}), nil))
		if l == nil {
			throw("failed to allocate debug log")
		}
		// Point the embedded reader at this writer's buffer.
		l.w.r.data = &l.w.data
		l.owned = 1

		// Prepend to allDloggers list.
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dlogger)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header.
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header; end() will back-patch it
	// with the record size.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header: the time deltas from the current sync
	// base, then the current P's ID (or -1 if unknown).
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		l.w.varint(-1)
	}

	return l
}
	 117  
// A dlogger writes to the debug log.
//
// To obtain a dlogger, call dlog(). When done with the dlogger, call
// end().
//
//go:notinheap
type dlogger struct {
	// w is the ring-buffer writer holding this logger's records.
	w debugLogWriter

	// allLink is the next dlogger in the allDloggers list.
	allLink *dlogger

	// owned indicates that this dlogger is owned by an M. This is
	// accessed atomically.
	owned uint32
}
	 134  
// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races.
//
// Entries are sysAlloc'd in dlog and never freed; an idle logger is
// marked free by clearing its owned field (see dlogger.end).
var allDloggers *dlogger
	 139  
	 140  //go:nosplit
	 141  func (l *dlogger) end() {
	 142  	if !dlogEnabled {
	 143  		return
	 144  	}
	 145  
	 146  	// Fill in framing header.
	 147  	size := l.w.write - l.w.r.end
	 148  	if !l.w.writeFrameAt(l.w.r.end, size) {
	 149  		throw("record too large")
	 150  	}
	 151  
	 152  	// Commit the record.
	 153  	l.w.r.end = l.w.write
	 154  
	 155  	// Attempt to return this logger to the cache.
	 156  	if putCachedDlogger(l) {
	 157  		return
	 158  	}
	 159  
	 160  	// Return the logger to the global pool.
	 161  	atomic.Store(&l.owned, 0)
	 162  }
	 163  
// Field type tags. Each field in a record starts with one of these
// bytes, followed by type-specific data (see dlogger methods and
// debugLogReader.printVal).
const (
	debugLogUnknown = 1 + iota
	debugLogBoolTrue
	debugLogBoolFalse
	debugLogInt            // zig-zag varint
	debugLogUint           // uvarint
	debugLogHex            // uvarint, printed in hex
	debugLogPtr            // uvarint of the pointer value
	debugLogString         // uvarint length + raw bytes
	debugLogConstString    // uvarint length + uvarint offset from etext
	debugLogStringOverflow // uvarint count of bytes dropped from the preceding string

	debugLogPC        // uvarint PC, symbolized when printed
	debugLogTraceback // uvarint count + that many uvarint PCs
)
	 179  
	 180  //go:nosplit
	 181  func (l *dlogger) b(x bool) *dlogger {
	 182  	if !dlogEnabled {
	 183  		return l
	 184  	}
	 185  	if x {
	 186  		l.w.byte(debugLogBoolTrue)
	 187  	} else {
	 188  		l.w.byte(debugLogBoolFalse)
	 189  	}
	 190  	return l
	 191  }
	 192  
// i appends an int field, widened to int64.
//
//go:nosplit
func (l *dlogger) i(x int) *dlogger {
	return l.i64(int64(x))
}

// i8 appends an int8 field, widened to int64.
//
//go:nosplit
func (l *dlogger) i8(x int8) *dlogger {
	return l.i64(int64(x))
}

// i16 appends an int16 field, widened to int64.
//
//go:nosplit
func (l *dlogger) i16(x int16) *dlogger {
	return l.i64(int64(x))
}

// i32 appends an int32 field, widened to int64.
//
//go:nosplit
func (l *dlogger) i32(x int32) *dlogger {
	return l.i64(int64(x))
}
	 212  
	 213  //go:nosplit
	 214  func (l *dlogger) i64(x int64) *dlogger {
	 215  	if !dlogEnabled {
	 216  		return l
	 217  	}
	 218  	l.w.byte(debugLogInt)
	 219  	l.w.varint(x)
	 220  	return l
	 221  }
	 222  
// u appends a uint field, widened to uint64.
//
//go:nosplit
func (l *dlogger) u(x uint) *dlogger {
	return l.u64(uint64(x))
}

// uptr appends a uintptr field, widened to uint64.
//
//go:nosplit
func (l *dlogger) uptr(x uintptr) *dlogger {
	return l.u64(uint64(x))
}

// u8 appends a uint8 field, widened to uint64.
//
//go:nosplit
func (l *dlogger) u8(x uint8) *dlogger {
	return l.u64(uint64(x))
}

// u16 appends a uint16 field, widened to uint64.
//
//go:nosplit
func (l *dlogger) u16(x uint16) *dlogger {
	return l.u64(uint64(x))
}

// u32 appends a uint32 field, widened to uint64.
//
//go:nosplit
func (l *dlogger) u32(x uint32) *dlogger {
	return l.u64(uint64(x))
}
	 247  
	 248  //go:nosplit
	 249  func (l *dlogger) u64(x uint64) *dlogger {
	 250  	if !dlogEnabled {
	 251  		return l
	 252  	}
	 253  	l.w.byte(debugLogUint)
	 254  	l.w.uvarint(x)
	 255  	return l
	 256  }
	 257  
	 258  //go:nosplit
	 259  func (l *dlogger) hex(x uint64) *dlogger {
	 260  	if !dlogEnabled {
	 261  		return l
	 262  	}
	 263  	l.w.byte(debugLogHex)
	 264  	l.w.uvarint(x)
	 265  	return l
	 266  }
	 267  
// p appends a pointer field. x must be nil or a pointer-shaped value
// (chan, func, map, pointer, or unsafe.Pointer); anything else throws.
//
//go:nosplit
func (l *dlogger) p(x interface{}) *dlogger {
	if !dlogEnabled {
		return l
	}
	l.w.byte(debugLogPtr)
	if x == nil {
		l.w.uvarint(0)
	} else {
		v := efaceOf(&x)
		switch v._type.kind & kindMask {
		case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer:
			// For these kinds the interface data word holds the
			// pointer value directly.
			l.w.uvarint(uint64(uintptr(v.data)))
		default:
			throw("not a pointer type")
		}
	}
	return l
}
	 287  
// s appends a string field. Strings that point into the binary's
// static data are recorded by reference (length + offset); other
// strings are copied into the log, truncated to debugLogStringLimit.
//
//go:nosplit
func (l *dlogger) s(x string) *dlogger {
	if !dlogEnabled {
		return l
	}
	str := stringStructOf(&x)
	datap := &firstmoduledata
	if len(x) > 4 && datap.etext <= uintptr(str.str) && uintptr(str.str) < datap.end {
		// String constants are in the rodata section, which
		// isn't recorded in moduledata. But it has to be
		// somewhere between etext and end.
		//
		// Record length + offset-from-etext instead of the
		// bytes. (The len(x) > 4 cutoff presumably skips strings
		// too short for the reference to pay off — TODO confirm.)
		l.w.byte(debugLogConstString)
		l.w.uvarint(uint64(str.len))
		l.w.uvarint(uint64(uintptr(str.str) - datap.etext))
	} else {
		l.w.byte(debugLogString)
		// Alias the string's bytes as a []byte without copying.
		var b []byte
		bb := (*slice)(unsafe.Pointer(&b))
		bb.array = str.str
		bb.len, bb.cap = str.len, str.len
		if len(b) > debugLogStringLimit {
			b = b[:debugLogStringLimit]
		}
		l.w.uvarint(uint64(len(b)))
		l.w.bytes(b)
		// If truncated, record how many bytes were dropped.
		if len(b) != len(x) {
			l.w.byte(debugLogStringOverflow)
			l.w.uvarint(uint64(len(x) - len(b)))
		}
	}
	return l
}
	 320  
	 321  //go:nosplit
	 322  func (l *dlogger) pc(x uintptr) *dlogger {
	 323  	if !dlogEnabled {
	 324  		return l
	 325  	}
	 326  	l.w.byte(debugLogPC)
	 327  	l.w.uvarint(uint64(x))
	 328  	return l
	 329  }
	 330  
	 331  //go:nosplit
	 332  func (l *dlogger) traceback(x []uintptr) *dlogger {
	 333  	if !dlogEnabled {
	 334  		return l
	 335  	}
	 336  	l.w.byte(debugLogTraceback)
	 337  	l.w.uvarint(uint64(len(x)))
	 338  	for _, pc := range x {
	 339  		l.w.uvarint(uint64(pc))
	 340  	}
	 341  	return l
	 342  }
	 343  
// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
//
//go:notinheap
type debugLogWriter struct {
	// write is the next position to write at. Positions grow
	// monotonically; they are reduced modulo len(data) on access.
	write uint64
	// data is the ring buffer's backing storage.
	data  debugLogBuf

	// tick and nano are the time bases from the most recently
	// written sync record.
	tick, nano uint64

	// r is a reader that consumes records as they get overwritten
	// by the writer. It also acts as the initial reader state
	// when printing the log.
	r debugLogReader

	// buf is a scratch buffer for encoding. This is here to
	// reduce stack usage.
	buf [10]byte
}
	 376  
// A debugLogBuf is the fixed-size backing storage of one debug log
// ring buffer.
//
//go:notinheap
type debugLogBuf [debugLogBytes]byte
	 379  
const (
	// debugLogHeaderSize is the number of bytes in the framing
	// header of every dlog record.
	debugLogHeaderSize = 2

	// debugLogSyncSize is the number of bytes in a sync record:
	// a zero-size framing header followed by 64-bit tick and
	// nano values (see writeSync).
	debugLogSyncSize = debugLogHeaderSize + 2*8
)
	 388  
// ensure makes room for n more bytes by consuming (skipping) the
// oldest records from the embedded reader until the invariant
// write+n < r.begin+len(data) holds.
//
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
	for l.write+n >= l.r.begin+uint64(len(l.data)) {
		// Consume record at begin.
		if l.r.skip() == ^uint64(0) {
			// Wrapped around within a record.
			//
			// TODO(austin): It would be better to just
			// eat the whole buffer at this point, but we
			// have to communicate that to the reader
			// somehow.
			throw("record wrapped around")
		}
	}
}
	 404  
	 405  //go:nosplit
	 406  func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
	 407  	l.data[pos%uint64(len(l.data))] = uint8(size)
	 408  	l.data[(pos+1)%uint64(len(l.data))] = uint8(size >> 8)
	 409  	return size <= 0xFFFF
	 410  }
	 411  
// writeSync writes a sync record — a zero-size frame followed by the
// absolute tick and nano values — and makes it the new time base.
//
//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
	l.tick, l.nano = tick, nano
	l.ensure(debugLogHeaderSize)
	// A frame size of 0 marks this as a sync record.
	l.writeFrameAt(l.write, 0)
	l.write += debugLogHeaderSize
	l.writeUint64LE(tick)
	l.writeUint64LE(nano)
	// Commit the sync record immediately.
	l.r.end = l.write
}
	 422  
	 423  //go:nosplit
	 424  func (l *debugLogWriter) writeUint64LE(x uint64) {
	 425  	var b [8]byte
	 426  	b[0] = byte(x)
	 427  	b[1] = byte(x >> 8)
	 428  	b[2] = byte(x >> 16)
	 429  	b[3] = byte(x >> 24)
	 430  	b[4] = byte(x >> 32)
	 431  	b[5] = byte(x >> 40)
	 432  	b[6] = byte(x >> 48)
	 433  	b[7] = byte(x >> 56)
	 434  	l.bytes(b[:])
	 435  }
	 436  
	 437  //go:nosplit
	 438  func (l *debugLogWriter) byte(x byte) {
	 439  	l.ensure(1)
	 440  	pos := l.write
	 441  	l.write++
	 442  	l.data[pos%uint64(len(l.data))] = x
	 443  }
	 444  
	 445  //go:nosplit
	 446  func (l *debugLogWriter) bytes(x []byte) {
	 447  	l.ensure(uint64(len(x)))
	 448  	pos := l.write
	 449  	l.write += uint64(len(x))
	 450  	for len(x) > 0 {
	 451  		n := copy(l.data[pos%uint64(len(l.data)):], x)
	 452  		pos += uint64(n)
	 453  		x = x[n:]
	 454  	}
	 455  }
	 456  
	 457  //go:nosplit
	 458  func (l *debugLogWriter) varint(x int64) {
	 459  	var u uint64
	 460  	if x < 0 {
	 461  		u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
	 462  	} else {
	 463  		u = (uint64(x) << 1) // do not complement i, bit 0 is 0
	 464  	}
	 465  	l.uvarint(u)
	 466  }
	 467  
	 468  //go:nosplit
	 469  func (l *debugLogWriter) uvarint(u uint64) {
	 470  	i := 0
	 471  	for u >= 0x80 {
	 472  		l.buf[i] = byte(u) | 0x80
	 473  		u >>= 7
	 474  		i++
	 475  	}
	 476  	l.buf[i] = byte(u)
	 477  	i++
	 478  	l.bytes(l.buf[:i])
	 479  }
	 480  
// A debugLogReader decodes and consumes records from a debug log
// ring buffer.
type debugLogReader struct {
	// data is the ring buffer being read; positions are reduced
	// modulo len(data) on access.
	data *debugLogBuf

	// begin and end are the positions in the log of the beginning
	// and end of the log data, modulo len(data).
	begin, end uint64

	// tick and nano are the current time base at begin.
	tick, nano uint64
}
	 491  
// skip consumes the record at begin and returns its size, or
// ^uint64(0) if a complete record isn't available.
//
//go:nosplit
func (r *debugLogReader) skip() uint64 {
	// Read size at pos.
	if r.begin+debugLogHeaderSize > r.end {
		return ^uint64(0)
	}
	size := uint64(r.readUint16LEAt(r.begin))
	if size == 0 {
		// Sync packet: absorb the new time base and treat it
		// as a fixed-size record.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		size = debugLogSyncSize
	}
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	r.begin += size
	return size
}
	 511  
	 512  //go:nosplit
	 513  func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
	 514  	return uint16(r.data[pos%uint64(len(r.data))]) |
	 515  		uint16(r.data[(pos+1)%uint64(len(r.data))])<<8
	 516  }
	 517  
	 518  //go:nosplit
	 519  func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
	 520  	var b [8]byte
	 521  	for i := range b {
	 522  		b[i] = r.data[pos%uint64(len(r.data))]
	 523  		pos++
	 524  	}
	 525  	return uint64(b[0]) | uint64(b[1])<<8 |
	 526  		uint64(b[2])<<16 | uint64(b[3])<<24 |
	 527  		uint64(b[4])<<32 | uint64(b[5])<<40 |
	 528  		uint64(b[6])<<48 | uint64(b[7])<<56
	 529  }
	 530  
// peek returns the absolute tick of the record at begin, or
// ^uint64(0) if no complete record is available. It consumes any
// sync records it encounters but does not consume the record itself.
func (r *debugLogReader) peek() (tick uint64) {
	// Consume any sync records.
	size := uint64(0)
	for size == 0 {
		if r.begin+debugLogHeaderSize > r.end {
			return ^uint64(0)
		}
		size = uint64(r.readUint16LEAt(r.begin))
		if size != 0 {
			break
		}
		if r.begin+debugLogSyncSize > r.end {
			return ^uint64(0)
		}
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		r.begin += debugLogSyncSize
	}

	// Peek tick delta. Decode the uvarint with a local cursor so
	// begin is left untouched.
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	pos := r.begin + debugLogHeaderSize
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data[pos%uint64(len(r.data))]
		pos++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	// The varint must not run past the record; if it does, the
	// record is corrupt.
	if pos > r.begin+size {
		return ^uint64(0)
	}
	return r.tick + u
}
	 570  
// header consumes the record header at begin and returns the record's
// end position, its absolute tick and nano times, and the logging P's
// ID (-1 if unknown; see dlog).
func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
	// Read size. We've already skipped sync packets and checked
	// bounds in peek.
	size := uint64(r.readUint16LEAt(r.begin))
	end = r.begin + size
	r.begin += debugLogHeaderSize

	// Read tick, nano, and p. The stored times are deltas from
	// the current time base.
	tick = r.uvarint() + r.tick
	nano = r.uvarint() + r.nano
	p = int(r.varint())

	return
}
	 585  
	 586  func (r *debugLogReader) uvarint() uint64 {
	 587  	var u uint64
	 588  	for i := uint(0); ; i += 7 {
	 589  		b := r.data[r.begin%uint64(len(r.data))]
	 590  		r.begin++
	 591  		u |= uint64(b&^0x80) << i
	 592  		if b&0x80 == 0 {
	 593  			break
	 594  		}
	 595  	}
	 596  	return u
	 597  }
	 598  
	 599  func (r *debugLogReader) varint() int64 {
	 600  	u := r.uvarint()
	 601  	var v int64
	 602  	if u&1 == 0 {
	 603  		v = int64(u >> 1)
	 604  	} else {
	 605  		v = ^int64(u >> 1)
	 606  	}
	 607  	return v
	 608  }
	 609  
// printVal decodes and prints the field at begin, advancing begin
// past it. It reports whether decoding can continue; false means the
// stream is corrupt from this point on.
func (r *debugLogReader) printVal() bool {
	typ := r.data[r.begin%uint64(len(r.data))]
	r.begin++

	switch typ {
	default:
		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
		return false

	case debugLogUnknown:
		print("<unknown kind>")

	case debugLogBoolTrue:
		print(true)

	case debugLogBoolFalse:
		print(false)

	case debugLogInt:
		print(r.varint())

	case debugLogUint:
		print(r.uvarint())

	case debugLogHex, debugLogPtr:
		print(hex(r.uvarint()))

	case debugLogString:
		sl := r.uvarint()
		if r.begin+sl > r.end {
			r.begin = r.end
			print("<string length corrupted>")
			break
		}
		// The string may wrap around the ring buffer; emit it
		// in contiguous chunks.
		for sl > 0 {
			b := r.data[r.begin%uint64(len(r.data)):]
			if uint64(len(b)) > sl {
				b = b[:sl]
			}
			r.begin += uint64(len(b))
			sl -= uint64(len(b))
			gwrite(b)
		}

	case debugLogConstString:
		// Reconstruct the string from its length and offset
		// from etext (see dlogger.s).
		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
		ptr += firstmoduledata.etext
		str := stringStruct{
			str: unsafe.Pointer(ptr),
			len: len,
		}
		s := *(*string)(unsafe.Pointer(&str))
		print(s)

	case debugLogStringOverflow:
		print("..(", r.uvarint(), " more bytes)..")

	case debugLogPC:
		printDebugLogPC(uintptr(r.uvarint()), false)

	case debugLogTraceback:
		n := int(r.uvarint())
		for i := 0; i < n; i++ {
			print("\n\t")
			// gentraceback PCs are always return PCs.
			// Convert them to call PCs.
			//
			// TODO(austin): Expand inlined frames.
			printDebugLogPC(uintptr(r.uvarint()), true)
		}
	}

	return true
}
	 684  
// printDebugLog prints the debug log, merging the records of all
// per-M ring buffers in tick order.
func printDebugLog() {
	if !dlogEnabled {
		return
	}

	// This function should not panic or throw since it is used in
	// the fatal panic path and this may deadlock.

	printlock()

	// Get the list of all debug logs.
	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
	all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))

	// Count the logs.
	n := 0
	for l := all; l != nil; l = l.allLink {
		n++
	}
	if n == 0 {
		printunlock()
		return
	}

	// Prepare read state for all logs.
	type readState struct {
		debugLogReader
		first    bool   // true until this log's banner is printed
		lost     uint64 // bytes overwritten before the first retained record
		nextTick uint64 // tick of this log's next record (^0 if drained)
	}
	// Allocated off-heap and intentionally never freed: this only
	// runs on the fatal panic path (see comment above).
	state1 := sysAlloc(unsafe.Sizeof(readState{})*uintptr(n), nil)
	if state1 == nil {
		println("failed to allocate read state for", n, "logs")
		printunlock()
		return
	}
	state := (*[1 << 20]readState)(state1)[:n]
	{
		l := all
		for i := range state {
			s := &state[i]
			s.debugLogReader = l.w.r
			s.first = true
			s.lost = l.w.r.begin
			s.nextTick = s.peek()
			l = l.allLink
		}
	}

	// Print records.
	for {
		// Find the next record: the log with the smallest
		// pending tick.
		var best struct {
			tick uint64
			i    int
		}
		best.tick = ^uint64(0)
		for i := range state {
			if state[i].nextTick < best.tick {
				best.tick = state[i].nextTick
				best.i = i
			}
		}
		if best.tick == ^uint64(0) {
			break
		}

		// Print record.
		s := &state[best.i]
		if s.first {
			print(">> begin log ", best.i)
			if s.lost != 0 {
				print("; lost first ", s.lost>>10, "KB")
			}
			print(" <<\n")
			s.first = false
		}

		// Temporarily clamp s.end to this record's end so
		// printVal stops at the record boundary; the real end
		// is restored below.
		end, _, nano, p := s.header()
		oldEnd := s.end
		s.end = end

		print("[")
		var tmpbuf [21]byte
		pnano := int64(nano) - runtimeInitTime
		if pnano < 0 {
			// Logged before runtimeInitTime was set.
			pnano = 0
		}
		print(string(itoaDiv(tmpbuf[:], uint64(pnano), 9)))
		print(" P ", p, "] ")

		for i := 0; s.begin < s.end; i++ {
			if i > 0 {
				print(" ")
			}
			if !s.printVal() {
				// Abort this P log.
				print("<aborting P log>")
				end = oldEnd
				break
			}
		}
		println()

		// Move on to the next record.
		s.begin = end
		s.end = oldEnd
		s.nextTick = s.peek()
	}

	printunlock()
}
	 800  
// printDebugLogPC prints a single symbolized PC. If returnPC is true,
// pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool) {
	fn := findfunc(pc)
	if returnPC && (!fn.valid() || pc > fn.entry) {
		// Back up to an address inside the CALL instruction,
		// unless pc is already at the function entry (where
		// backing up would leave the function).
		//
		// TODO(austin): Don't back up if the previous frame
		// was a sigpanic.
		pc--
	}

	print(hex(pc))
	if !fn.valid() {
		print(" [unknown PC]")
	} else {
		name := funcname(fn)
		file, line := funcline(fn, pc)
		print(" [", name, "+", hex(pc-fn.entry),
			" ", file, ":", line, "]")
	}
}
	 821  

View as plain text