Source file src/runtime/debuglog.go

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // This file provides an internal debug logging facility. The debug
     6  // log is a lightweight, in-memory, per-M ring buffer. By default, the
     7  // runtime prints the debug log on panic.
     8  //
     9  // To print something to the debug log, call dlog to obtain a dlogger
    10  // and use the methods on that to add values. The values will be
    11  // space-separated in the output (much like println).
    12  //
    13  // This facility can be enabled by passing -tags debuglog when
    14  // building. Without this tag, dlog calls compile to nothing.
    15  //
    16  // Implementation notes
    17  //
    18  // There are two implementations of the dlog interface: dloggerImpl and
    19  // dloggerFake. dloggerFake is a no-op implementation. dlogger is type-aliased
    20  // to one or the other depending on the debuglog build tag. However, both types
    21  // always exist and are always built. This helps ensure we compile as much of
    22  // the implementation as possible in the default build configuration, while also
    23  // enabling us to achieve good test coverage of the real debuglog implementation
    24  // even when the debuglog build tag is not set.
    25  
    26  package runtime
    27  
    28  import (
    29  	"internal/abi"
    30  	"internal/byteorder"
    31  	"internal/runtime/atomic"
    32  	"internal/runtime/sys"
    33  	"unsafe"
    34  )
    35  
// debugLogBytes is the size of each per-M ring buffer (16 KiB). This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
// (see the debugLogOverflow tag).
const debugLogStringLimit = debugLogBytes / 8
    44  
// dlog returns a debug logger. The caller can use methods on the
// returned logger to add values, which will be space-separated in the
// final output, much like println. The caller must call end() to
// finish the message.
//
// dlog can be used from highly-constrained corners of the runtime: it
// is safe to use in the signal handler, from within the write
// barrier, from within the stack implementation, and in places that
// must be recursively nosplit.
//
// This will be compiled away if built without the debuglog build tag.
// However, argument construction may not be. If any of the arguments
// are not literals or trivial expressions, consider protecting the
// call with "if dlogEnabled".
//
//go:nosplit
//go:nowritebarrierrec
func dlog() dlogger {
	// dlog1 is defined to either dlogImpl or dlogFake depending on
	// the debuglog build tag (see the implementation notes above).
	return dlog1()
}
    66  
// dlogFake is the no-op implementation of dlog used when the debuglog
// build tag is not set. It returns a stateless, zero-size logger.
//
//go:nosplit
//go:nowritebarrierrec
func dlogFake() dloggerFake {
	return dloggerFake{}
}
    72  
// dlogImpl is the real implementation of dlog. It acquires a
// dloggerImpl — from the per-M cache, from the global pool, or by
// allocating a fresh one off-heap — writes the record header (time
// deltas and P ID), and returns the logger so the caller can append
// fields and finally call end().
//
//go:nosplit
//go:nowritebarrierrec
func dlogImpl() *dloggerImpl {
	// Get the time.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool.
	if l == nil {
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		for l1 := all; l1 != nil; l1 = l1.allLink {
			// The Load is a cheap filter; the CAS actually claims
			// ownership of the logger.
			if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger.
	if l == nil {
		// Use sysAllocOS instead of sysAlloc because we want to interfere
		// with the runtime as little as possible, and sysAlloc updates accounting.
		l = (*dloggerImpl)(sysAllocOS(unsafe.Sizeof(dloggerImpl{}), "debug log"))
		if l == nil {
			throw("failed to allocate debug log")
		}
		l.w.r.data = &l.w.data
		l.owned.Store(1)

		// Prepend to allDloggers list.
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dloggerImpl)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header.
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header.
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		// No P available; record -1 as the P ID.
		l.w.varint(-1)
	}

	return l
}
   141  
// A dloggerImpl writes to the debug log.
//
// To obtain a dloggerImpl, call dlog(). When done with the dloggerImpl, call
// end().
type dloggerImpl struct {
	_ sys.NotInHeap
	w debugLogWriter

	// allLink is the next dlogger in the allDloggers list.
	allLink *dloggerImpl

	// owned indicates that this dlogger is owned by an M. This is
	// accessed atomically: 0 means free, 1 means owned.
	owned atomic.Uint32
}

// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races.
var allDloggers *dloggerImpl
   162  
// A dloggerFake is a no-op implementation of dlogger. All of its
// methods discard their arguments and write nothing.
type dloggerFake struct{}

// end is a no-op on the fake logger.
//
//go:nosplit
func (l dloggerFake) end() {}
   168  
// end finishes the in-progress record: it fills in the framing header,
// commits the record so readers can see it, and returns the logger to
// the per-M cache or, failing that, the global pool.
//
//go:nosplit
func (l *dloggerImpl) end() {
	// Fill in framing header.
	size := l.w.write - l.w.r.end
	if !l.w.writeFrameAt(l.w.r.end, size) {
		// The record size didn't fit in the 16-bit frame.
		throw("record too large")
	}

	// Commit the record.
	l.w.r.end = l.w.write

	// Attempt to return this logger to the cache.
	if putCachedDlogger(l) {
		return
	}

	// Return the logger to the global pool.
	l.owned.Store(0)
}
   188  
// Debug log field tags. Every encoded field begins with one of these
// bytes, which determines how the payload that follows is decoded
// (see printVal for the decoding side).
const (
	debugLogUnknown = 1 + iota
	debugLogBoolTrue
	debugLogBoolFalse
	debugLogInt         // zig-zag varint payload
	debugLogUint        // uvarint payload
	debugLogHex         // uvarint payload, printed in hex
	debugLogPtr         // uvarint pointer value, printed in hex
	debugLogString      // uvarint length followed by raw bytes
	debugLogConstString // uvarint length and uvarint offset from etext
	debugLogHexdump     // uvarint address, uvarint length, raw bytes
	debugLogOverflow    // uvarint count of bytes truncated from the previous field

	debugLogPC        // uvarint PC, symbolized when printed
	debugLogTraceback // uvarint count followed by that many uvarint PCs
)
   205  
// b is a no-op on the fake logger.
//
//go:nosplit
func (l dloggerFake) b(x bool) dloggerFake { return l }
   208  
   209  //go:nosplit
   210  func (l *dloggerImpl) b(x bool) *dloggerImpl {
   211  	if x {
   212  		l.w.byte(debugLogBoolTrue)
   213  	} else {
   214  		l.w.byte(debugLogBoolFalse)
   215  	}
   216  	return l
   217  }
   218  
//go:nosplit
func (l dloggerFake) i(x int) dloggerFake { return l }

// i logs an int, widened to int64.
//
//go:nosplit
func (l *dloggerImpl) i(x int) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i8(x int8) dloggerFake { return l }

// i8 logs an int8, widened to int64.
//
//go:nosplit
func (l *dloggerImpl) i8(x int8) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i16(x int16) dloggerFake { return l }

// i16 logs an int16, widened to int64.
//
//go:nosplit
func (l *dloggerImpl) i16(x int16) *dloggerImpl {
	return l.i64(int64(x))
}

//go:nosplit
func (l dloggerFake) i32(x int32) dloggerFake { return l }

// i32 logs an int32, widened to int64.
//
//go:nosplit
func (l *dloggerImpl) i32(x int32) *dloggerImpl {
	return l.i64(int64(x))
}
   250  
//go:nosplit
func (l dloggerFake) i64(x int64) dloggerFake { return l }

// i64 logs an int64 as a zig-zag varint tagged debugLogInt. All other
// signed-integer methods funnel through here.
//
//go:nosplit
func (l *dloggerImpl) i64(x int64) *dloggerImpl {
	l.w.byte(debugLogInt)
	l.w.varint(x)
	return l
}
   260  
//go:nosplit
func (l dloggerFake) u(x uint) dloggerFake { return l }

// u logs a uint, widened to uint64.
//
//go:nosplit
func (l *dloggerImpl) u(x uint) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) uptr(x uintptr) dloggerFake { return l }

// uptr logs a uintptr, widened to uint64.
//
//go:nosplit
func (l *dloggerImpl) uptr(x uintptr) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u8(x uint8) dloggerFake { return l }

// u8 logs a uint8, widened to uint64.
//
//go:nosplit
func (l *dloggerImpl) u8(x uint8) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u16(x uint16) dloggerFake { return l }

// u16 logs a uint16, widened to uint64.
//
//go:nosplit
func (l *dloggerImpl) u16(x uint16) *dloggerImpl {
	return l.u64(uint64(x))
}

//go:nosplit
func (l dloggerFake) u32(x uint32) dloggerFake { return l }

// u32 logs a uint32, widened to uint64.
//
//go:nosplit
func (l *dloggerImpl) u32(x uint32) *dloggerImpl {
	return l.u64(uint64(x))
}
   300  
//go:nosplit
func (l dloggerFake) u64(x uint64) dloggerFake { return l }

// u64 logs a uint64 as a uvarint tagged debugLogUint. All other
// unsigned-integer methods funnel through here.
//
//go:nosplit
func (l *dloggerImpl) u64(x uint64) *dloggerImpl {
	l.w.byte(debugLogUint)
	l.w.uvarint(x)
	return l
}

//go:nosplit
func (l dloggerFake) hex(x uint64) dloggerFake { return l }

// hex logs a uint64 that will be printed in hexadecimal.
//
//go:nosplit
func (l *dloggerImpl) hex(x uint64) *dloggerImpl {
	l.w.byte(debugLogHex)
	l.w.uvarint(x)
	return l
}
   320  
//go:nosplit
func (l dloggerFake) p(x any) dloggerFake { return l }

// p logs a pointer-shaped value: a chan, func, map, pointer, or
// unsafe.Pointer (or nil). Any other kind throws.
//
//go:nosplit
func (l *dloggerImpl) p(x any) *dloggerImpl {
	l.w.byte(debugLogPtr)
	if x == nil {
		l.w.uvarint(0)
	} else {
		v := efaceOf(&x)
		switch v._type.Kind() {
		case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
			// For all of these kinds, the eface data word is the
			// pointer itself.
			l.w.uvarint(uint64(uintptr(v.data)))
		default:
			throw("not a pointer type")
		}
	}
	return l
}
   340  
//go:nosplit
func (l dloggerFake) s(x string) dloggerFake { return l }

// s logs a string. Strings whose data lies in the binary's read-only
// image are recorded as an offset from etext instead of being copied;
// all other strings are copied into the log, truncated to
// debugLogStringLimit bytes with an overflow marker.
//
//go:nosplit
func (l *dloggerImpl) s(x string) *dloggerImpl {
	strData := unsafe.StringData(x)
	datap := &firstmoduledata
	if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
		// String constants are in the rodata section, which
		// isn't recorded in moduledata. But it has to be
		// somewhere between etext and end.
		l.w.byte(debugLogConstString)
		l.w.uvarint(uint64(len(x)))
		l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
	} else {
		l.w.byte(debugLogString)
		// We can't use unsafe.Slice as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		var b []byte
		bb := (*slice)(unsafe.Pointer(&b))
		bb.array = unsafe.Pointer(strData)
		bb.len, bb.cap = len(x), len(x)
		if len(b) > debugLogStringLimit {
			b = b[:debugLogStringLimit]
		}
		l.w.uvarint(uint64(len(b)))
		l.w.bytes(b)
		if len(b) != len(x) {
			// Record how many bytes were dropped by the truncation.
			l.w.byte(debugLogOverflow)
			l.w.uvarint(uint64(len(x) - len(b)))
		}
	}
	return l
}
   375  
//go:nosplit
func (l dloggerFake) hexdump(p unsafe.Pointer, bytes uintptr) dloggerFake { return l }

// hexdump logs a hex dump of the given memory range, truncated to
// debugLogStringLimit bytes with an overflow marker.
//
//go:nosplit
func (l *dloggerImpl) hexdump(p unsafe.Pointer, bytes uintptr) *dloggerImpl {
	// Build a byte slice over [p, p+bytes) by hand; unsafe.Slice may
	// panic, which isn't safe in this context.
	var b []byte
	bb := (*slice)(unsafe.Pointer(&b))
	bb.array = unsafe.Pointer(p)
	bb.len, bb.cap = int(bytes), int(bytes)
	if len(b) > debugLogStringLimit {
		b = b[:debugLogStringLimit]
	}

	l.w.byte(debugLogHexdump)
	l.w.uvarint(uint64(uintptr(p)))
	l.w.uvarint(uint64(len(b)))
	l.w.bytes(b)

	if uintptr(len(b)) != bytes {
		// Record how many bytes were dropped by the truncation.
		l.w.byte(debugLogOverflow)
		l.w.uvarint(uint64(bytes) - uint64(len(b)))
	}

	return l
}
   401  
//go:nosplit
func (l dloggerFake) pc(x uintptr) dloggerFake { return l }

// pc logs a program counter that will be symbolized when printed.
//
//go:nosplit
func (l *dloggerImpl) pc(x uintptr) *dloggerImpl {
	l.w.byte(debugLogPC)
	l.w.uvarint(uint64(x))
	return l
}
   411  
//go:nosplit
func (l dloggerFake) traceback(x []uintptr) dloggerFake { return l }

// traceback logs a slice of PCs as a count followed by each PC; they
// are symbolized as return PCs when printed.
//
//go:nosplit
func (l *dloggerImpl) traceback(x []uintptr) *dloggerImpl {
	l.w.byte(debugLogTraceback)
	l.w.uvarint(uint64(len(x)))
	for _, pc := range x {
		l.w.uvarint(uint64(pc))
	}
	return l
}
   424  
// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
type debugLogWriter struct {
	_     sys.NotInHeap
	write uint64      // next position to write; ring positions are taken mod len(data.b)
	data  debugLogBuf // the backing ring buffer

	// tick and nano are the time bases from the most recently
	// written sync record.
	tick, nano uint64

	// r is a reader that consumes records as they get overwritten
	// by the writer. It also acts as the initial reader state
	// when printing the log.
	r debugLogReader

	// buf is a scratch buffer for encoding. This is here to
	// reduce stack usage.
	buf [10]byte
}

// debugLogBuf is the off-heap backing store of a debug log ring buffer.
type debugLogBuf struct {
	_ sys.NotInHeap
	b [debugLogBytes]byte
}
   461  
const (
	// debugLogHeaderSize is the number of bytes in the framing
	// header of every dlog record. The header holds the record size
	// as a little-endian uint16.
	debugLogHeaderSize = 2

	// debugLogSyncSize is the number of bytes in a sync record: a
	// zero-size framing header plus two little-endian uint64 time
	// bases (tick and nano).
	debugLogSyncSize = debugLogHeaderSize + 2*8
)
   470  
// ensure makes room for an n-byte write at the current write position
// by consuming (skipping) whole records at the reader cursor until
// the write would no longer overwrite unconsumed data.
//
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
	for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
		// Consume record at begin.
		if l.r.skip() == ^uint64(0) {
			// Wrapped around within a record.
			//
			// TODO(austin): It would be better to just
			// eat the whole buffer at this point, but we
			// have to communicate that to the reader
			// somehow.
			throw("record wrapped around")
		}
	}
}
   486  
// writeFrameAt stores size as a little-endian uint16 framing header at
// ring position pos. It reports whether size fit in 16 bits; callers
// treat false as "record too large".
//
//go:nosplit
func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
	l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
	l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
	return size <= 0xFFFF
}
   493  
// writeSync writes a sync record — a zero-size frame followed by raw
// little-endian tick and nano values — and makes tick/nano the new
// time base for subsequent record headers.
//
//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
	l.tick, l.nano = tick, nano
	l.ensure(debugLogHeaderSize)
	// A size of 0 in the framing header marks a sync record.
	l.writeFrameAt(l.write, 0)
	l.write += debugLogHeaderSize
	l.writeUint64LE(tick)
	l.writeUint64LE(nano)
	l.r.end = l.write
}
   504  
// writeUint64LE appends x to the log as 8 little-endian bytes.
//
//go:nosplit
func (l *debugLogWriter) writeUint64LE(x uint64) {
	var b [8]byte
	byteorder.LEPutUint64(b[:], x)
	l.bytes(b[:])
}
   511  
// byte appends a single byte to the log, evicting old records first
// if necessary.
//
//go:nosplit
func (l *debugLogWriter) byte(x byte) {
	l.ensure(1)
	pos := l.write
	l.write++
	l.data.b[pos%uint64(len(l.data.b))] = x
}
   519  
// bytes appends x to the log, splitting the copy where it wraps
// around the end of the ring buffer.
//
//go:nosplit
func (l *debugLogWriter) bytes(x []byte) {
	l.ensure(uint64(len(x)))
	pos := l.write
	l.write += uint64(len(x))
	for len(x) > 0 {
		// copy stops at the end of the buffer; loop to finish the
		// remainder at the wrapped position.
		n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
		pos += uint64(n)
		x = x[n:]
	}
}
   531  
   532  //go:nosplit
   533  func (l *debugLogWriter) varint(x int64) {
   534  	var u uint64
   535  	if x < 0 {
   536  		u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
   537  	} else {
   538  		u = (uint64(x) << 1) // do not complement i, bit 0 is 0
   539  	}
   540  	l.uvarint(u)
   541  }
   542  
// uvarint appends u in base-128 varint encoding: 7 bits per byte,
// least-significant group first, high bit set on every byte but the
// last. It encodes into l.buf to keep stack usage down.
//
//go:nosplit
func (l *debugLogWriter) uvarint(u uint64) {
	i := 0
	for u >= 0x80 {
		l.buf[i] = byte(u) | 0x80
		u >>= 7
		i++
	}
	l.buf[i] = byte(u)
	i++
	l.bytes(l.buf[:i])
}
   555  
// A debugLogReader decodes records from a debugLogBuf ring buffer.
type debugLogReader struct {
	// data is the ring buffer being read.
	data *debugLogBuf

	// begin and end are the positions in the log of the beginning
	// and end of the log data, modulo len(data).
	begin, end uint64

	// tick and nano are the current time base at begin.
	tick, nano uint64
}
   566  
// skip consumes the record at r.begin, applying the new time base if
// it is a sync record. It returns the number of bytes consumed, or
// ^uint64(0) if the record is incomplete (extends past r.end).
//
//go:nosplit
func (r *debugLogReader) skip() uint64 {
	// Read size at pos.
	if r.begin+debugLogHeaderSize > r.end {
		return ^uint64(0)
	}
	size := uint64(r.readUint16LEAt(r.begin))
	if size == 0 {
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		size = debugLogSyncSize
	}
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	r.begin += size
	return size
}
   586  
// readUint16LEAt reads a little-endian uint16 at ring position pos.
// The two bytes may straddle the end of the buffer, so this can't
// simply slice and use the byteorder package.
//
//go:nosplit
func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
	return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
		uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
}
   592  
   593  //go:nosplit
   594  func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
   595  	var b [8]byte
   596  	for i := range b {
   597  		b[i] = r.data.b[pos%uint64(len(r.data.b))]
   598  		pos++
   599  	}
   600  	return byteorder.LEUint64(b[:])
   601  }
   602  
// peek returns the absolute tick of the record at r.begin without
// consuming it, or ^uint64(0) if no complete record is available. It
// does consume (and apply) any sync records it encounters first.
func (r *debugLogReader) peek() (tick uint64) {
	// Consume any sync records.
	size := uint64(0)
	for size == 0 {
		if r.begin+debugLogHeaderSize > r.end {
			return ^uint64(0)
		}
		size = uint64(r.readUint16LEAt(r.begin))
		if size != 0 {
			break
		}
		if r.begin+debugLogSyncSize > r.end {
			return ^uint64(0)
		}
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		r.begin += debugLogSyncSize
	}

	// Peek tick delta.
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	pos := r.begin + debugLogHeaderSize
	var u uint64
	// Decode the tick-delta uvarint by hand so r.begin is left
	// untouched (this is a peek, not a read).
	for i := uint(0); ; i += 7 {
		b := r.data.b[pos%uint64(len(r.data.b))]
		pos++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	if pos > r.begin+size {
		// The varint ran past the record: corrupt data.
		return ^uint64(0)
	}
	return r.tick + u
}
   642  
// header consumes the framing header and the common record prefix at
// r.begin, returning the record's end position, the absolute tick and
// nano times, and the P ID. peek must have validated the record first.
func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
	// Read size. We've already skipped sync packets and checked
	// bounds in peek.
	size := uint64(r.readUint16LEAt(r.begin))
	end = r.begin + size
	r.begin += debugLogHeaderSize

	// Read tick, nano, and p.
	tick = r.uvarint() + r.tick
	nano = r.uvarint() + r.nano
	p = int(r.varint())

	return
}
   657  
// uvarint decodes a base-128 varint at r.begin, advancing the cursor
// past it. This is the inverse of debugLogWriter.uvarint.
func (r *debugLogReader) uvarint() uint64 {
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[r.begin%uint64(len(r.data.b))]
		r.begin++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	return u
}
   670  
   671  func (r *debugLogReader) varint() int64 {
   672  	u := r.uvarint()
   673  	var v int64
   674  	if u&1 == 0 {
   675  		v = int64(u >> 1)
   676  	} else {
   677  		v = ^int64(u >> 1)
   678  	}
   679  	return v
   680  }
   681  
// printVal decodes and prints the field at r.begin, advancing the
// cursor past it. It reports whether decoding succeeded; false means
// the log data is corrupt and the caller should abandon this log.
func (r *debugLogReader) printVal() bool {
	// The first byte of every field is its type tag.
	typ := r.data.b[r.begin%uint64(len(r.data.b))]
	r.begin++

	switch typ {
	default:
		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
		return false

	case debugLogUnknown:
		print("<unknown kind>")

	case debugLogBoolTrue:
		print(true)

	case debugLogBoolFalse:
		print(false)

	case debugLogInt:
		print(r.varint())

	case debugLogUint:
		print(r.uvarint())

	case debugLogHex, debugLogPtr:
		print(hex(r.uvarint()))

	case debugLogString:
		sl := r.uvarint()
		if r.begin+sl > r.end {
			r.begin = r.end
			print("<string length corrupted>")
			break
		}
		// Write the string in chunks, since it may wrap around the
		// end of the ring buffer.
		for sl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > sl {
				b = b[:sl]
			}
			r.begin += uint64(len(b))
			sl -= uint64(len(b))
			gwrite(b)
		}

	case debugLogConstString:
		// The string body lives in the binary image; reconstruct it
		// from its length and offset from etext.
		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
		ptr += firstmoduledata.etext
		// We can't use unsafe.String as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		str := stringStruct{
			str: unsafe.Pointer(ptr),
			len: len,
		}
		s := *(*string)(unsafe.Pointer(&str))
		print(s)

	case debugLogOverflow:
		print("..(", r.uvarint(), " more bytes)..")

	case debugLogHexdump:
		p := uintptr(r.uvarint())
		bl := r.uvarint()
		if r.begin+bl > r.end {
			r.begin = r.end
			print("<hexdump length corrupted>")
			break
		}
		println() // Start on a new line
		hd := hexdumper{addr: p}
		// Feed the dump in chunks, since it may wrap around the end
		// of the ring buffer.
		for bl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > bl {
				b = b[:bl]
			}
			r.begin += uint64(len(b))
			bl -= uint64(len(b))
			hd.write(b)
		}
		hd.close()

	case debugLogPC:
		printDebugLogPC(uintptr(r.uvarint()), false)

	case debugLogTraceback:
		n := int(r.uvarint())
		for i := 0; i < n; i++ {
			print("\n\t")
			// gentraceback PCs are always return PCs.
			// Convert them to call PCs.
			//
			// TODO(austin): Expand inlined frames.
			printDebugLogPC(uintptr(r.uvarint()), true)
		}
	}

	return true
}
   779  
// printDebugLog prints the debug log. This is a no-op unless the
// debuglog build tag is set.
func printDebugLog() {
	if dlogEnabled {
		printDebugLogImpl()
	}
}
   786  
// printDebugLogImpl prints all per-M debug logs, merging their records
// into a single stream ordered by tick.
func printDebugLogImpl() {
	// This function should not panic or throw since it is used in
	// the fatal panic path and this may deadlock.

	printlock()

	// Get the list of all debug logs.
	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
	all := (*dloggerImpl)(unsafe.Pointer(atomic.Loaduintptr(allp)))

	// Count the logs.
	n := 0
	for l := all; l != nil; l = l.allLink {
		n++
	}
	if n == 0 {
		printunlock()
		return
	}

	// Prepare read state for all logs.
	type readState struct {
		debugLogReader
		first    bool   // no records printed from this log yet
		lost     uint64 // bytes overwritten before printing began
		nextTick uint64 // tick of this log's next record (from peek)
	}
	// Use sysAllocOS instead of sysAlloc because we want to interfere
	// with the runtime as little as possible, and sysAlloc updates accounting.
	state1 := sysAllocOS(unsafe.Sizeof(readState{})*uintptr(n), "debug log")
	if state1 == nil {
		println("failed to allocate read state for", n, "logs")
		printunlock()
		return
	}
	state := (*[1 << 20]readState)(state1)[:n]
	{
		l := all
		for i := range state {
			s := &state[i]
			s.debugLogReader = l.w.r
			s.first = true
			s.lost = l.w.r.begin
			s.nextTick = s.peek()
			l = l.allLink
		}
	}

	// Print records.
	for {
		// Find the next record: the unconsumed record with the
		// smallest tick across all logs.
		var best struct {
			tick uint64
			i    int
		}
		best.tick = ^uint64(0)
		for i := range state {
			if state[i].nextTick < best.tick {
				best.tick = state[i].nextTick
				best.i = i
			}
		}
		if best.tick == ^uint64(0) {
			// Every log is exhausted.
			break
		}

		// Print record.
		s := &state[best.i]
		if s.first {
			print(">> begin log ", best.i)
			if s.lost != 0 {
				print("; lost first ", s.lost>>10, "KB")
			}
			print(" <<\n")
			s.first = false
		}

		end, _, nano, p := s.header()
		oldEnd := s.end
		s.end = end

		print("[")
		var tmpbuf [21]byte
		pnano := int64(nano) - runtimeInitTime
		if pnano < 0 {
			// Logged before runtimeInitTime was set.
			pnano = 0
		}
		pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
		print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
		print(" P ", p, "] ")

		for i := 0; s.begin < s.end; i++ {
			if i > 0 {
				print(" ")
			}
			if !s.printVal() {
				// Abort this P log.
				print("<aborting P log>")
				end = oldEnd
				break
			}
		}
		println()

		// Move on to the next record.
		s.begin = end
		s.end = oldEnd
		s.nextTick = s.peek()
	}

	printunlock()
}
   900  
// printDebugLogPC prints a single symbolized PC. If returnPC is true,
// pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool) {
	fn := findfunc(pc)
	if returnPC && (!fn.valid() || pc > fn.entry()) {
		// Step back into the call instruction.
		//
		// TODO(austin): Don't back up if the previous frame
		// was a sigpanic.
		pc--
	}

	print(hex(pc))
	if !fn.valid() {
		print(" [unknown PC]")
	} else {
		name := funcname(fn)
		file, line := funcline(fn, pc)
		print(" [", name, "+", hex(pc-fn.entry()),
			" ", file, ":", line, "]")
	}
}
   921  

View as plain text