1// Copyright 2019 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// This file provides an internal debug logging facility. The debug
6// log is a lightweight, in-memory, per-M ring buffer. By default, the
7// runtime prints the debug log on panic.
8//
9// To print something to the debug log, call dlog to obtain a dlogger
10// and use the methods on that to add values. The values will be
11// space-separated in the output (much like println).
12//
13// This facility can be enabled by passing -tags debuglog when
14// building. Without this tag, dlog calls compile to nothing.
15
16package runtime
17
18import (
19	"internal/abi"
20	"internal/runtime/atomic"
21	"runtime/internal/sys"
22	"unsafe"
23)
24
// debugLogBytes is the size of each per-M ring buffer. This is
// allocated off-heap to avoid blowing up the M and hence the GC'd
// heap size.
const debugLogBytes = 16 << 10

// debugLogStringLimit is the maximum number of bytes in a string.
// Above this, the string will be truncated with "..(n more bytes).."
// (see dlogger.s). This keeps a single string from consuming a large
// fraction of the ring buffer.
const debugLogStringLimit = debugLogBytes / 8
33
// dlog returns a debug logger. The caller can use methods on the
// returned logger to add values, which will be space-separated in the
// final output, much like println. The caller must call end() to
// finish the message.
//
// dlog can be used from highly-constrained corners of the runtime: it
// is safe to use in the signal handler, from within the write
// barrier, from within the stack implementation, and in places that
// must be recursively nosplit.
//
// This will be compiled away if built without the debuglog build tag.
// However, argument construction may not be. If any of the arguments
// are not literals or trivial expressions, consider protecting the
// call with "if dlogEnabled".
//
//go:nosplit
//go:nowritebarrierrec
func dlog() *dlogger {
	if !dlogEnabled {
		return nil
	}

	// Get the time. These are recorded in the record header as
	// deltas from the most recent sync packet.
	tick, nano := uint64(cputicks()), uint64(nanotime())

	// Try to get a cached logger.
	l := getCachedDlogger()

	// If we couldn't get a cached logger, try to get one from the
	// global pool.
	if l == nil {
		allp := (*uintptr)(unsafe.Pointer(&allDloggers))
		all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))
		for l1 := all; l1 != nil; l1 = l1.allLink {
			// The plain load cheaply filters out owned
			// loggers; the CAS is what actually claims
			// ownership.
			if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
				l = l1
				break
			}
		}
	}

	// If that failed, allocate a new logger.
	if l == nil {
		// Use sysAllocOS instead of sysAlloc because we want to interfere
		// with the runtime as little as possible, and sysAlloc updates accounting.
		l = (*dlogger)(sysAllocOS(unsafe.Sizeof(dlogger{})))
		if l == nil {
			throw("failed to allocate debug log")
		}
		l.w.r.data = &l.w.data
		l.owned.Store(1)

		// Prepend to allDloggers list. Loggers are never
		// removed from this list, so a lock-free prepend is
		// sufficient.
		headp := (*uintptr)(unsafe.Pointer(&allDloggers))
		for {
			head := atomic.Loaduintptr(headp)
			l.allLink = (*dlogger)(unsafe.Pointer(head))
			if atomic.Casuintptr(headp, head, uintptr(unsafe.Pointer(l))) {
				break
			}
		}
	}

	// If the time delta is getting too high, write a new sync
	// packet. We set the limit so we don't write more than 6
	// bytes of delta in the record header (3 bytes for each of
	// the tick and nano deltas, at 7 payload bits per byte).
	const deltaLimit = 1<<(3*7) - 1 // ~2ms between sync packets
	if tick-l.w.tick > deltaLimit || nano-l.w.nano > deltaLimit {
		l.w.writeSync(tick, nano)
	}

	// Reserve space for framing header. It is filled in by end()
	// once the record's size is known.
	l.w.ensure(debugLogHeaderSize)
	l.w.write += debugLogHeaderSize

	// Write record header: time deltas and the current P's ID,
	// or -1 if there is no P.
	l.w.uvarint(tick - l.w.tick)
	l.w.uvarint(nano - l.w.nano)
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.p != 0 {
		l.w.varint(int64(gp.m.p.ptr().id))
	} else {
		l.w.varint(-1)
	}

	return l
}
121
// A dlogger writes to the debug log.
//
// To obtain a dlogger, call dlog(). When done with the dlogger, call
// end().
type dlogger struct {
	_ sys.NotInHeap
	// w is this logger's ring buffer writer state.
	w debugLogWriter

	// allLink is the next dlogger in the allDloggers list.
	allLink *dlogger

	// owned indicates that this dlogger is owned by an M. This is
	// accessed atomically (claimed by CAS in dlog, released by
	// end).
	owned atomic.Uint32
}
137
// allDloggers is a list of all dloggers, linked through
// dlogger.allLink. This is accessed atomically. This is prepend only,
// so it doesn't need to protect against ABA races. Loggers are never
// removed from the list; availability is tracked by dlogger.owned.
var allDloggers *dlogger
142
// end finishes the record started by dlog, filling in its framing
// header and releasing the logger for reuse.
//
//go:nosplit
func (l *dlogger) end() {
	if !dlogEnabled {
		return
	}

	// Fill in framing header. l.w.r.end is where this record's
	// reserved header lives (set when the previous record was
	// committed).
	size := l.w.write - l.w.r.end
	if !l.w.writeFrameAt(l.w.r.end, size) {
		throw("record too large")
	}

	// Commit the record.
	l.w.r.end = l.w.write

	// Attempt to return this logger to the cache.
	if putCachedDlogger(l) {
		return
	}

	// Return the logger to the global pool by releasing
	// ownership.
	l.owned.Store(0)
}
166
// Tag bytes identifying the type of each field in a record. These
// start at 1 so a zero byte is never a valid tag.
const (
	debugLogUnknown = 1 + iota
	debugLogBoolTrue
	debugLogBoolFalse
	debugLogInt
	debugLogUint
	debugLogHex
	debugLogPtr
	debugLogString
	debugLogConstString
	debugLogStringOverflow

	debugLogPC
	debugLogTraceback
)
182
183//go:nosplit
184func (l *dlogger) b(x bool) *dlogger {
185	if !dlogEnabled {
186		return l
187	}
188	if x {
189		l.w.byte(debugLogBoolTrue)
190	} else {
191		l.w.byte(debugLogBoolFalse)
192	}
193	return l
194}
195
196//go:nosplit
197func (l *dlogger) i(x int) *dlogger {
198	return l.i64(int64(x))
199}
200
201//go:nosplit
202func (l *dlogger) i8(x int8) *dlogger {
203	return l.i64(int64(x))
204}
205
206//go:nosplit
207func (l *dlogger) i16(x int16) *dlogger {
208	return l.i64(int64(x))
209}
210
211//go:nosplit
212func (l *dlogger) i32(x int32) *dlogger {
213	return l.i64(int64(x))
214}
215
216//go:nosplit
217func (l *dlogger) i64(x int64) *dlogger {
218	if !dlogEnabled {
219		return l
220	}
221	l.w.byte(debugLogInt)
222	l.w.varint(x)
223	return l
224}
225
226//go:nosplit
227func (l *dlogger) u(x uint) *dlogger {
228	return l.u64(uint64(x))
229}
230
231//go:nosplit
232func (l *dlogger) uptr(x uintptr) *dlogger {
233	return l.u64(uint64(x))
234}
235
236//go:nosplit
237func (l *dlogger) u8(x uint8) *dlogger {
238	return l.u64(uint64(x))
239}
240
241//go:nosplit
242func (l *dlogger) u16(x uint16) *dlogger {
243	return l.u64(uint64(x))
244}
245
246//go:nosplit
247func (l *dlogger) u32(x uint32) *dlogger {
248	return l.u64(uint64(x))
249}
250
251//go:nosplit
252func (l *dlogger) u64(x uint64) *dlogger {
253	if !dlogEnabled {
254		return l
255	}
256	l.w.byte(debugLogUint)
257	l.w.uvarint(x)
258	return l
259}
260
261//go:nosplit
262func (l *dlogger) hex(x uint64) *dlogger {
263	if !dlogEnabled {
264		return l
265	}
266	l.w.byte(debugLogHex)
267	l.w.uvarint(x)
268	return l
269}
270
// p logs a pointer-shaped value. x must be nil or have chan, func,
// map, pointer, or unsafe-pointer kind; any other kind throws.
//
//go:nosplit
func (l *dlogger) p(x any) *dlogger {
	if !dlogEnabled {
		return l
	}
	l.w.byte(debugLogPtr)
	if x == nil {
		l.w.uvarint(0)
	} else {
		v := efaceOf(&x)
		switch v._type.Kind_ & abi.KindMask {
		case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
			// For these kinds the interface data word is
			// the pointer value itself.
			l.w.uvarint(uint64(uintptr(v.data)))
		default:
			throw("not a pointer type")
		}
	}
	return l
}
290
// s logs a string. Strings whose data appears to live in the
// module's read-only data (between etext and end) are recorded as an
// offset so their bytes need not be copied into the ring buffer;
// other strings are copied, truncated to debugLogStringLimit bytes.
//
//go:nosplit
func (l *dlogger) s(x string) *dlogger {
	if !dlogEnabled {
		return l
	}

	strData := unsafe.StringData(x)
	datap := &firstmoduledata
	if len(x) > 4 && datap.etext <= uintptr(unsafe.Pointer(strData)) && uintptr(unsafe.Pointer(strData)) < datap.end {
		// String constants are in the rodata section, which
		// isn't recorded in moduledata. But it has to be
		// somewhere between etext and end.
		l.w.byte(debugLogConstString)
		l.w.uvarint(uint64(len(x)))
		l.w.uvarint(uint64(uintptr(unsafe.Pointer(strData)) - datap.etext))
	} else {
		l.w.byte(debugLogString)
		// We can't use unsafe.Slice as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		var b []byte
		bb := (*slice)(unsafe.Pointer(&b))
		bb.array = unsafe.Pointer(strData)
		bb.len, bb.cap = len(x), len(x)
		if len(b) > debugLogStringLimit {
			b = b[:debugLogStringLimit]
		}
		l.w.uvarint(uint64(len(b)))
		l.w.bytes(b)
		if len(b) != len(x) {
			// Record how many bytes were dropped so the
			// printer can say so.
			l.w.byte(debugLogStringOverflow)
			l.w.uvarint(uint64(len(x) - len(b)))
		}
	}
	return l
}
326
327//go:nosplit
328func (l *dlogger) pc(x uintptr) *dlogger {
329	if !dlogEnabled {
330		return l
331	}
332	l.w.byte(debugLogPC)
333	l.w.uvarint(uint64(x))
334	return l
335}
336
337//go:nosplit
338func (l *dlogger) traceback(x []uintptr) *dlogger {
339	if !dlogEnabled {
340		return l
341	}
342	l.w.byte(debugLogTraceback)
343	l.w.uvarint(uint64(len(x)))
344	for _, pc := range x {
345		l.w.uvarint(uint64(pc))
346	}
347	return l
348}
349
// A debugLogWriter is a ring buffer of binary debug log records.
//
// A log record consists of a 2-byte framing header and a sequence of
// fields. The framing header gives the size of the record as a little
// endian 16-bit value. Each field starts with a byte indicating its
// type, followed by type-specific data. If the size in the framing
// header is 0, it's a sync record consisting of two little endian
// 64-bit values giving a new time base.
//
// Because this is a ring buffer, new records will eventually
// overwrite old records. Hence, it maintains a reader that consumes
// the log as it gets overwritten. That reader state is where an
// actual log reader would start.
type debugLogWriter struct {
	_ sys.NotInHeap
	// write is the virtual write cursor. It increases
	// monotonically; buffer accesses reduce it modulo
	// len(data.b).
	write uint64
	data  debugLogBuf

	// tick and nano are the time bases from the most recently
	// written sync record.
	tick, nano uint64

	// r is a reader that consumes records as they get overwritten
	// by the writer. It also acts as the initial reader state
	// when printing the log.
	r debugLogReader

	// buf is a scratch buffer for encoding. This is here to
	// reduce stack usage.
	buf [10]byte
}
381
// debugLogBuf is the fixed-size backing store of a debug log ring
// buffer. It is marked NotInHeap because dloggers are allocated with
// sysAllocOS rather than on the GC'd heap.
type debugLogBuf struct {
	_ sys.NotInHeap
	b [debugLogBytes]byte
}
386
const (
	// debugLogHeaderSize is the number of bytes in the framing
	// header of every dlog record.
	debugLogHeaderSize = 2

	// debugLogSyncSize is the number of bytes in a sync record:
	// a zero-size framing header plus two little endian 64-bit
	// time values.
	debugLogSyncSize = debugLogHeaderSize + 2*8
)
395
// ensure makes room to write n more bytes by discarding records from
// the reader end of the ring buffer until the write cursor is less
// than a full buffer ahead of r.begin.
//
//go:nosplit
func (l *debugLogWriter) ensure(n uint64) {
	for l.write+n >= l.r.begin+uint64(len(l.data.b)) {
		// Consume record at begin.
		if l.r.skip() == ^uint64(0) {
			// Wrapped around within a record.
			//
			// TODO(austin): It would be better to just
			// eat the whole buffer at this point, but we
			// have to communicate that to the reader
			// somehow.
			throw("record wrapped around")
		}
	}
}
411
412//go:nosplit
413func (l *debugLogWriter) writeFrameAt(pos, size uint64) bool {
414	l.data.b[pos%uint64(len(l.data.b))] = uint8(size)
415	l.data.b[(pos+1)%uint64(len(l.data.b))] = uint8(size >> 8)
416	return size <= 0xFFFF
417}
418
// writeSync writes a sync record (a zero-size frame followed by
// absolute tick and nano values) and makes tick/nano the new time
// base for subsequent record headers.
//
//go:nosplit
func (l *debugLogWriter) writeSync(tick, nano uint64) {
	l.tick, l.nano = tick, nano
	l.ensure(debugLogHeaderSize)
	l.writeFrameAt(l.write, 0)
	l.write += debugLogHeaderSize
	l.writeUint64LE(tick)
	l.writeUint64LE(nano)
	// Sync records need no later fix-up, so commit immediately.
	l.r.end = l.write
}
429
430//go:nosplit
431func (l *debugLogWriter) writeUint64LE(x uint64) {
432	var b [8]byte
433	b[0] = byte(x)
434	b[1] = byte(x >> 8)
435	b[2] = byte(x >> 16)
436	b[3] = byte(x >> 24)
437	b[4] = byte(x >> 32)
438	b[5] = byte(x >> 40)
439	b[6] = byte(x >> 48)
440	b[7] = byte(x >> 56)
441	l.bytes(b[:])
442}
443
444//go:nosplit
445func (l *debugLogWriter) byte(x byte) {
446	l.ensure(1)
447	pos := l.write
448	l.write++
449	l.data.b[pos%uint64(len(l.data.b))] = x
450}
451
452//go:nosplit
453func (l *debugLogWriter) bytes(x []byte) {
454	l.ensure(uint64(len(x)))
455	pos := l.write
456	l.write += uint64(len(x))
457	for len(x) > 0 {
458		n := copy(l.data.b[pos%uint64(len(l.data.b)):], x)
459		pos += uint64(n)
460		x = x[n:]
461	}
462}
463
464//go:nosplit
465func (l *debugLogWriter) varint(x int64) {
466	var u uint64
467	if x < 0 {
468		u = (^uint64(x) << 1) | 1 // complement i, bit 0 is 1
469	} else {
470		u = (uint64(x) << 1) // do not complement i, bit 0 is 0
471	}
472	l.uvarint(u)
473}
474
475//go:nosplit
476func (l *debugLogWriter) uvarint(u uint64) {
477	i := 0
478	for u >= 0x80 {
479		l.buf[i] = byte(u) | 0x80
480		u >>= 7
481		i++
482	}
483	l.buf[i] = byte(u)
484	i++
485	l.bytes(l.buf[:i])
486}
487
// A debugLogReader decodes records from a debugLogBuf, consuming
// them as it goes.
type debugLogReader struct {
	data *debugLogBuf

	// begin and end are the positions in the log of the beginning
	// and end of the log data, modulo len(data).
	begin, end uint64

	// tick and nano are the current time base at begin, taken
	// from the most recently consumed sync record.
	tick, nano uint64
}
498
// skip consumes the record at r.begin, updating the time base if it
// is a sync record. It returns the number of bytes consumed, or
// ^uint64(0) if the record is incomplete (cut off at r.end).
//
//go:nosplit
func (r *debugLogReader) skip() uint64 {
	// Read size at pos.
	if r.begin+debugLogHeaderSize > r.end {
		return ^uint64(0)
	}
	size := uint64(r.readUint16LEAt(r.begin))
	if size == 0 {
		// Sync packet. A zero size in the framing header
		// means the payload is two 64-bit time values.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		size = debugLogSyncSize
	}
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	r.begin += size
	return size
}
518
519//go:nosplit
520func (r *debugLogReader) readUint16LEAt(pos uint64) uint16 {
521	return uint16(r.data.b[pos%uint64(len(r.data.b))]) |
522		uint16(r.data.b[(pos+1)%uint64(len(r.data.b))])<<8
523}
524
525//go:nosplit
526func (r *debugLogReader) readUint64LEAt(pos uint64) uint64 {
527	var b [8]byte
528	for i := range b {
529		b[i] = r.data.b[pos%uint64(len(r.data.b))]
530		pos++
531	}
532	return uint64(b[0]) | uint64(b[1])<<8 |
533		uint64(b[2])<<16 | uint64(b[3])<<24 |
534		uint64(b[4])<<32 | uint64(b[5])<<40 |
535		uint64(b[6])<<48 | uint64(b[7])<<56
536}
537
// peek returns the absolute tick of the record at r.begin without
// consuming it, first consuming any sync records in the way. It
// returns ^uint64(0) if no complete record remains.
func (r *debugLogReader) peek() (tick uint64) {
	// Consume any sync records.
	size := uint64(0)
	for size == 0 {
		if r.begin+debugLogHeaderSize > r.end {
			return ^uint64(0)
		}
		size = uint64(r.readUint16LEAt(r.begin))
		if size != 0 {
			break
		}
		if r.begin+debugLogSyncSize > r.end {
			return ^uint64(0)
		}
		// Sync packet.
		r.tick = r.readUint64LEAt(r.begin + debugLogHeaderSize)
		r.nano = r.readUint64LEAt(r.begin + debugLogHeaderSize + 8)
		r.begin += debugLogSyncSize
	}

	// Peek tick delta. This is the first uvarint after the
	// framing header (see dlog); decode it in place without
	// moving r.begin.
	if r.begin+size > r.end {
		return ^uint64(0)
	}
	pos := r.begin + debugLogHeaderSize
	var u uint64
	for i := uint(0); ; i += 7 {
		b := r.data.b[pos%uint64(len(r.data.b))]
		pos++
		u |= uint64(b&^0x80) << i
		if b&0x80 == 0 {
			break
		}
	}
	if pos > r.begin+size {
		// The varint ran past the record: corrupt.
		return ^uint64(0)
	}
	return r.tick + u
}
577
// header consumes the framing header and the tick/nano/P fields of
// the record at r.begin. It returns the record's end position, the
// absolute tick and nano times, and the logging P's ID (-1 if none).
func (r *debugLogReader) header() (end, tick, nano uint64, p int) {
	// Read size. We've already skipped sync packets and checked
	// bounds in peek.
	size := uint64(r.readUint16LEAt(r.begin))
	end = r.begin + size
	r.begin += debugLogHeaderSize

	// Read tick, nano, and p. The stored values are deltas from
	// the current time base (see dlog).
	tick = r.uvarint() + r.tick
	nano = r.uvarint() + r.nano
	p = int(r.varint())

	return
}
592
593func (r *debugLogReader) uvarint() uint64 {
594	var u uint64
595	for i := uint(0); ; i += 7 {
596		b := r.data.b[r.begin%uint64(len(r.data.b))]
597		r.begin++
598		u |= uint64(b&^0x80) << i
599		if b&0x80 == 0 {
600			break
601		}
602	}
603	return u
604}
605
606func (r *debugLogReader) varint() int64 {
607	u := r.uvarint()
608	var v int64
609	if u&1 == 0 {
610		v = int64(u >> 1)
611	} else {
612		v = ^int64(u >> 1)
613	}
614	return v
615}
616
// printVal decodes and prints the single field at r.begin, advancing
// r.begin past it. It reports whether decoding succeeded; on failure
// the log is corrupt and the caller should abandon this log.
func (r *debugLogReader) printVal() bool {
	typ := r.data.b[r.begin%uint64(len(r.data.b))]
	r.begin++

	switch typ {
	default:
		print("<unknown field type ", hex(typ), " pos ", r.begin-1, " end ", r.end, ">\n")
		return false

	case debugLogUnknown:
		print("<unknown kind>")

	case debugLogBoolTrue:
		print(true)

	case debugLogBoolFalse:
		print(false)

	case debugLogInt:
		print(r.varint())

	case debugLogUint:
		print(r.uvarint())

	case debugLogHex, debugLogPtr:
		print(hex(r.uvarint()))

	case debugLogString:
		sl := r.uvarint()
		if r.begin+sl > r.end {
			r.begin = r.end
			print("<string length corrupted>")
			break
		}
		// Write the string out in ring-buffer-contiguous
		// chunks, since it may wrap around the end.
		for sl > 0 {
			b := r.data.b[r.begin%uint64(len(r.data.b)):]
			if uint64(len(b)) > sl {
				b = b[:sl]
			}
			r.begin += uint64(len(b))
			sl -= uint64(len(b))
			gwrite(b)
		}

	case debugLogConstString:
		// The string was recorded as (length, offset from
		// etext); reconstruct a string header pointing back
		// into the binary's read-only data (see dlogger.s).
		len, ptr := int(r.uvarint()), uintptr(r.uvarint())
		ptr += firstmoduledata.etext
		// We can't use unsafe.String as it may panic, which isn't safe
		// in this (potentially) nowritebarrier context.
		str := stringStruct{
			str: unsafe.Pointer(ptr),
			len: len,
		}
		s := *(*string)(unsafe.Pointer(&str))
		print(s)

	case debugLogStringOverflow:
		print("..(", r.uvarint(), " more bytes)..")

	case debugLogPC:
		printDebugLogPC(uintptr(r.uvarint()), false)

	case debugLogTraceback:
		n := int(r.uvarint())
		for i := 0; i < n; i++ {
			print("\n\t")
			// gentraceback PCs are always return PCs.
			// Convert them to call PCs.
			//
			// TODO(austin): Expand inlined frames.
			printDebugLogPC(uintptr(r.uvarint()), true)
		}
	}

	return true
}
693
// printDebugLog prints the contents of every per-M debug log,
// merging their records into a single tick-ordered stream.
func printDebugLog() {
	if !dlogEnabled {
		return
	}

	// This function should not panic or throw since it is used in
	// the fatal panic path and this may deadlock.

	printlock()

	// Get the list of all debug logs.
	allp := (*uintptr)(unsafe.Pointer(&allDloggers))
	all := (*dlogger)(unsafe.Pointer(atomic.Loaduintptr(allp)))

	// Count the logs.
	n := 0
	for l := all; l != nil; l = l.allLink {
		n++
	}
	if n == 0 {
		printunlock()
		return
	}

	// Prepare read state for all logs.
	type readState struct {
		debugLogReader
		first    bool   // no records printed from this log yet
		lost     uint64 // bytes overwritten before the first readable record
		nextTick uint64 // tick of this log's next record (^0 if exhausted)
	}
	// Use sysAllocOS instead of sysAlloc because we want to interfere
	// with the runtime as little as possible, and sysAlloc updates accounting.
	state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
	if state1 == nil {
		println("failed to allocate read state for", n, "logs")
		printunlock()
		return
	}
	state := (*[1 << 20]readState)(state1)[:n]
	{
		l := all
		for i := range state {
			s := &state[i]
			s.debugLogReader = l.w.r
			s.first = true
			s.lost = l.w.r.begin
			s.nextTick = s.peek()
			l = l.allLink
		}
	}

	// Print records, repeatedly picking the log with the earliest
	// next tick.
	for {
		// Find the next record.
		var best struct {
			tick uint64
			i    int
		}
		best.tick = ^uint64(0)
		for i := range state {
			if state[i].nextTick < best.tick {
				best.tick = state[i].nextTick
				best.i = i
			}
		}
		if best.tick == ^uint64(0) {
			// All logs are exhausted.
			break
		}

		// Print record.
		s := &state[best.i]
		if s.first {
			print(">> begin log ", best.i)
			if s.lost != 0 {
				print("; lost first ", s.lost>>10, "KB")
			}
			print(" <<\n")
			s.first = false
		}

		// Temporarily clamp s.end to this record's end so
		// printVal's bounds checks stop at the record
		// boundary; oldEnd is restored below.
		end, _, nano, p := s.header()
		oldEnd := s.end
		s.end = end

		print("[")
		var tmpbuf [21]byte
		pnano := int64(nano) - runtimeInitTime
		if pnano < 0 {
			// Logged before runtimeInitTime was set.
			pnano = 0
		}
		pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
		print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
		print(" P ", p, "] ")

		for i := 0; s.begin < s.end; i++ {
			if i > 0 {
				print(" ")
			}
			if !s.printVal() {
				// Abort this P log.
				print("<aborting P log>")
				end = oldEnd
				break
			}
		}
		println()

		// Move on to the next record.
		s.begin = end
		s.end = oldEnd
		s.nextTick = s.peek()
	}

	printunlock()
}
812
813// printDebugLogPC prints a single symbolized PC. If returnPC is true,
814// pc is a return PC that must first be converted to a call PC.
815func printDebugLogPC(pc uintptr, returnPC bool) {
816	fn := findfunc(pc)
817	if returnPC && (!fn.valid() || pc > fn.entry()) {
818		// TODO(austin): Don't back up if the previous frame
819		// was a sigpanic.
820		pc--
821	}
822
823	print(hex(pc))
824	if !fn.valid() {
825		print(" [unknown PC]")
826	} else {
827		name := funcname(fn)
828		file, line := funcline(fn, pc)
829		print(" [", name, "+", hex(pc-fn.entry()),
830			" ", file, ":", line, "]")
831	}
832}
833