1// Copyright 2018 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// TODO: This test could be implemented on all (most?) UNIXes if we
6// added syscall.Tgkill more widely.
7
8// We skip all of these tests under race mode because our test thread
9// spends all of its time in the race runtime, which isn't a safe
10// point.
11
12//go:build (amd64 || arm64 || ppc64le) && linux && !race
13
14package runtime_test
15
16import (
17	"fmt"
18	"internal/abi"
19	"math"
20	"os"
21	"regexp"
22	"runtime"
23	"runtime/debug"
24	"sync/atomic"
25	"syscall"
26	"testing"
27)
28
29func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) {
30	// This can deadlock if run under a debugger because it
31	// depends on catching SIGTRAP, which is usually swallowed by
32	// a debugger.
33	skipUnderDebugger(t)
34
35	// This can deadlock if there aren't enough threads or if a GC
36	// tries to interrupt an atomic loop (see issue #10958). Execute
37	// an extra GC to ensure even the sweep phase is done (out of
38	// caution to prevent #49370 from happening).
39	// TODO(mknyszek): This extra GC cycle is likely unnecessary
40	// because preemption (which may happen during the sweep phase)
41	// isn't much of an issue anymore thanks to asynchronous preemption.
42	// The biggest risk is having a write barrier in the debug call
43	// injection test code fire, because it runs in a signal handler
44	// and may not have a P.
45	//
46	// We use 8 Ps so there's room for the debug call worker,
47	// something that's trying to preempt the call worker, and the
48	// goroutine that's trying to stop the call worker.
49	ogomaxprocs := runtime.GOMAXPROCS(8)
50	ogcpercent := debug.SetGCPercent(-1)
51	runtime.GC()
52
53	// ready is a buffered channel so debugCallWorker won't block
54	// on sending to it. This makes it less likely we'll catch
55	// debugCallWorker while it's in the runtime.
56	ready := make(chan *runtime.G, 1)
57	var stop uint32
58	done := make(chan error)
59	go debugCallWorker(ready, &stop, done)
60	g = <-ready
61	return g, func() {
62		atomic.StoreUint32(&stop, 1)
63		err := <-done
64		if err != nil {
65			t.Fatal(err)
66		}
67		runtime.GOMAXPROCS(ogomaxprocs)
68		debug.SetGCPercent(ogcpercent)
69	}
70}
71
72func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) {
73	runtime.LockOSThread()
74	defer runtime.UnlockOSThread()
75
76	ready <- runtime.Getg()
77
78	x := 2
79	debugCallWorker2(stop, &x)
80	if x != 1 {
81		done <- fmt.Errorf("want x = 2, got %d; register pointer not adjusted?", x)
82	}
83	close(done)
84}
85
// debugCallWorker2 repeatedly bumps *x until *stop becomes nonzero,
// then stores 1 through x. The hot loop keeps x live so the compiler
// is encouraged to hold it in a register, exercising register-pointer
// adjustment during stack moves.
//
// Don't inline this function, since we want to test adjusting
// pointers in the arguments.
//
//go:noinline
func debugCallWorker2(stop *uint32, x *int) {
	for {
		if atomic.LoadUint32(stop) != 0 {
			break
		}
		// Keep x mutated every iteration so it likely lives in a
		// register, which is what the pointer-adjustment test needs.
		*x++
	}
	*x = 1
}
98
// debugCallTKill delivers SIGTRAP to the given thread of this process,
// which is how the test drives the debug-call injection protocol.
func debugCallTKill(tid int) error {
	pid := syscall.Getpid()
	return syscall.Tgkill(pid, tid, syscall.SIGTRAP)
}
102
// skipUnderDebugger skips the current test when this process has a
// tracer attached (i.e. it is running under a debugger), as reported
// by the TracerPid field of /proc/<pid>/status. This is
// Linux-specific. If the status file can't be read or parsed, the
// test proceeds and the problem is merely logged.
func skipUnderDebugger(t *testing.T) {
	statusPath := fmt.Sprintf("/proc/%d/status", syscall.Getpid())
	status, err := os.ReadFile(statusPath)
	if err != nil {
		t.Logf("couldn't get proc tracer: %s", err)
		return
	}
	m := regexp.MustCompile(`TracerPid:\s+([0-9]+)`).FindSubmatch(status)
	if m == nil {
		t.Logf("couldn't find proc tracer PID")
		return
	}
	// A nonzero TracerPid means a debugger/tracer is attached.
	if string(m[1]) != "0" {
		t.Skip("test will deadlock under a debugger")
	}
}
124
125func TestDebugCall(t *testing.T) {
126	g, after := startDebugCallWorker(t)
127	defer after()
128
129	type stackArgs struct {
130		x0    int
131		x1    float64
132		y0Ret int
133		y1Ret float64
134	}
135
136	// Inject a call into the debugCallWorker goroutine and test
137	// basic argument and result passing.
138	fn := func(x int, y float64) (y0Ret int, y1Ret float64) {
139		return x + 1, y + 1.0
140	}
141	var args *stackArgs
142	var regs abi.RegArgs
143	intRegs := regs.Ints[:]
144	floatRegs := regs.Floats[:]
145	fval := float64(42.0)
146	if len(intRegs) > 0 {
147		intRegs[0] = 42
148		floatRegs[0] = math.Float64bits(fval)
149	} else {
150		args = &stackArgs{
151			x0: 42,
152			x1: 42.0,
153		}
154	}
155
156	if _, err := runtime.InjectDebugCall(g, fn, &regs, args, debugCallTKill, false); err != nil {
157		t.Fatal(err)
158	}
159	var result0 int
160	var result1 float64
161	if len(intRegs) > 0 {
162		result0 = int(intRegs[0])
163		result1 = math.Float64frombits(floatRegs[0])
164	} else {
165		result0 = args.y0Ret
166		result1 = args.y1Ret
167	}
168	if result0 != 43 {
169		t.Errorf("want 43, got %d", result0)
170	}
171	if result1 != fval+1 {
172		t.Errorf("want 43, got %f", result1)
173	}
174}
175
176func TestDebugCallLarge(t *testing.T) {
177	g, after := startDebugCallWorker(t)
178	defer after()
179
180	// Inject a call with a large call frame.
181	const N = 128
182	var args struct {
183		in  [N]int
184		out [N]int
185	}
186	fn := func(in [N]int) (out [N]int) {
187		for i := range in {
188			out[i] = in[i] + 1
189		}
190		return
191	}
192	var want [N]int
193	for i := range args.in {
194		args.in[i] = i
195		want[i] = i + 1
196	}
197	if _, err := runtime.InjectDebugCall(g, fn, nil, &args, debugCallTKill, false); err != nil {
198		t.Fatal(err)
199	}
200	if want != args.out {
201		t.Fatalf("want %v, got %v", want, args.out)
202	}
203}
204
205func TestDebugCallGC(t *testing.T) {
206	g, after := startDebugCallWorker(t)
207	defer after()
208
209	// Inject a call that performs a GC.
210	if _, err := runtime.InjectDebugCall(g, runtime.GC, nil, nil, debugCallTKill, false); err != nil {
211		t.Fatal(err)
212	}
213}
214
215func TestDebugCallGrowStack(t *testing.T) {
216	g, after := startDebugCallWorker(t)
217	defer after()
218
219	// Inject a call that grows the stack. debugCallWorker checks
220	// for stack pointer breakage.
221	if _, err := runtime.InjectDebugCall(g, func() { growStack(nil) }, nil, nil, debugCallTKill, false); err != nil {
222		t.Fatal(err)
223	}
224}
225
// debugCallUnsafePointWorker publishes its goroutine via gpp, then
// spins setting *ready until *stop becomes nonzero. Because of the
// nosplit annotation, the spin loop contains no safe points, so call
// injection into this goroutine must be refused while it is looping —
// that refusal is exactly what TestDebugCallUnsafePoint checks.
//
//go:nosplit
func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) {
	// The nosplit causes this function to not contain safe-points
	// except at calls.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	*gpp = runtime.Getg()

	// Spin at an unsafe point. Re-storing *ready each iteration lets
	// the test observe that the loop has actually been entered.
	for atomic.LoadUint32(stop) == 0 {
		atomic.StoreUint32(ready, 1)
	}
}
239
240func TestDebugCallUnsafePoint(t *testing.T) {
241	skipUnderDebugger(t)
242
243	// This can deadlock if there aren't enough threads or if a GC
244	// tries to interrupt an atomic loop (see issue #10958).
245	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
246
247	// InjectDebugCall cannot be executed while a GC is actively in
248	// progress. Wait until the current GC is done, and turn it off.
249	//
250	// See #49370.
251	runtime.GC()
252	defer debug.SetGCPercent(debug.SetGCPercent(-1))
253
254	// Test that the runtime refuses call injection at unsafe points.
255	var g *runtime.G
256	var ready, stop uint32
257	defer atomic.StoreUint32(&stop, 1)
258	go debugCallUnsafePointWorker(&g, &ready, &stop)
259	for atomic.LoadUint32(&ready) == 0 {
260		runtime.Gosched()
261	}
262
263	_, err := runtime.InjectDebugCall(g, func() {}, nil, nil, debugCallTKill, true)
264	if msg := "call not at safe point"; err == nil || err.Error() != msg {
265		t.Fatalf("want %q, got %s", msg, err)
266	}
267}
268
269func TestDebugCallPanic(t *testing.T) {
270	skipUnderDebugger(t)
271
272	// This can deadlock if there aren't enough threads.
273	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
274
275	// InjectDebugCall cannot be executed while a GC is actively in
276	// progress. Wait until the current GC is done, and turn it off.
277	//
278	// See #10958 and #49370.
279	defer debug.SetGCPercent(debug.SetGCPercent(-1))
280	// TODO(mknyszek): This extra GC cycle is likely unnecessary
281	// because preemption (which may happen during the sweep phase)
282	// isn't much of an issue anymore thanks to asynchronous preemption.
283	// The biggest risk is having a write barrier in the debug call
284	// injection test code fire, because it runs in a signal handler
285	// and may not have a P.
286	runtime.GC()
287
288	ready := make(chan *runtime.G)
289	var stop uint32
290	defer atomic.StoreUint32(&stop, 1)
291	go func() {
292		runtime.LockOSThread()
293		defer runtime.UnlockOSThread()
294		ready <- runtime.Getg()
295		for atomic.LoadUint32(&stop) == 0 {
296		}
297	}()
298	g := <-ready
299
300	p, err := runtime.InjectDebugCall(g, func() { panic("test") }, nil, nil, debugCallTKill, false)
301	if err != nil {
302		t.Fatal(err)
303	}
304	if ps, ok := p.(string); !ok || ps != "test" {
305		t.Fatalf("wanted panic %v, got %v", "test", p)
306	}
307}
308