// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

#include "go_asm.h"
#include "funcdata.h"
#include "textflag.h"

// The following thunks allow calling the gcc-compiled race runtime directly
// from Go code without going all the way through cgo.
// First, it's much faster (up to 50% speedup for real Go programs).
// Second, it eliminates race-related special cases from cgocall and the
// scheduler.
// Third, in the long term it will allow removing the cyclic runtime/race
// dependency on cmd/go.

// A brief recap of the s390x C calling convention:
// Arguments are passed in R2...R6; the rest go on the stack.
// Callee-saved registers are R6...R13 and R15.
// Temporary registers are R0...R5 and R14.

// When calling racecalladdr, R1 is the call target address.

// The race ctx, ThreadState *thr below, is passed in R2 and loaded in
// racecalladdr.
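
// For example, raceread(addr) below becomes the C call
// __tsan_read(thr, addr, pc): R1 = $__tsan_read, R3 = addr, R4 = pc (the
// caller's return address, still in R14 at that point), and racecalladdr
// fills in R2 = thr before jumping to racecall.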

// func runtime·raceread(addr uintptr)
// Called from instrumented code.
TEXT	runtime·raceread(SB), NOSPLIT, $0-8
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_read(SB), R1
	MOVD	addr+0(FP), R3
	MOVD	R14, R4				// pc = raceread's return address.
	JMP	racecalladdr<>(SB)

// func runtime·RaceRead(addr uintptr)
TEXT	runtime·RaceRead(SB), NOSPLIT, $0-8
	// This needs to be a tail call, because raceread reads caller pc.
	JMP	runtime·raceread(SB)

// func runtime·racereadpc(void *addr, void *callpc, void *pc)
TEXT	runtime·racereadpc(SB), NOSPLIT, $0-24
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_read_pc(SB), R1
	LMG	addr+0(FP), R3, R5		// Load addr, callpc, pc into R3-R5.
	JMP	racecalladdr<>(SB)

// func runtime·racewrite(addr uintptr)
// Called from instrumented code.
TEXT	runtime·racewrite(SB), NOSPLIT, $0-8
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_write(SB), R1
	MOVD	addr+0(FP), R3
	MOVD	R14, R4				// pc = racewrite's return address.
	JMP	racecalladdr<>(SB)

// func runtime·RaceWrite(addr uintptr)
TEXT	runtime·RaceWrite(SB), NOSPLIT, $0-8
	// This needs to be a tail call, because racewrite reads caller pc.
	JMP	runtime·racewrite(SB)

// func runtime·racewritepc(void *addr, void *callpc, void *pc)
TEXT	runtime·racewritepc(SB), NOSPLIT, $0-24
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_write_pc(SB), R1
	LMG	addr+0(FP), R3, R5		// Load addr, callpc, pc into R3-R5.
	JMP	racecalladdr<>(SB)

// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented code.
TEXT	runtime·racereadrange(SB), NOSPLIT, $0-16
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R1
	LMG	addr+0(FP), R3, R4		// Load addr, size into R3-R4.
	MOVD	R14, R5				// pc = racereadrange's return address.
	JMP	racecalladdr<>(SB)

// func runtime·RaceReadRange(addr, size uintptr)
TEXT	runtime·RaceReadRange(SB), NOSPLIT, $0-16
	// This needs to be a tail call, because racereadrange reads caller pc.
	JMP	runtime·racereadrange(SB)

// func runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
TEXT	runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R1
	LMG	addr+0(FP), R3, R5		// Load addr, sz, pc into R3-R5.
	// pc is an interceptor address, but TSan expects it to point to the
	// middle of an interceptor (see LLVM's SCOPED_INTERCEPTOR_RAW).
	ADD	$2, R5
	JMP	racecalladdr<>(SB)

// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented code.
TEXT	runtime·racewriterange(SB), NOSPLIT, $0-16
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R1
	LMG	addr+0(FP), R3, R4		// Load addr, size into R3-R4.
	MOVD	R14, R5				// pc = racewriterange's return address.
	JMP	racecalladdr<>(SB)

// func runtime·RaceWriteRange(addr, size uintptr)
TEXT	runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	// This needs to be a tail call, because racewriterange reads caller pc.
	JMP	runtime·racewriterange(SB)

// func runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
TEXT	runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R1
	LMG	addr+0(FP), R3, R5		// Load addr, sz, pc into R3-R5.
	// pc is an interceptor address, but TSan expects it to point to the
	// middle of an interceptor (see LLVM's SCOPED_INTERCEPTOR_RAW).
	ADD	$2, R5
	JMP	racecalladdr<>(SB)

// If R3 is out of range, do nothing. Otherwise, set up the goroutine context
// and invoke racecall. Other arguments are already set.
TEXT	racecalladdr<>(SB), NOSPLIT, $0-0
	MOVD	runtime·racearenastart(SB), R0
	CMPUBLT	R3, R0, data			// Before racearena start?
	MOVD	runtime·racearenaend(SB), R0
	CMPUBLT	R3, R0, call			// Before racearena end?
data:
	MOVD	runtime·racedatastart(SB), R0
	CMPUBLT	R3, R0, ret			// Before racedata start?
	MOVD	runtime·racedataend(SB), R0
	CMPUBGE	R3, R0, ret			// At or after racedata end?
call:
	MOVD	g_racectx(g), R2
	JMP	racecall<>(SB)
ret:
	RET
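
// In Go-like pseudocode, the filter above is roughly (a sketch):
//
//	if (racearenastart <= addr && addr < racearenaend) ||
//	   (racedatastart <= addr && addr < racedataend) {
//		racecall(fn, g.racectx, ...)	// fn is in R1, addr in R3
//	}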

// func runtime·racefuncenter(pc uintptr)
// Called from instrumented code.
TEXT	runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVD	callpc+0(FP), R3
	JMP	racefuncenter<>(SB)

// Common code for racefuncenter
// R3 = caller's return address
TEXT	racefuncenter<>(SB), NOSPLIT, $0-0
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVD	$__tsan_func_enter(SB), R1
	MOVD	g_racectx(g), R2
	BL	racecall<>(SB)
	RET

// func runtime·racefuncexit()
// Called from instrumented code.
TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
	// void __tsan_func_exit(ThreadState *thr);
	MOVD	$__tsan_func_exit(SB), R1
	MOVD	g_racectx(g), R2
	JMP	racecall<>(SB)
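
// Under -race, the compiler brackets each instrumented Go function with
// these thunks; schematically (a sketch, not literal compiler output):
//
//	func f() {
//		racefuncenter(getcallerpc())
//		// ... body, with raceread/racewrite calls inserted ...
//		racefuncexit()
//	}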

// Atomic operations for sync/atomic package.
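// Note: the symbols below spell the package path with U+2215 (∕), the
// division slash, because a plain "/" is not valid in an assembler
// identifier; the assembler rewrites it to "/" in the final symbol name
// (sync/atomic.LoadInt32 and so on).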

// Load

TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	MOVD	$__tsan_go_atomic32_load(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	MOVD	$__tsan_go_atomic64_load(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	JMP	sync∕atomic·LoadInt32(SB)

TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·LoadInt64(SB)

// Store

TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	MOVD	$__tsan_go_atomic32_store(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	MOVD	$__tsan_go_atomic64_store(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	JMP	sync∕atomic·StoreInt32(SB)

TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·StoreInt64(SB)

TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	JMP	sync∕atomic·StoreInt64(SB)

// Swap

TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·SwapInt32(SB)

TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·SwapInt64(SB)

TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·SwapInt64(SB)

// Add

TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_fetch_add(SB), R1
	BL	racecallatomic<>(SB)
	// TSan performed fetch_add, but Go needs add_fetch.
	MOVW	add+8(FP), R0
	MOVW	ret+16(FP), R1
	ADD	R0, R1, R0
	MOVW	R0, ret+16(FP)
	RET

TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_fetch_add(SB), R1
	BL	racecallatomic<>(SB)
	// TSan performed fetch_add, but Go needs add_fetch.
	MOVD	add+8(FP), R0
	MOVD	ret+16(FP), R1
	ADD	R0, R1, R0
	MOVD	R0, ret+16(FP)
	RET
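
// In Go terms, the fixup above is (a sketch):
//
//	old := ret	// __tsan_go_atomic*_fetch_add left the old value in ret
//	ret = old + add	// but sync/atomic's Add* must return the new value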

TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·AddInt32(SB)

TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AddInt64(SB)

TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AddInt64(SB)

// And

TEXT	sync∕atomic·AndInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_fetch_and(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·AndInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_fetch_and(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·AndUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·AndInt32(SB)

TEXT	sync∕atomic·AndUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AndInt64(SB)

TEXT	sync∕atomic·AndUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·AndInt64(SB)

// Or

TEXT	sync∕atomic·OrInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	MOVD	$__tsan_go_atomic32_fetch_or(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·OrInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	MOVD	$__tsan_go_atomic64_fetch_or(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·OrUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	JMP	sync∕atomic·OrInt32(SB)

TEXT	sync∕atomic·OrUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·OrInt64(SB)

TEXT	sync∕atomic·OrUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	JMP	sync∕atomic·OrInt64(SB)

// CompareAndSwap

TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
	GO_ARGS
	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
	GO_ARGS
	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R1
	BL	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt32(SB)

TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt64(SB)

TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
	GO_ARGS
	JMP	sync∕atomic·CompareAndSwapInt64(SB)

// Common code for atomic operations. Calls R1.
TEXT	racecallatomic<>(SB), NOSPLIT, $0
	MOVD	24(R15), R5			// Address (arg1, after 2xBL).
	// If we pass an invalid pointer to the TSan runtime, it will cause a
	// "fatal error: unknown caller pc". So trigger a SEGV here instead.
	MOVB	(R5), R0
	MOVD	runtime·racearenastart(SB), R0
	CMPUBLT	R5, R0, racecallatomic_data	// Before racearena start?
	MOVD	runtime·racearenaend(SB), R0
	CMPUBLT	R5, R0, racecallatomic_ok	// Before racearena end?
racecallatomic_data:
	MOVD	runtime·racedatastart(SB), R0
	CMPUBLT	R5, R0, racecallatomic_ignore	// Before racedata start?
	MOVD	runtime·racedataend(SB), R0
	CMPUBGE	R5, R0, racecallatomic_ignore	// At or after racedata end?
racecallatomic_ok:
	MOVD	g_racectx(g), R2		// ThreadState *.
	MOVD	8(R15), R3			// Caller PC.
	MOVD	R14, R4				// PC.
	ADD	$24, R15, R5			// Arguments.
	// A tail call would fail to restore R15, so use a regular call.
	BL	racecall<>(SB)
	RET
racecallatomic_ignore:
	// Call __tsan_go_ignore_sync_begin to ignore synchronization during
	// the atomic op. An attempt to synchronize on the address would cause
	// a crash.
	MOVD	R1, R6				// Save target function.
	MOVD	R14, R7				// Save PC.
	MOVD	$__tsan_go_ignore_sync_begin(SB), R1
	MOVD	g_racectx(g), R2		// ThreadState *.
	BL	racecall<>(SB)
	MOVD	R6, R1				// Restore target function.
	MOVD	g_racectx(g), R2		// ThreadState *.
	MOVD	8(R15), R3			// Caller PC.
	MOVD	R7, R4				// PC.
	ADD	$24, R15, R5			// Arguments.
	BL	racecall<>(SB)
	MOVD	$__tsan_go_ignore_sync_end(SB), R1
	MOVD	g_racectx(g), R2		// ThreadState *.
	BL	racecall<>(SB)
	RET
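
// The __tsan_go_atomic* entry points called above take, roughly, this
// C shape in the TSan Go runtime (a sketch for orientation; the exact
// prototype lives in the TSan sources):
//
//	void __tsan_go_atomicNN_op(ThreadState *thr, uptr callpc, uptr pc, u8 *args);
//
// args points at the Go argument frame (address, operands, result slot),
// which is why R5 is set to SP+24 above.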

// func runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend
// there are no arguments.
TEXT	runtime·racecall(SB), NOSPLIT, $0-0
	MOVD	fn+0(FP), R1
	MOVD	arg0+8(FP), R2
	MOVD	arg1+16(FP), R3
	MOVD	arg2+24(FP), R4
	MOVD	arg3+32(FP), R5
	JMP	racecall<>(SB)

// Switches SP to the g0 stack and calls R1. Arguments are already set.
TEXT	racecall<>(SB), NOSPLIT, $0-0
	BL	runtime·save_g(SB)		// Save g for callbacks.
	MOVD	R15, R7				// Save SP.
	MOVD	g_m(g), R8			// R8 = thread.
	MOVD	m_g0(R8), R8			// R8 = g0.
	CMPBEQ	R8, g, call			// Already on g0?
	MOVD	(g_sched+gobuf_sp)(R8), R15	// Switch SP to g0.
call:	SUB	$160, R15			// Allocate C frame (160-byte s390x ABI register save area).
	BL	R1				// Call C code.
	MOVD	R7, R15				// Restore SP.
	RET					// Return to Go.
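
// Note: save_g above is what makes C->Go callbacks possible: when the race
// runtime calls back into Go via racecallbackthunk below, the thunk reloads
// g with load_g.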

// C->Go callback thunk that allows calling runtime·racesymbolize from C
// code. racecall has only switched SP; finish the g->g0 switch by setting
// the correct g. R2 contains the command code, R3 the command-specific
// context. See racecallback for command codes.
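// Rough stack picture (a sketch; offsets relative to R15 on entry):
//
//	48(R15)..127(R15)  R6-R15 saved by the STMG below
//
// After SUB $24, the racecallback arguments go to 8(R15) and 16(R15),
// and the saved registers sit at 72(R15) = 48+24, which is what the
// second LMG restores.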
TEXT	runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0
	STMG	R6, R15, 48(R15)		// Save non-volatile regs.
	BL	runtime·load_g(SB)		// Saved by racecall.
	CMPBNE	R2, $0, rest			// raceGetProcCmd?
	MOVD	g_m(g), R2			// R2 = thread.
	MOVD	m_p(R2), R2			// R2 = processor.
	MVC	$8, p_raceprocctx(R2), (R3)	// *R3 = ThreadState *.
	LMG	48(R15), R6, R15		// Restore non-volatile regs.
	BR	R14				// Return to C.
rest:	MOVD	g_m(g), R4			// R4 = current thread.
	MOVD	m_g0(R4), g			// Switch to g0.
	SUB	$24, R15			// Allocate Go argument slots.
	STMG	R2, R3, 8(R15)			// Fill Go frame.
	BL	runtime·racecallback(SB)	// Call Go code.
	LMG	72(R15), R6, R15		// Restore non-volatile regs.
	BR	R14				// Return to C.