// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

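// mts mirrors the libc timespec structure (seconds and nanoseconds) used by
// time-related library calls.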
type mts struct {
	tv_sec  int64
	tv_nsec int64
}

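// mscratch provides scratch space for library calls whose arguments are too
// large to fit on the stack of low-level NOSPLIT functions (see mOS below).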
type mscratch struct {
	v [6]uintptr
}

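// mOS holds the Solaris-specific fields embedded in the runtime's per-M
// (OS thread) structure.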
type mOS struct {
	waitsema uintptr // semaphore for parking on locks
	perrno   *int32  // pointer to tls errno
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	//LibCall       libcall;
	ts      mts
	scratch mscratch
}

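// libcFunc stands in for a dynamically linked libc routine; the sysvicall
// helpers below are handed the address of a libcFunc variable and pass it
// to asmsysvicall6 as the call target.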
type libcFunc uintptr

//go:linkname asmsysvicall6x runtime.asmsysvicall6
var asmsysvicall6x libcFunc // name to take addr of asmsysvicall6

func asmsysvicall6() // declared for vet; do NOT call

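// sysvicall0 calls the libc function fn with no arguments and returns the
// call's r1 result.
//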
//go:nosplit
func sysvicall0(fn *libcFunc) uintptr {
	// Leave caller's PC/SP around for traceback.
	gp := getg()
	var mp *m
	if gp != nil {
		mp = gp.m
	}
	if mp != nil && mp.libcallsp == 0 {
		mp.libcallg.set(gp)
		mp.libcallpc = getcallerpc()
		// sp must be set last, because once the async CPU profiler finds
		// all three values to be non-zero, it will use them.
		mp.libcallsp = getcallersp()
	} else {
		mp = nil // See comment in sys_darwin.go:libcCall
	}

	var libcall libcall
	libcall.fn = uintptr(unsafe.Pointer(fn))
	libcall.n = 0
	libcall.args = uintptr(unsafe.Pointer(fn)) // args is unused here, but must be non-nil or the call crashes
	asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&libcall))
	if mp != nil {
		mp.libcallsp = 0
	}
	return libcall.r1
}

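// sysvicall1 calls the libc function fn with one argument and returns the
// call's r1 result, discarding errno.
//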
//go:nosplit
func sysvicall1(fn *libcFunc, a1 uintptr) uintptr {
	r1, _ := sysvicall1Err(fn, a1)
	return r1
}

// sysvicall1Err returns both the system call result and the errno value.
// This is used by sysvicall1 and pipe.
//
//go:nosplit
func sysvicall1Err(fn *libcFunc, a1 uintptr) (r1, err uintptr) {
	// Leave caller's PC/SP around for traceback.
	gp := getg()
	var mp *m
	if gp != nil {
		mp = gp.m
	}
	if mp != nil && mp.libcallsp == 0 {
		mp.libcallg.set(gp)
		mp.libcallpc = getcallerpc()
		// sp must be set last, because once the async CPU profiler finds
		// all three values to be non-zero, it will use them.
		mp.libcallsp = getcallersp()
	} else {
		mp = nil
	}

	var libcall libcall
	libcall.fn = uintptr(unsafe.Pointer(fn))
	libcall.n = 1
	// TODO(rsc): Why is noescape necessary here and below?
	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
	asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&libcall))
	if mp != nil {
		mp.libcallsp = 0
	}
	return libcall.r1, libcall.err
}

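// sysvicall2 calls the libc function fn with two arguments and returns the
// call's r1 result, discarding errno.
//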
//go:nosplit
func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr {
	r1, _ := sysvicall2Err(fn, a1, a2)
	return r1
}

// sysvicall2Err returns both the system call result and the errno value.
// This is used by sysvicall2 and pipe2.
//
//go:nosplit
//go:cgo_unsafe_args
func sysvicall2Err(fn *libcFunc, a1, a2 uintptr) (r1, err uintptr) {
	// Leave caller's PC/SP around for traceback.
	gp := getg()
	var mp *m
	if gp != nil {
		mp = gp.m
	}
	if mp != nil && mp.libcallsp == 0 {
		mp.libcallg.set(gp)
		mp.libcallpc = getcallerpc()
		// sp must be set last, because once the async CPU profiler finds
		// all three values to be non-zero, it will use them.
		mp.libcallsp = getcallersp()
	} else {
		mp = nil
	}

	var libcall libcall
	libcall.fn = uintptr(unsafe.Pointer(fn))
	libcall.n = 2
	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
	asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&libcall))
	if mp != nil {
		mp.libcallsp = 0
	}
	return libcall.r1, libcall.err
}

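// sysvicall3 calls the libc function fn with three arguments and returns the
// call's r1 result, discarding errno.
//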
//go:nosplit
func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr {
	r1, _ := sysvicall3Err(fn, a1, a2, a3)
	return r1
}

// sysvicall3Err returns both the system call result and the errno value.
// This is used by sysvicall3 and write1.
//
//go:nosplit
//go:cgo_unsafe_args
func sysvicall3Err(fn *libcFunc, a1, a2, a3 uintptr) (r1, err uintptr) {
	// Leave caller's PC/SP around for traceback.
	gp := getg()
	var mp *m
	if gp != nil {
		mp = gp.m
	}
	if mp != nil && mp.libcallsp == 0 {
		mp.libcallg.set(gp)
		mp.libcallpc = getcallerpc()
		// sp must be set last, because once the async CPU profiler finds
		// all three values to be non-zero, it will use them.
		mp.libcallsp = getcallersp()
	} else {
		mp = nil
	}

	var libcall libcall
	libcall.fn = uintptr(unsafe.Pointer(fn))
	libcall.n = 3
	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
	asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&libcall))
	if mp != nil {
		mp.libcallsp = 0
	}
	return libcall.r1, libcall.err
}

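// sysvicall4 calls the libc function fn with four arguments and returns the
// call's r1 result.
//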
//go:nosplit
//go:cgo_unsafe_args
func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
	// Leave caller's PC/SP around for traceback.
	gp := getg()
	var mp *m
	if gp != nil {
		mp = gp.m
	}
	if mp != nil && mp.libcallsp == 0 {
		mp.libcallg.set(gp)
		mp.libcallpc = getcallerpc()
		// sp must be set last, because once the async CPU profiler finds
		// all three values to be non-zero, it will use them.
		mp.libcallsp = getcallersp()
	} else {
		mp = nil
	}

	var libcall libcall
	libcall.fn = uintptr(unsafe.Pointer(fn))
	libcall.n = 4
	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
	asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&libcall))
	if mp != nil {
		mp.libcallsp = 0
	}
	return libcall.r1
}

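// sysvicall5 calls the libc function fn with five arguments and returns the
// call's r1 result.
//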
//go:nosplit
//go:cgo_unsafe_args
func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
	// Leave caller's PC/SP around for traceback.
	gp := getg()
	var mp *m
	if gp != nil {
		mp = gp.m
	}
	if mp != nil && mp.libcallsp == 0 {
		mp.libcallg.set(gp)
		mp.libcallpc = getcallerpc()
		// sp must be set last, because once the async CPU profiler finds
		// all three values to be non-zero, it will use them.
		mp.libcallsp = getcallersp()
	} else {
		mp = nil
	}

	var libcall libcall
	libcall.fn = uintptr(unsafe.Pointer(fn))
	libcall.n = 5
	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
	asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&libcall))
	if mp != nil {
		mp.libcallsp = 0
	}
	return libcall.r1
}

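// sysvicall6 calls the libc function fn with six arguments and returns the
// call's r1 result.
//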
//go:nosplit
//go:cgo_unsafe_args
func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
	// Leave caller's PC/SP around for traceback.
	gp := getg()
	var mp *m
	if gp != nil {
		mp = gp.m
	}
	if mp != nil && mp.libcallsp == 0 {
		mp.libcallg.set(gp)
		mp.libcallpc = getcallerpc()
		// sp must be set last, because once the async CPU profiler finds
		// all three values to be non-zero, it will use them.
		mp.libcallsp = getcallersp()
	} else {
		mp = nil
	}

	var libcall libcall
	libcall.fn = uintptr(unsafe.Pointer(fn))
	libcall.n = 6
	libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
	asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&libcall))
	if mp != nil {
		mp.libcallsp = 0
	}
	return libcall.r1
}

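// issetugid wraps the libc issetugid routine, which reports whether the
// process was started set-user-ID or set-group-ID or has otherwise had its
// credentials changed.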
func issetugid() int32 {
	return int32(sysvicall0(&libc_issetugid))
}