// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build aix || darwin || netbsd || openbsd || plan9 || solaris || windows

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
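//
// The mutex key (l.key) encodes both the lock state and the wait queue:
// the low bit is the locked flag, and the remaining bits, when nonzero,
// point to the M at the head of the list of waiting Ms, chained through
// m.nextwaitm.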
const (
	locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

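// mutexContended reports whether other Ms are queued waiting on l:
// the key exceeds locked only while at least one M is on the wait list.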
func mutexContended(l *mutex) bool {
	return atomic.Loaduintptr(&l.key) > locked
}

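// lock delegates to lockWithRank so that lock ordering can be checked
// when static lock ranking is enabled; lock2 holds the semaphore-based
// implementation itself.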
func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

func lock2(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, locked) {
		return
	}
	semacreate(gp.m)

	timer := &lockTimer{lock: l}
	timer.begin()
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|locked) {
				timer.end()
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = muintptr(v &^ locked)
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

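// unlock delegates to unlockWithRank, the counterpart of lockWithRank;
// unlock2 performs the actual release and, if needed, wakes a waiter.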
func unlock(l *mutex) {
	unlockWithRank(l)
}

// We might not be holding a p in this code.
//
//go:nowritebarrier
func unlock2(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == locked {
			if atomic.Casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = muintptr(v &^ locked).ptr()
			if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
				// Dequeued an M.  Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.mLockProfile.recordUnlock(l)
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
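//
// The note key has three states: 0 (cleared), locked (notewakeup has been
// called), or a pointer to the M that is sleeping on the note.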
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

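// notesleep blocks the calling M, which must be running on its g0 stack,
// until notewakeup(n) is called.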
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}

//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

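// notetsleep is like notesleep but gives up after ns nanoseconds.
// It reports whether the note was signaled before the timeout.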
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}

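// beforeIdle is called before an M parks waiting for work. It is a no-op
// here; the js/wasm implementation (lock_js.go) uses it to hand back a
// goroutine to run instead of blocking.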
func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

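// checkTimeouts is likewise a no-op here; the js/wasm runtime uses it to
// wake notes whose sleep deadlines have expired.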
func checkTimeouts() {}