// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build dragonfly || freebsd || linux

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.
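//
// On Linux these are built on the futex(2) system call; the BSDs use their
// native wait/wake primitives (see the per-OS files for the exact calls).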

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	// Spin parameters used by lock2 below: active_spin passes of
	// procyield(active_spin_cnt), then passive_spin passes of osyield,
	// before giving up and sleeping on the futex.
	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads in all states; they do not
// affect the mutex's state.
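// mutex_sleeping is what tells unlock2 to issue a futexwakeup. A thread that
// overwrites mutex_sleeping (see the speculative Xchg in lock2) takes over
// responsibility for restoring it, so a sleeping thread's wakeup is not lost.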

// We use the uintptr mutex.key and note.key as a uint32.
//
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

// mutexContended reports whether other threads are (or recently were)
// waiting for l, i.e. whether its state is mutex_sleeping.
func mutexContended(l *mutex) bool {
	return atomic.Load(key32(&l.key)) > mutex_locked
}

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}

// lock2 does the actual locking; lock routes through lockWithRank so that
// lock-ordering checks (when enabled) observe the acquisition. Holding any
// runtime lock keeps the M non-preemptible: lock2 increments gp.m.locks and
// unlock2 decrements it, restoring the preemption request only once the
// count drops back to zero.
func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either mutex_locked or mutex_sleeping
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l.key from
	// mutex_sleeping to some other value, we must be
	// careful to change it back to mutex_sleeping before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	timer := &lockTimer{lock: l}
	timer.begin()
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					timer.end()
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					timer.end()
					return
				}
			}
			osyield()
		}

		// Sleep.
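		// Announce that we are going to sleep and re-check the lock in one
		// atomic step: if the Xchg finds the mutex unlocked, we have in fact
		// acquired it. In that case the key is left as mutex_sleeping, which
		// at worst causes one unnecessary futexwakeup in unlock2.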
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			timer.end()
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

// unlock2 releases the mutex. If the old state was mutex_sleeping, a thread
// may be parked on the futex, so wake one up.
func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.mLockProfile.recordUnlock(l)
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
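//
// A note is a one-shot event built on the same futex primitives: noteclear
// resets it, notewakeup signals it (a second wakeup throws), and
// notesleep/notetsleep block until it has been signaled. n.key is 0 while
// the note is clear and becomes 1 once it has been signaled.
//
// For example (a sketch; the variable name is illustrative):
//
//	var done note
//	noteclear(&done)
//	// ... another M eventually calls notewakeup(&done) ...
//	notesleep(&done) // returns once the note has been signaled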
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

// notesleep blocks the calling M until the note has been signaled.
// It must be called on g0; user goroutines use notetsleepg instead,
// which enters a syscall-blocked state first.
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
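//
// notetsleep_internal waits for the note to be signaled for at most ns
// nanoseconds (ns < 0 means wait indefinitely) and reports whether the
// note had been signaled by the time it returned.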
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	// Bounded sleep: recompute the remaining time after every (possibly
	// spurious or cgo-polling) wakeup until the note fires or the deadline passes.
	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// Same as runtime·notetsleep, but called on user g (not g0).
// Calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}

// beforeIdle is a no-op in the futex-based implementation; other lock
// implementations can use it to supply a goroutine to run before the M
// goes idle.
func beforeIdle(int64, int64) (*g, bool) {
	return nil, false
}

// checkTimeouts is likewise a no-op here.
func checkTimeouts() {}
257