// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package sync provides basic synchronization primitives such as mutual
// exclusion locks. Other than the [Once] and [WaitGroup] types, most are intended
// for use by low-level library routines. Higher-level synchronization is
// better done via channels and communication.
//
// Values containing the types defined in this package should not be copied.
package sync

import (
	"internal/race"
	"sync/atomic"
	"unsafe"
)
18
// Provided by runtime via linkname.
//
// throw is used below for internal invariant violations
// ("sync: inconsistent mutex state"); fatal is used for detected API
// misuse ("sync: unlock of unlocked mutex").
// NOTE(review): bodies live in the runtime; presumably neither returns —
// confirm against the runtime's throw/fatal implementations.
func throw(string)
func fatal(string)
22
// A Mutex is a mutual exclusion lock.
// The zero value for a Mutex is an unlocked mutex.
//
// A Mutex must not be copied after first use.
//
// In the terminology of [the Go memory model],
// the n'th call to [Mutex.Unlock] “synchronizes before” the m'th call to [Mutex.Lock]
// for any n < m.
// A successful call to [Mutex.TryLock] is equivalent to a call to Lock.
// A failed call to TryLock does not establish any “synchronizes before”
// relation at all.
//
// [the Go memory model]: https://go.dev/ref/mem
type Mutex struct {
	// state packs the mutexLocked/mutexWoken/mutexStarving flag bits in
	// its low bits and the blocked-waiter count in the bits at and above
	// mutexWaiterShift. Manipulated only via sync/atomic operations.
	state int32
	// sema is the semaphore on which contending goroutines park
	// (via runtime_SemacquireMutex) and are woken (runtime_Semrelease).
	sema uint32
}
40
// A Locker represents an object that can be locked and unlocked.
type Locker interface {
	// Lock locks the object.
	Lock()
	// Unlock unlocks the object.
	Unlock()
}
46
const (
	mutexLocked = 1 << iota // mutex is locked
	mutexWoken              // a woken-up or actively spinning goroutine exists; Unlock need not wake another
	mutexStarving           // mutex is in starvation mode (see fairness notes below)
	mutexWaiterShift = iota // state >> mutexWaiterShift is the number of blocked waiters

	// Mutex fairness.
	//
	// Mutex can be in 2 modes of operations: normal and starvation.
	// In normal mode waiters are queued in FIFO order, but a woken up waiter
	// does not own the mutex and competes with new arriving goroutines over
	// the ownership. New arriving goroutines have an advantage -- they are
	// already running on CPU and there can be lots of them, so a woken up
	// waiter has good chances of losing. In such case it is queued at front
	// of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
	// it switches mutex to the starvation mode.
	//
	// In starvation mode ownership of the mutex is directly handed off from
	// the unlocking goroutine to the waiter at the front of the queue.
	// New arriving goroutines don't try to acquire the mutex even if it appears
	// to be unlocked, and don't try to spin. Instead they queue themselves at
	// the tail of the wait queue.
	//
	// If a waiter receives ownership of the mutex and sees that either
	// (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
	// it switches mutex back to normal operation mode.
	//
	// Normal mode has considerably better performance as a goroutine can acquire
	// a mutex several times in a row even if there are blocked waiters.
	// Starvation mode is important to prevent pathological cases of tail latency.
	starvationThresholdNs = 1e6 // 1ms of waiting flips a waiter into starvation mode
)
79
80// Lock locks m.
81// If the lock is already in use, the calling goroutine
82// blocks until the mutex is available.
83func (m *Mutex) Lock() {
84	// Fast path: grab unlocked mutex.
85	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
86		if race.Enabled {
87			race.Acquire(unsafe.Pointer(m))
88		}
89		return
90	}
91	// Slow path (outlined so that the fast path can be inlined)
92	m.lockSlow()
93}
94
95// TryLock tries to lock m and reports whether it succeeded.
96//
97// Note that while correct uses of TryLock do exist, they are rare,
98// and use of TryLock is often a sign of a deeper problem
99// in a particular use of mutexes.
100func (m *Mutex) TryLock() bool {
101	old := m.state
102	if old&(mutexLocked|mutexStarving) != 0 {
103		return false
104	}
105
106	// There may be a goroutine waiting for the mutex, but we are
107	// running now and can try to grab the mutex before that
108	// goroutine wakes up.
109	if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
110		return false
111	}
112
113	if race.Enabled {
114		race.Acquire(unsafe.Pointer(m))
115	}
116	return true
117}
118
// lockSlow is the contended path of [Mutex.Lock]: it spins a bounded
// number of times hoping the current holder releases quickly, then
// parks on m.sema, and switches the mutex into starvation mode once it
// has waited longer than starvationThresholdNs.
func (m *Mutex) lockSlow() {
	var waitStartTime int64 // nanotime when we first queued; 0 until we block
	starving := false       // we have waited past the starvation threshold
	awoke := false          // we currently own the mutexWoken flag
	iter := 0               // spin-iteration count fed to runtime_canSpin
	old := m.state
	for {
		// Don't spin in starvation mode, ownership is handed off to waiters
		// so we won't be able to acquire the mutex anyway.
		if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
			// Active spinning makes sense.
			// Try to set mutexWoken flag to inform Unlock
			// to not wake other blocked goroutines.
			if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
				atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
				awoke = true
			}
			runtime_doSpin()
			iter++
			old = m.state
			continue
		}
		// Compute the desired successor state from the snapshot in old.
		new := old
		// Don't try to acquire starving mutex, new arriving goroutines must queue.
		if old&mutexStarving == 0 {
			new |= mutexLocked
		}
		// If the mutex is locked or starving we are going to wait,
		// so count ourselves as a waiter.
		if old&(mutexLocked|mutexStarving) != 0 {
			new += 1 << mutexWaiterShift
		}
		// The current goroutine switches mutex to starvation mode.
		// But if the mutex is currently unlocked, don't do the switch.
		// Unlock expects that starving mutex has waiters, which will not
		// be true in this case.
		if starving && old&mutexLocked != 0 {
			new |= mutexStarving
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				throw("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&(mutexLocked|mutexStarving) == 0 {
				break // locked the mutex with CAS
			}
			// If we were already waiting before, queue at the front of the queue.
			queueLifo := waitStartTime != 0
			if waitStartTime == 0 {
				waitStartTime = runtime_nanotime()
			}
			// Park on the semaphore until woken by unlockSlow.
			runtime_SemacquireMutex(&m.sema, queueLifo, 1)
			starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
			old = m.state
			if old&mutexStarving != 0 {
				// If this goroutine was woken and mutex is in starvation mode,
				// ownership was handed off to us but mutex is in somewhat
				// inconsistent state: mutexLocked is not set and we are still
				// accounted as waiter. Fix that.
				if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
					throw("sync: inconsistent mutex state")
				}
				// Acquire the lock and drop our waiter count in one atomic add.
				delta := int32(mutexLocked - 1<<mutexWaiterShift)
				if !starving || old>>mutexWaiterShift == 1 {
					// Exit starvation mode.
					// Critical to do it here and consider wait time.
					// Starvation mode is so inefficient, that two goroutines
					// can go lock-step infinitely once they switch mutex
					// to starvation mode.
					delta -= mutexStarving
				}
				atomic.AddInt32(&m.state, delta)
				break
			}
			awoke = true
			iter = 0
		} else {
			// CAS lost a race with a concurrent state change;
			// reload and retry from a fresh snapshot.
			old = m.state
		}
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
}
207
208// Unlock unlocks m.
209// It is a run-time error if m is not locked on entry to Unlock.
210//
211// A locked [Mutex] is not associated with a particular goroutine.
212// It is allowed for one goroutine to lock a Mutex and then
213// arrange for another goroutine to unlock it.
214func (m *Mutex) Unlock() {
215	if race.Enabled {
216		_ = m.state
217		race.Release(unsafe.Pointer(m))
218	}
219
220	// Fast path: drop lock bit.
221	new := atomic.AddInt32(&m.state, -mutexLocked)
222	if new != 0 {
223		// Outlined slow path to allow inlining the fast path.
224		// To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
225		m.unlockSlow(new)
226	}
227}
228
// unlockSlow finishes an Unlock whose fast path left a non-zero state:
// there are waiters or flag bits set, or the mutex was not locked at
// all. new is m.state as observed right after the fast path's atomic add.
func (m *Mutex) unlockSlow(new int32) {
	// Re-adding mutexLocked reconstructs the pre-Unlock state; if the
	// locked bit was clear there, Unlock was called on an unlocked mutex.
	if (new+mutexLocked)&mutexLocked == 0 {
		fatal("sync: unlock of unlocked mutex")
	}
	if new&mutexStarving == 0 {
		// Normal mode: try to wake exactly one waiter, if any.
		old := new
		for {
			// If there are no waiters or a goroutine has already
			// been woken or grabbed the lock, no need to wake anyone.
			// In starvation mode ownership is directly handed off from unlocking
			// goroutine to the next waiter. We are not part of this chain,
			// since we did not observe mutexStarving when we unlocked the mutex above.
			// So get off the way.
			if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
				return
			}
			// Grab the right to wake someone.
			new = (old - 1<<mutexWaiterShift) | mutexWoken
			if atomic.CompareAndSwapInt32(&m.state, old, new) {
				runtime_Semrelease(&m.sema, false, 1)
				return
			}
			// CAS lost a race; reload the state and retry.
			old = m.state
		}
	} else {
		// Starving mode: handoff mutex ownership to the next waiter, and yield
		// our time slice so that the next waiter can start to run immediately.
		// Note: mutexLocked is not set, the waiter will set it after wakeup.
		// But mutex is still considered locked if mutexStarving is set,
		// so new coming goroutines won't acquire it.
		runtime_Semrelease(&m.sema, true, 1)
	}
}
262