1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package sync_test
6
7import (
8	"internal/testenv"
9	"math/rand"
10	"reflect"
11	"runtime"
12	"sync"
13	"sync/atomic"
14	"testing"
15	"testing/quick"
16)
17
// mapOp names one of the operations that can be performed on a mapInterface.
type mapOp string

const (
	opLoad             = mapOp("Load")
	opStore            = mapOp("Store")
	opLoadOrStore      = mapOp("LoadOrStore")
	opLoadAndDelete    = mapOp("LoadAndDelete")
	opDelete           = mapOp("Delete")
	opSwap             = mapOp("Swap")
	opCompareAndSwap   = mapOp("CompareAndSwap")
	opCompareAndDelete = mapOp("CompareAndDelete")
	opClear            = mapOp("Clear")
)

// mapOps lists every operation so Generate can pick one uniformly at
// random. Keep this in sync with the constants above.
var mapOps = [...]mapOp{
	opLoad,
	opStore,
	opLoadOrStore,
	opLoadAndDelete,
	opDelete,
	opSwap,
	opCompareAndSwap,
	opCompareAndDelete,
	opClear,
}
43
// mapCall is a quick.Generator for calls on mapInterface.
// op and k are always populated; v is set only for operations that
// store a value (Store, LoadOrStore) and is nil otherwise.
type mapCall struct {
	op   mapOp // operation to perform
	k, v any   // key and optional value arguments
}
49
// apply executes the call c against m and reports the operation's
// observable result as a (value, ok) pair; operations with no return
// value yield (nil, false).
func (c mapCall) apply(m mapInterface) (any, bool) {
	switch c.op {
	case opLoad:
		return m.Load(c.k)
	case opStore:
		m.Store(c.k, c.v)
		return nil, false
	case opLoadOrStore:
		return m.LoadOrStore(c.k, c.v)
	case opLoadAndDelete:
		return m.LoadAndDelete(c.k)
	case opDelete:
		m.Delete(c.k)
		return nil, false
	case opSwap:
		return m.Swap(c.k, c.v)
	case opCompareAndSwap:
		if m.CompareAndSwap(c.k, c.v, rand.Int()) {
			// The swapped-in value is random, so delete the key to keep
			// the map implementations being compared in identical states.
			m.Delete(c.k)
			return c.v, true
		}
		return nil, false
	case opCompareAndDelete:
		if m.CompareAndDelete(c.k, c.v) {
			// Report success only if the key is really gone afterwards.
			if _, ok := m.Load(c.k); !ok {
				return nil, true
			}
		}
		return nil, false
	case opClear:
		m.Clear()
		return nil, false
	default:
		panic("invalid mapOp")
	}
}
86
// mapResult captures the (value, ok) pair produced by mapCall.apply so
// the outputs of different map implementations can be compared.
type mapResult struct {
	value any
	ok    bool
}
91
92func randValue(r *rand.Rand) any {
93	b := make([]byte, r.Intn(4))
94	for i := range b {
95		b[i] = 'a' + byte(rand.Intn(26))
96	}
97	return string(b)
98}
99
100func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
101	c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)}
102	switch c.op {
103	case opStore, opLoadOrStore:
104		c.v = randValue(r)
105	}
106	return reflect.ValueOf(c)
107}
108
109func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[any]any) {
110	for _, c := range calls {
111		v, ok := c.apply(m)
112		results = append(results, mapResult{v, ok})
113	}
114
115	final = make(map[any]any)
116	m.Range(func(k, v any) bool {
117		final[k] = v
118		return true
119	})
120
121	return results, final
122}
123
124func applyMap(calls []mapCall) ([]mapResult, map[any]any) {
125	return applyCalls(new(sync.Map), calls)
126}
127
128func applyRWMutexMap(calls []mapCall) ([]mapResult, map[any]any) {
129	return applyCalls(new(RWMutexMap), calls)
130}
131
132func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[any]any) {
133	return applyCalls(new(DeepCopyMap), calls)
134}
135
136func TestMapMatchesRWMutex(t *testing.T) {
137	if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil {
138		t.Error(err)
139	}
140}
141
142func TestMapMatchesDeepCopy(t *testing.T) {
143	if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil {
144		t.Error(err)
145	}
146}
147
// TestConcurrentRange checks that Range visits every key exactly once
// and only observes consistent values while other goroutines are
// concurrently storing to and loading from the same keys.
func TestConcurrentRange(t *testing.T) {
	const mapSize = 1 << 10

	// Seed keys 1..mapSize with value == key, so every value in the map
	// is a multiple of its key from the start.
	m := new(sync.Map)
	for n := int64(1); n <= mapSize; n++ {
		m.Store(n, int64(n))
	}

	done := make(chan struct{})
	var wg sync.WaitGroup
	defer func() {
		close(done)
		wg.Wait()
	}()
	// Start one mutator goroutine per P. Each either overwrites a key n
	// with n*i*g — always a multiple of n — or simply loads it, until
	// the done channel is closed.
	for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
		r := rand.New(rand.NewSource(g))
		wg.Add(1)
		go func(g int64) {
			defer wg.Done()
			for i := int64(0); ; i++ {
				select {
				case <-done:
					return
				default:
				}
				for n := int64(1); n < mapSize; n++ {
					if r.Int63n(mapSize) == 0 {
						m.Store(n, n*i*g)
					} else {
						m.Load(n)
					}
				}
			}
		}(g)
	}

	iters := 1 << 10
	if testing.Short() {
		iters = 16
	}
	for n := iters; n > 0; n-- {
		seen := make(map[int64]bool, mapSize)

		m.Range(func(ki, vi any) bool {
			k, v := ki.(int64), vi.(int64)
			// Every write stores a multiple of the key, so any other
			// value means Range observed a torn or inconsistent entry.
			if v%k != 0 {
				t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
			}
			if seen[k] {
				t.Fatalf("Range visited key %v twice", k)
			}
			seen[k] = true
			return true
		})

		// No keys are ever deleted, so Range must have seen all of them.
		if len(seen) != mapSize {
			t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize)
		}
	}
}
208
// TestIssue40999 verifies that Delete does not leak references to
// deleted keys (https://go.dev/issue/40999): a deleted pointer key must
// eventually become collectable, observed here via its finalizer firing.
func TestIssue40999(t *testing.T) {
	var m sync.Map

	// Since the miss-counting in missLocked (via Delete)
	// compares the miss count with len(m.dirty),
	// add an initial entry to bias len(m.dirty) above the miss count.
	m.Store(nil, struct{}{})

	var finalized uint32

	// Set finalizers that count for collected keys. A non-zero count
	// indicates that keys have not been leaked.
	for atomic.LoadUint32(&finalized) == 0 {
		p := new(int)
		runtime.SetFinalizer(p, func(*int) {
			atomic.AddUint32(&finalized, 1)
		})
		m.Store(p, struct{}{})
		m.Delete(p)
		runtime.GC()
	}
}
231
// TestMapRangeNestedCall checks that Map methods — including a nested
// Range — may be called from within a Range callback without
// deadlocking or corrupting iteration (https://go.dev/issue/46399).
func TestMapRangeNestedCall(t *testing.T) { // Issue 46399
	var m sync.Map
	for i, v := range [3]string{"hello", "world", "Go"} {
		m.Store(i, v)
	}
	m.Range(func(key, value any) bool {
		m.Range(func(key, value any) bool {
			// We should be able to load the key offered in the Range callback,
			// because there are no concurrent Delete involved in this tested map.
			if v, ok := m.Load(key); !ok || !reflect.DeepEqual(v, value) {
				t.Fatalf("Nested Range loads unexpected value, got %+v want %+v", v, value)
			}

			// The map has never contained key 42 up to this point, so
			// LoadOrStore must store rather than load; if it reports
			// loaded, there is an internal bug in nested Range handling.
			if _, loaded := m.LoadOrStore(42, "dummy"); loaded {
				t.Fatalf("Nested Range loads unexpected value, want store a new value")
			}

			// Try to Store then LoadAndDelete the corresponding value with the key
			// 42 to the Map. In this case, the key 42 and associated value should be
			// removed from the Map. Therefore any future range won't observe key 42
			// as we checked in above.
			val := "sync.Map"
			m.Store(42, val)
			if v, loaded := m.LoadAndDelete(42); !loaded || !reflect.DeepEqual(v, val) {
				t.Fatalf("Nested Range loads unexpected value, got %v, want %v", v, val)
			}
			return true
		})

		// Remove key from Map on-the-fly.
		m.Delete(key)
		return true
	})

	// After a Range of Delete, all keys should be removed and any
	// further Range won't invoke the callback. Hence length remains 0.
	length := 0
	m.Range(func(key, value any) bool {
		length++
		return true
	})

	if length != 0 {
		t.Fatalf("Unexpected sync.Map size, got %v want %v", length, 0)
	}
}
281
// TestCompareAndSwap_NonExistingKey checks that CompareAndSwap reports
// failure for a key that was never stored, even when the old value
// given is nil. See https://go.dev/issue/51972#issuecomment-1126408637.
func TestCompareAndSwap_NonExistingKey(t *testing.T) {
	m := &sync.Map{}
	swapped := m.CompareAndSwap(m, nil, 42)
	if swapped {
		t.Fatalf("CompareAndSwap on a non-existing key succeeded")
	}
}
289
// TestMapRangeNoAllocations checks that Range over an empty Map does
// not allocate (https://go.dev/issue/62404).
func TestMapRangeNoAllocations(t *testing.T) { // Issue 62404
	// Allocation counts are only meaningful with optimizations enabled.
	testenv.SkipIfOptimizationOff(t)
	var m sync.Map
	allocs := testing.AllocsPerRun(10, func() {
		m.Range(func(key, value any) bool {
			return true
		})
	})
	if allocs > 0 {
		t.Errorf("AllocsPerRun of m.Range = %v; want 0", allocs)
	}
}
302
303// TestConcurrentClear tests concurrent behavior of sync.Map properties to ensure no data races.
304// Checks for proper synchronization between Clear, Store, Load operations.
305func TestConcurrentClear(t *testing.T) {
306	var m sync.Map
307
308	wg := sync.WaitGroup{}
309	wg.Add(30) // 10 goroutines for writing, 10 goroutines for reading, 10 goroutines for waiting
310
311	// Writing data to the map concurrently
312	for i := 0; i < 10; i++ {
313		go func(k, v int) {
314			defer wg.Done()
315			m.Store(k, v)
316		}(i, i*10)
317	}
318
319	// Reading data from the map concurrently
320	for i := 0; i < 10; i++ {
321		go func(k int) {
322			defer wg.Done()
323			if value, ok := m.Load(k); ok {
324				t.Logf("Key: %v, Value: %v\n", k, value)
325			} else {
326				t.Logf("Key: %v not found\n", k)
327			}
328		}(i)
329	}
330
331	// Clearing data from the map concurrently
332	for i := 0; i < 10; i++ {
333		go func() {
334			defer wg.Done()
335			m.Clear()
336		}()
337	}
338
339	wg.Wait()
340
341	m.Clear()
342
343	m.Range(func(k, v any) bool {
344		t.Errorf("after Clear, Map contains (%v, %v); expected to be empty", k, v)
345
346		return true
347	})
348}
349
// TestMapClearNoAllocations checks that clearing an (empty) Map does
// not allocate.
func TestMapClearNoAllocations(t *testing.T) {
	// Allocation counts are only meaningful with optimizations enabled.
	testenv.SkipIfOptimizationOff(t)
	var m sync.Map
	allocs := testing.AllocsPerRun(10, func() {
		m.Clear()
	})
	if allocs > 0 {
		t.Errorf("AllocsPerRun of m.Clear = %v; want 0", allocs)
	}
}
360