// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(clippy::unwrap_used, clippy::expect_used)]

use crate::*;

use core::ops::{Deref, DerefMut};
use std::collections::HashSet;
use std::sync::Arc;
use std::thread;

// Maximum number of active handles used across all tests.
// Chosen to be divisible by the number of active threads.
const MAX_ACTIVE_HANDLES: u32 = NUM_ACTIVE_THREADS * MAX_ACTIVE_HANDLES_PER_THREAD;

const MAX_ACTIVE_HANDLES_PER_THREAD: u32 = 16384;

// Deliberately picking a low number of shards so that we
// are more likely to discover conflicts between threads.
const NUM_SHARDS: u8 = 4;

// Deliberately picking a higher number of threads.
const NUM_ACTIVE_THREADS: u32 = 8;

const DEFAULT_DIMENSIONS: HandleMapDimensions = HandleMapDimensions {
    num_shards: NUM_SHARDS,
    max_active_handles: MAX_ACTIVE_HANDLES,
};

fn build_handle_map<T: Send + Sync>() -> HandleMap<T> {
    HandleMap::with_dimensions(DEFAULT_DIMENSIONS)
}

/// Runs the given test function `num_repetitions_per_thread` times
/// on each of `NUM_ACTIVE_THREADS` spawned threads, then joins them all.
fn test_for_each_thread<F>(test_function_ref: Arc<F>, num_repetitions_per_thread: usize)
where
    F: Fn() + Send + Sync + 'static,
{
    let mut join_handles = Vec::new();
    for _ in 0..NUM_ACTIVE_THREADS {
        let test_function_clone = test_function_ref.clone();
        let join_handle = thread::spawn(move || {
            for _ in 0..num_repetitions_per_thread {
                test_function_clone();
            }
        });
        join_handles.push(join_handle);
    }
    for join_handle in join_handles {
        join_handle.join().unwrap()
    }
}

/// Tests the consistency of reads from the same handle across
/// multiple threads.
#[test]
fn test_read_consistency_same_address() {
    let num_repetitions_per_thread = 10000;
    let handle_map = build_handle_map::<String>();
    let handle = handle_map
        .allocate(|| "hello".to_string())
        .expect("Allocation shouldn't fail");
    let test_fn = Arc::new(move || {
        let value_ref = handle_map.get(handle).expect("Getting shouldn't fail");
        assert_eq!("hello", value_ref.deref());
    });
    test_for_each_thread(test_fn, num_repetitions_per_thread);
}

/// Tests overloading the table with allocations to ensure
/// that when all is said and done, we still haven't exceeded
/// the allocation limit.
#[test]
#[allow(unused_must_use)]
fn test_overload_with_allocations() {
    let num_repetitions_per_thread = 2 * MAX_ACTIVE_HANDLES_PER_THREAD as usize;
    let handle_map = build_handle_map::<u8>();

    let handle_map_function_ref = Arc::new(handle_map);
    let handle_map_post_function_ref = handle_map_function_ref.clone();

    let test_fn = Arc::new(move || {
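        // Each thread attempts twice its per-thread share of allocations, so
        // allocations past MAX_ACTIVE_HANDLES are expected to fail; the failed
        // results are deliberately ignored, since only the final count is checked.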
        handle_map_function_ref.allocate(|| 0xFF);
    });
    test_for_each_thread(test_fn, num_repetitions_per_thread);

    let actual_num_active_handles = handle_map_post_function_ref.get_current_allocation_count();
    assert_eq!(MAX_ACTIVE_HANDLES, actual_num_active_handles);
}

/// Tests deallocations and allocations near the allocation limit.
#[test]
#[allow(unused_must_use)]
fn test_overload_allocations_deallocations() {
    let num_repetitions_per_thread = 10000;

    // Pre-fill the map so that there's only one available entry.
    let handle_map = build_handle_map::<u8>();
    for i in 0..(MAX_ACTIVE_HANDLES - 1) {
        handle_map.allocate(|| (i % 256) as u8);
    }

    let handle_map_function_ref = Arc::new(handle_map);
    let handle_map_post_function_ref = handle_map_function_ref.clone();

    let test_fn = Arc::new(move || {
        let allocation_result = handle_map_function_ref.allocate(|| 0xFF);
        if let Ok(handle) = allocation_result {
            let _ = handle_map_function_ref.deallocate(handle).unwrap();
        }
    });
    test_for_each_thread(test_fn, num_repetitions_per_thread);

    // No matter what happened above, we should have the same number
    // of handles as when we started, because every successful allocation
    // should have been paired with a successful deallocation.
    let actual_num_active_handles = handle_map_post_function_ref.get_current_allocation_count();
    assert_eq!(MAX_ACTIVE_HANDLES - 1, actual_num_active_handles);

    // Verify that we still have space for one more entry after all that.
    let _ = handle_map_post_function_ref.allocate(|| 0xEE).unwrap();
}

/// Tests the progression of allocate/read/write/read/deallocate
/// on independent handles across multiple threads.
#[test]
fn test_full_lifecycle_independent_handles() {
    let num_repetitions_per_thread = 10000;
    let handle_map = build_handle_map::<String>();
    let test_fn = Arc::new(move || {
        let handle = handle_map
            .allocate(|| "Hello".to_string())
            .expect("Allocation shouldn't fail");
        {
            let value_ref = handle_map
                .get(handle)
                .expect("Getting the value shouldn't fail");
            assert_eq!("Hello", &*value_ref);
        };
        {
            let mut value_mut_ref = handle_map
                .get_mut(handle)
                .expect("Mutating the value shouldn't fail");
            value_mut_ref.deref_mut().push_str(" World!");
        };
        {
            let value_ref = handle_map
                .get(handle)
                .expect("Getting the value after modification shouldn't fail");
            assert_eq!("Hello World!", &*value_ref);
        };
        let removed = handle_map
            .deallocate(handle)
            .expect("Deallocation shouldn't fail");
        assert_eq!("Hello World!", removed);
    });
    test_for_each_thread(test_fn, num_repetitions_per_thread);
}

/// Tests the consistency of reads+writes to the same handle,
/// where threads modify and read different parts of an
/// underlying structure.
#[test]
fn test_consistency_of_same_handle_multithreaded_modifications() {
    let num_repetitions_per_thread = 10000;
    let handle_map = Arc::new(build_handle_map::<(String, String)>());
    let handle = handle_map
        .allocate(|| ("A".to_string(), "B".to_string()))
        .expect("Allocation shouldn't fail");

    let handle_map_second_ref = handle_map.clone();

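    // Each field of the tuple starts at length 1 ("A" / "B"), and only one
    // thread appends to each field, so at the start of iteration `i` that
    // field's length should be exactly `i`.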
    let join_handle_a = thread::spawn(move || {
        for i in 1..num_repetitions_per_thread {
            {
                let value_ref = handle_map
                    .get(handle)
                    .expect("Getting the value from thread A shouldn't fail");
                let value = &value_ref.0;
                assert_eq!(i, value.len());
            }
            {
                let mut value_mut_ref = handle_map
                    .get_mut(handle)
                    .expect("Mutating the value from thread A shouldn't fail");
                value_mut_ref.0.push('A');
            }
        }
    });

    let join_handle_b = thread::spawn(move || {
        for i in 1..num_repetitions_per_thread {
            {
                let value_ref = handle_map_second_ref
                    .get(handle)
                    .expect("Getting the value from thread B shouldn't fail");
                let value = &value_ref.1;
                assert_eq!(i, value.len());
            }
            {
                let mut value_mut_ref = handle_map_second_ref
                    .get_mut(handle)
                    .expect("Mutating the value from thread B shouldn't fail");
                value_mut_ref.1.push('B');
            }
        }
    });

    join_handle_a.join().unwrap();
    join_handle_b.join().unwrap();
}

/// Multi-threaded test to ensure that when attempting
/// to allocate over handle IDs which are already allocated,
/// all threads eventually get distinct, unused handle IDs
/// for their own allocations.
#[test]
fn test_non_overwriting_old_handles() {
    let mut all_handles: HashSet<Handle> = HashSet::new();
    let num_repetitions_per_thread = 100;
    let mut handle_map = build_handle_map::<u8>();
    for _ in 0..(num_repetitions_per_thread * NUM_ACTIVE_THREADS) {
        let handle = handle_map
            .allocate(|| 0xFF)
            .expect("Initial allocations shouldn't fail");
        let _ = all_handles.insert(handle);
    }
    // Reset the new-handle-id counter so that subsequent allocations
    // initially collide with handle IDs which are already in use.
    handle_map.set_new_handle_id_counter(0);

    let handle_map = Arc::new(handle_map);

    let mut thread_handles: Vec<thread::JoinHandle<Vec<Handle>>> = Vec::new();
    for _ in 0..NUM_ACTIVE_THREADS {
        let handle_map_reference = handle_map.clone();
        let thread_handle = thread::spawn(move || {
            let mut handles = Vec::new();
            for i in 0..num_repetitions_per_thread {
                let handle = handle_map_reference
                    .allocate(move || (i % 256) as u8)
                    .expect("No allocation should fail");
                handles.push(handle);
            }
            handles
        });
        thread_handles.push(thread_handle);
    }
    for thread_handle in thread_handles {
        let handles: Vec<Handle> = thread_handle
            .join()
            .expect("Individual threads shouldn't fail");
        for handle in handles {
            let was_distinct = all_handles.insert(handle);
            assert!(was_distinct);
        }
    }
}

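/// Tests that allocation still succeeds when the new-handle-id counter
/// wraps around after reaching `u64::MAX`.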
#[test]
fn test_id_wraparound() {
    let mut handle_map = build_handle_map::<u8>();
    handle_map.set_new_handle_id_counter(u64::MAX);
    let _ = handle_map
        .allocate(|| 0xAB)
        .expect("Counter wrap-around allocation should not fail");
    let _ = handle_map
        .allocate(|| 0xCD)
        .expect("Post-counter-wrap-around allocation should not fail");
}

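/// Tests that deallocating a handle which has already been
/// deallocated returns an error.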
#[test]
fn test_deallocate_unallocated_handle() {
    let handle_map = build_handle_map::<usize>();
    let handle = handle_map
        .allocate(|| 2)
        .expect("Allocation shouldn't fail");
    let deallocated = handle_map
        .deallocate(handle)
        .expect("Deallocation shouldn't fail");
    assert_eq!(2, deallocated);
    let double_deallocate_result = handle_map.deallocate(handle);
    assert!(double_deallocate_result.is_err());
}

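/// Tests that reading via a handle which has already been
/// deallocated returns an error.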
#[test]
fn test_get_unallocated_handle() {
    let handle_map = build_handle_map::<u8>();
    let handle = handle_map
        .allocate(|| 0xFE)
        .expect("Allocation shouldn't fail");
    let deallocated = handle_map
        .deallocate(handle)
        .expect("Deallocation shouldn't fail");
    assert_eq!(0xFE, deallocated);
    let read_result = handle_map.get(handle);
    assert!(read_result.is_err());
}

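/// Tests that mutably accessing a handle which has already been
/// deallocated returns an error.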
#[test]
fn test_get_mut_unallocated_handle() {
    let handle_map = build_handle_map::<(usize, usize, usize)>();
    let handle = handle_map
        .allocate(|| (1, 2, 3))
        .expect("Allocation shouldn't fail");
    let deallocated = handle_map
        .deallocate(handle)
        .expect("Deallocation shouldn't fail");
    assert_eq!((1, 2, 3), deallocated);
    let get_mut_result = handle_map.get_mut(handle);
    assert!(get_mut_result.is_err());
}