1 // Copyright 2023 Google LLC
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 //! A thread-safe implementation of a map for managing object handles,
16 //! a safer alternative to raw pointers for FFI interop.
17 
18 use core::fmt::Debug;
19 use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
20 
21 pub mod declare_handle_map;
22 mod guard;
23 pub(crate) mod shard;
24 
25 #[cfg(test)]
26 mod tests;
27 
28 pub use guard::{ObjectReadGuardImpl, ObjectReadWriteGuardImpl};
29 
30 use shard::{HandleMapShard, ShardAllocationError};
31 
/// An individual handle to be given out by a [`HandleMap`].
/// This representation is untyped, and just a wrapper
/// around a handle-id, in contrast to implementors of `HandleLike`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Handle {
    // The raw ID wrapped by this handle. Uniqueness is only meaningful
    // relative to the handle-map which issued it.
    handle_id: u64,
}

impl From<&Handle> for Handle {
    fn from(handle: &Handle) -> Self {
        // `Handle` is `Copy`, so a borrowed handle converts by dereference.
        *handle
    }
}

impl Handle {
    /// Constructs a handle wrapping the given ID.
    ///
    /// No validity checks are done on the wrapped ID
    /// to ensure that the given ID is active in
    /// any specific handle-map, and the type
    /// of the handle is not represented.
    ///
    /// As a result, this method is only useful for
    /// allowing access to handles from an FFI layer.
    pub fn from_id(handle_id: u64) -> Self {
        Self { handle_id }
    }

    /// Gets the ID for this handle.
    ///
    /// Since the underlying handle is un-typed,
    /// this method is only suitable for
    /// transmitting handles across an FFI layer.
    pub fn get_id(&self) -> u64 {
        self.handle_id
    }

    /// Derives the shard index from the handle id.
    ///
    /// NOTE(review): this panics (mod-by-zero) if `num_shards` is zero;
    /// callers must guarantee a non-zero shard count — confirm that
    /// `HandleMapDimensions` is never constructed with `num_shards == 0`.
    fn get_shard_index(&self, num_shards: u8) -> usize {
        (self.handle_id % (num_shards as u64)) as usize
    }
}
74 
/// Error raised when attempting to allocate into a full handle-map.
#[derive(Debug)]
pub struct HandleMapFullError;

impl core::fmt::Display for HandleMapFullError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("handle-map has reached its maximum number of active handles")
    }
}

// With `Display` + `Debug` in place, the std `Error` impl is a no-op,
// allowing callers to use `?` with `Box<dyn Error>` and friends.
impl std::error::Error for HandleMapFullError {}
78 
/// Error raised when the entry for a given [`Handle`] doesn't exist.
#[derive(Debug)]
pub struct HandleNotPresentError;

impl core::fmt::Display for HandleNotPresentError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.write_str("no entry exists in the handle-map for the given handle")
    }
}

// With `Display` + `Debug` in place, the std `Error` impl is a no-op,
// allowing callers to use `?` with `Box<dyn Error>` and friends.
impl std::error::Error for HandleNotPresentError {}
82 
/// Errors which may be raised while attempting to allocate
/// a handle from contents given by a (fallible) value-provider.
#[derive(Debug)]
pub enum HandleMapTryAllocateError<E: Debug> {
    /// The call to the value-provider for the allocation failed.
    ValueProviderFailed(E),
    /// We couldn't reserve a spot for the allocation, because
    /// the handle-map was full.
    HandleMapFull,
}

impl<E: Debug> core::fmt::Display for HandleMapTryAllocateError<E> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            // `E` is only bounded by `Debug`, so the payload is rendered
            // with its debug representation.
            Self::ValueProviderFailed(e) => write!(f, "value-provider failed: {e:?}"),
            Self::HandleMapFull => f.write_str("handle-map is full"),
        }
    }
}

// `E: Debug` does not imply `E: Error`, so no `source()` can be exposed;
// the payload is surfaced through `Display` above instead.
impl<E: Debug> std::error::Error for HandleMapTryAllocateError<E> {}
93 
/// FFI-transmissible structure expressing the dimensions
/// (max # of allocatable slots, number of shards) of a handle-map
/// to be used upon initialization.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct HandleMapDimensions {
    /// The number of shards which are employed
    /// by the associated handle-map.
    pub num_shards: u8,
    /// The maximum number of active handles which may be
    /// stored within the associated handle-map.
    pub max_active_handles: u32,
}
107 
/// A thread-safe mapping from "handle"s [like pointers, but safer]
/// to underlying structures, supporting allocations, reads, writes,
/// and deallocations of objects behind handles.
pub struct HandleMap<T: Send + Sync> {
    /// The dimensions of this handle-map
    dimensions: HandleMapDimensions,

    /// The individually-lockable "shards" of the handle-map,
    /// among which the keys will be roughly uniformly-distributed.
    /// A handle's shard is chosen by `handle_id % num_shards`.
    handle_map_shards: Box<[HandleMapShard<T>]>,

    /// An atomically-incrementing counter which tracks the
    /// next handle ID which allocations will attempt to use.
    // NOTE(review): `fetch_add` on this u64 wraps on overflow; with 64 bits
    // of ID space, reuse is presumably never hit in practice — confirm.
    new_handle_id_counter: AtomicU64,

    /// An atomic integer roughly tracking the number of
    /// currently-outstanding allocated entries in this
    /// handle-map among all [`HandleMapShard`]s.
    // NOTE(review): "roughly" presumably means the count can be momentarily
    // stale relative to in-flight shard operations — confirm in `shard`.
    outstanding_allocations_counter: AtomicU32,
}
128 
129 impl<T: Send + Sync> HandleMap<T> {
130     /// Creates a new handle-map with the given `HandleMapDimensions`.
with_dimensions(dimensions: HandleMapDimensions) -> Self131     pub fn with_dimensions(dimensions: HandleMapDimensions) -> Self {
132         let mut handle_map_shards = Vec::with_capacity(dimensions.num_shards as usize);
133         for _ in 0..dimensions.num_shards {
134             handle_map_shards.push(HandleMapShard::default());
135         }
136         let handle_map_shards = handle_map_shards.into_boxed_slice();
137         Self {
138             dimensions,
139             handle_map_shards,
140             new_handle_id_counter: AtomicU64::new(0),
141             outstanding_allocations_counter: AtomicU32::new(0),
142         }
143     }
144 }
145 
146 impl<T: Send + Sync> HandleMap<T> {
147     /// Allocates a new object within the given handle-map, returning
148     /// a handle to the location it was stored at. This operation
149     /// may fail if attempting to allocate over the `dimensions.max_active_handles`
150     /// limit imposed on the handle-map, in which case this method
151     /// will return a `HandleMapFullError`.
152     ///
153     /// If you want the passed closure to be able to possibly fail, see
154     /// [`Self::try_allocate`] instead.
allocate( &self, initial_value_provider: impl FnOnce() -> T, ) -> Result<Handle, HandleMapFullError>155     pub fn allocate(
156         &self,
157         initial_value_provider: impl FnOnce() -> T,
158     ) -> Result<Handle, HandleMapFullError> {
159         let wrapped_value_provider = move || Ok(initial_value_provider());
160         self.try_allocate::<core::convert::Infallible>(wrapped_value_provider)
161             .map_err(|e| match e {
162                 HandleMapTryAllocateError::ValueProviderFailed(never) => match never {},
163                 HandleMapTryAllocateError::HandleMapFull => HandleMapFullError,
164             })
165     }
166 
167     /// Attempts to allocate a new object within the given handle-map, returning
168     /// a handle to the location it was stored at.
169     ///
170     /// This operation may fail if attempting to allocate over the `dimensions.max_active_handles`
171     /// limit imposed on the handle-map, in which case this method will return a
172     /// `HandleMapTryAllocateError::HandleMapFull` and the given `initial_value_provider` will not
173     /// be run.
174     ///
175     /// The passed initial-value provider may also fail, in which case this will return the error
176     /// wrapped in `HandleMapTryAllocateError::ValueProviderFailed`.
177     ///
178     /// If your initial-value provider is infallible, see [`Self::allocate`] instead.
try_allocate<E: Debug>( &self, initial_value_provider: impl FnOnce() -> Result<T, E>, ) -> Result<Handle, HandleMapTryAllocateError<E>>179     pub fn try_allocate<E: Debug>(
180         &self,
181         initial_value_provider: impl FnOnce() -> Result<T, E>,
182     ) -> Result<Handle, HandleMapTryAllocateError<E>> {
183         let mut initial_value_provider = initial_value_provider;
184         loop {
185             // Increment the new-handle-ID counter using relaxed memory ordering,
186             // since the only invariant that we want to enforce is that concurrently-running
187             // threads always get distinct new handle-ids.
188             let new_handle_id = self.new_handle_id_counter.fetch_add(1, Ordering::Relaxed);
189             let new_handle = Handle::from_id(new_handle_id);
190             let shard_index = new_handle.get_shard_index(self.dimensions.num_shards);
191 
192             // Now, check the shard to see if we can actually allocate into it.
193             #[allow(clippy::expect_used)]
194             let shard_allocate_result = self
195                 .handle_map_shards
196                 .get(shard_index)
197                 .expect("Shard index is always within range")
198                 .try_allocate(
199                     new_handle,
200                     initial_value_provider,
201                     &self.outstanding_allocations_counter,
202                     self.dimensions.max_active_handles,
203                 );
204             match shard_allocate_result {
205                 Ok(_) => {
206                     return Ok(new_handle);
207                 }
208                 Err(ShardAllocationError::ValueProviderFailed(e)) => {
209                     return Err(HandleMapTryAllocateError::ValueProviderFailed(e))
210                 }
211                 Err(ShardAllocationError::ExceedsAllocationLimit) => {
212                     return Err(HandleMapTryAllocateError::HandleMapFull);
213                 }
214                 Err(ShardAllocationError::EntryOccupied(thrown_back_provider)) => {
215                     // We need to do the whole thing again with a new ID
216                     initial_value_provider = thrown_back_provider;
217                 }
218             }
219         }
220     }
221 
222     /// Gets a read-only reference to an object within the given handle-map,
223     /// if the given handle is present. Otherwise, returns [`HandleNotPresentError`].
get(&self, handle: Handle) -> Result<ObjectReadGuardImpl<T>, HandleNotPresentError>224     pub fn get(&self, handle: Handle) -> Result<ObjectReadGuardImpl<T>, HandleNotPresentError> {
225         let shard_index = handle.get_shard_index(self.dimensions.num_shards);
226         #[allow(clippy::expect_used)]
227         self.handle_map_shards
228             .get(shard_index)
229             .expect("shard index is always within range")
230             .get(handle)
231     }
232 
233     /// Gets a read+write reference to an object within the given handle-map,
234     /// if the given handle is present. Otherwise, returns [`HandleNotPresentError`].
get_mut( &self, handle: Handle, ) -> Result<ObjectReadWriteGuardImpl<T>, HandleNotPresentError>235     pub fn get_mut(
236         &self,
237         handle: Handle,
238     ) -> Result<ObjectReadWriteGuardImpl<T>, HandleNotPresentError> {
239         let shard_index = handle.get_shard_index(self.dimensions.num_shards);
240         #[allow(clippy::expect_used)]
241         self.handle_map_shards
242             .get(shard_index)
243             .expect("shard_index is always in range")
244             .get_mut(handle)
245     }
246 
247     /// Removes the object pointed to by the given handle in
248     /// the handle-map, returning the removed object if it
249     /// exists. Otherwise, returns [`HandleNotPresentError`].
deallocate(&self, handle: Handle) -> Result<T, HandleNotPresentError>250     pub fn deallocate(&self, handle: Handle) -> Result<T, HandleNotPresentError> {
251         let shard_index = handle.get_shard_index(self.dimensions.num_shards);
252         #[allow(clippy::expect_used)]
253         self.handle_map_shards
254             .get(shard_index)
255             .expect("shard index is always in range")
256             .deallocate(handle, &self.outstanding_allocations_counter)
257     }
258 
259     /// Gets the actual number of elements stored in the entire map.
get_current_allocation_count(&self) -> u32260     pub fn get_current_allocation_count(&self) -> u32 {
261         self.outstanding_allocations_counter.load(Ordering::Relaxed)
262     }
263 
264     /// Sets the new-handle-id counter to the given value.
265     /// Only suitable for tests.
266     #[cfg(test)]
set_new_handle_id_counter(&mut self, value: u64)267     pub(crate) fn set_new_handle_id_counter(&mut self, value: u64) {
268         self.new_handle_id_counter = AtomicU64::new(value);
269     }
270 }
271 
/// Externally-facing trait for things which behave like handle-map handles
/// with a globally-defined handle-map for the type.
///
/// NOTE(review): implementations are presumably generated via the
/// `declare_handle_map` module rather than written by hand — confirm.
pub trait HandleLike: Sized {
    /// The underlying object type pointed-to by this handle
    type Object: Send + Sync;

    /// Tries to allocate a new handle using the given (fallible)
    /// provider to construct the underlying stored object as
    /// a new entry into the global handle table for this type.
    fn try_allocate<E: Debug>(
        initial_value_provider: impl FnOnce() -> Result<Self::Object, E>,
    ) -> Result<Self, HandleMapTryAllocateError<E>>;

    /// Tries to allocate a new handle using the given (infallible)
    /// provider to construct the underlying stored object as
    /// a new entry into the global handle table for this type.
    /// May still fail with [`HandleMapFullError`] when the table is full.
    fn allocate(
        initial_value_provider: impl FnOnce() -> Self::Object,
    ) -> Result<Self, HandleMapFullError>;

    /// Gets a RAII read-guard on the contents behind this handle.
    fn get(&self) -> Result<ObjectReadGuardImpl<Self::Object>, HandleNotPresentError>;

    /// Gets a RAII read-write guard on the contents behind this handle.
    fn get_mut(&self) -> Result<ObjectReadWriteGuardImpl<Self::Object>, HandleNotPresentError>;

    /// Deallocates the contents behind this handle.
    /// Consumes `self`, since the handle is dangling afterwards.
    fn deallocate(self) -> Result<Self::Object, HandleNotPresentError>;
}
301