use crate::sync::batch_semaphore::Semaphore;
use crate::sync::rwlock::read_guard::RwLockReadGuard;
use crate::sync::rwlock::write_guard_mapped::RwLockMappedWriteGuard;
use std::marker::PhantomData;
use std::{fmt, mem, ops};

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by the [`write`] method
/// on [`RwLock`].
///
/// [`write`]: method@crate::sync::RwLock::write
/// [`RwLock`]: struct@crate::sync::RwLock
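///
/// # Examples
///
/// A minimal sketch of acquiring and releasing the guard; nothing beyond the
/// standard [`RwLock`] API is assumed here.
///
/// ```
/// use tokio::sync::RwLock;
///
/// # #[tokio::main]
/// # async fn main() {
/// let lock = RwLock::new(1u32);
///
/// {
///     // `write` returns an `RwLockWriteGuard`; readers and other writers
///     // are blocked until the guard is dropped at the end of this scope.
///     let mut guard = lock.write().await;
///     *guard += 1;
/// }
///
/// assert_eq!(*lock.read().await, 2);
/// # }
/// ```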
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) resource_span: tracing::Span,
    pub(super) permits_acquired: u32,
    pub(super) s: &'a Semaphore,
    pub(super) data: *mut T,
    pub(super) marker: PhantomData<&'a mut T>,
}

#[allow(dead_code)] // Unused fields are still used in Drop.
struct Inner<'a, T: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    permits_acquired: u32,
    s: &'a Semaphore,
    data: *mut T,
}

impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
    fn skip_drop(self) -> Inner<'a, T> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the values in every field of the guard, then
        // forgets the originals, so in the end no value is duplicated.
        Inner {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: unsafe { std::ptr::read(&me.resource_span) },
            permits_acquired: me.permits_acquired,
            s: me.s,
            data: me.data,
        }
    }

    /// Makes a new [`RwLockMappedWriteGuard`] for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockWriteGuard::map(..)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    ///
    /// This is an asynchronous version of [`RwLockWriteGuard::map`] from the
    /// [`parking_lot` crate].
    ///
    /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
    /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// {
    ///     let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0);
    ///     *mapped = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> RwLockMappedWriteGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let data = f(&mut *this) as *mut U;
        let this = this.skip_drop();

        RwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        }
    }

    /// Makes a new [`RwLockReadGuard`] for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockWriteGuard::downgrade_map(..)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    ///
    /// This is equivalent to a combination of asynchronous [`RwLockWriteGuard::map`] and [`RwLockWriteGuard::downgrade`]
    /// from the [`parking_lot` crate].
    ///
    /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a
    /// `&mut T` would result in unsoundness, as you could use interior mutability.
    ///
    /// [`RwLockReadGuard`]: struct@crate::sync::RwLockReadGuard
    /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map
    /// [`RwLockWriteGuard::downgrade`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.downgrade
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// let mapped = RwLockWriteGuard::downgrade_map(lock.write().await, |f| &f.0);
    /// let foo = lock.read().await;
    /// assert_eq!(foo.0, *mapped);
    /// # }
    /// ```
    #[inline]
    pub fn downgrade_map<F, U: ?Sized>(this: Self, f: F) -> RwLockReadGuard<'a, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let data = f(&*this) as *const U;
        let this = this.skip_drop();
        let guard = RwLockReadGuard {
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };

        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        this.s.release(to_release);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }

    /// Attempts to make a new [`RwLockMappedWriteGuard`] for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from
    /// the [`parking_lot` crate].
    ///
    /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
    /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// {
    ///     let guard = lock.write().await;
    ///     let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
    ///     *guard = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn try_map<F, U: ?Sized>(
        mut this: Self,
        f: F,
    ) -> Result<RwLockMappedWriteGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let data = match f(&mut *this) {
            Some(data) => data as *mut U,
            None => return Err(this),
        };
        let this = this.skip_drop();

        Ok(RwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        })
    }

    /// Attempts to make a new [`RwLockReadGuard`] for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_downgrade_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// This is equivalent to a combination of asynchronous [`RwLockWriteGuard::try_map`] and [`RwLockWriteGuard::downgrade`]
    /// from the [`parking_lot` crate].
    ///
    /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a
    /// `&mut T` would result in unsoundness, as you could use interior mutability.
    ///
    /// If this function returns `Err(...)`, the lock is never unlocked nor downgraded.
    ///
    /// [`RwLockReadGuard`]: struct@crate::sync::RwLockReadGuard
    /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map
    /// [`RwLockWriteGuard::downgrade`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.downgrade
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// let guard = RwLockWriteGuard::try_downgrade_map(lock.write().await, |f| Some(&f.0)).expect("should not fail");
    /// let foo = lock.read().await;
    /// assert_eq!(foo.0, *guard);
    /// # }
    /// ```
    #[inline]
    pub fn try_downgrade_map<F, U: ?Sized>(this: Self, f: F) -> Result<RwLockReadGuard<'a, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let data = match f(&*this) {
            Some(data) => data as *const U,
            None => return Err(this),
        };
        let this = this.skip_drop();
        let guard = RwLockReadGuard {
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };

        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        this.s.release(to_release);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        Ok(guard)
    }

    /// Converts this `RwLockWriteGuard` into an `RwLockMappedWriteGuard`. This
    /// method can be used to store a non-mapped guard in a struct field that
    /// expects a mapped guard.
    ///
    /// This is equivalent to calling `RwLockWriteGuard::map(guard, |me| me)`.
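    ///
    /// # Examples
    ///
    /// A minimal sketch of storing a non-mapped guard where a mapped guard is
    /// expected; the `Holder` struct below is purely illustrative.
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockMappedWriteGuard, RwLockWriteGuard};
    ///
    /// struct Holder<'a> {
    ///     guard: RwLockMappedWriteGuard<'a, u32>,
    /// }
    ///
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = RwLock::new(1u32);
    ///
    /// // The field expects a mapped guard, but the whole value stays accessible.
    /// let mut holder = Holder {
    ///     guard: RwLockWriteGuard::into_mapped(lock.write().await),
    /// };
    /// *holder.guard += 1;
    /// drop(holder);
    ///
    /// assert_eq!(*lock.read().await, 2);
    /// # }
    /// ```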
    #[inline]
    pub fn into_mapped(this: Self) -> RwLockMappedWriteGuard<'a, T> {
        RwLockWriteGuard::map(this, |me| me)
    }

    /// Atomically downgrades a write lock into a read lock without allowing
    /// any writers to take exclusive access of the lock in the meantime.
    ///
    /// **Note:** This won't *necessarily* allow any additional readers to acquire
    /// locks, since [`RwLock`] is fair and it is possible that a writer is next
    /// in line.
    ///
    /// Returns an RAII guard which will drop this read access of the `RwLock`
    /// when dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::sync::RwLock;
    /// # use std::sync::Arc;
    /// #
    /// # #[tokio::main]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(1));
    ///
    /// let n = lock.write().await;
    ///
    /// let cloned_lock = lock.clone();
    /// let handle = tokio::spawn(async move {
    ///     *cloned_lock.write().await = 2;
    /// });
    ///
    /// let n = n.downgrade();
    /// assert_eq!(*n, 1, "downgrade is atomic");
    ///
    /// drop(n);
    /// handle.await.unwrap();
    /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock");
    /// # }
    /// ```
    ///
    /// [`RwLock`]: struct@crate::sync::RwLock
    pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
        let this = self.skip_drop();
        let guard = RwLockReadGuard {
            s: this.s,
            data: this.data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };

        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        this.s.release(to_release);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }
}

impl<T: ?Sized> ops::Deref for RwLockWriteGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<T: ?Sized> ops::DerefMut for RwLockWriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data }
    }
}

impl<'a, T: ?Sized> fmt::Debug for RwLockWriteGuard<'a, T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized> fmt::Display for RwLockWriteGuard<'a, T>
where
    T: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
    fn drop(&mut self) {
        self.s.release(self.permits_acquired as usize);

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
    }
}