/*
 * Copyright (c) 2024 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

use alloc::boxed::Box;

use core::ffi::c_void;
use core::ptr::null_mut;

pub use crate::sys::handle_close;
pub use crate::sys::handle_decref;
pub use crate::sys::handle_wait;

pub use crate::sys::IPC_HANDLE_POLL_ERROR;
pub use crate::sys::IPC_HANDLE_POLL_HUP;
pub use crate::sys::IPC_HANDLE_POLL_MSG;
pub use crate::sys::IPC_HANDLE_POLL_NONE;
pub use crate::sys::IPC_HANDLE_POLL_READY;
pub use crate::sys::IPC_HANDLE_POLL_SEND_UNBLOCKED;

pub use crate::sys::handle;
pub use crate::sys::handle_ref;

use crate::sys::list_node;

use crate::handle_set::handle_set_detach_ref;

impl Default for list_node {
    fn default() -> Self {
        Self { prev: core::ptr::null_mut(), next: core::ptr::null_mut() }
    }
}

// Nodes in a linked list refer to adjacent nodes by address and should be pinned.
// TODO: add Unpin as a negative trait bound once the rustc feature is stabilized.
// impl !Unpin for list_node {}

impl Default for handle_ref {
    fn default() -> Self {
        Self {
            set_node: Default::default(),
            ready_node: Default::default(),
            uctx_node: Default::default(),
            waiter: Default::default(),
            parent: core::ptr::null_mut(),
            handle: core::ptr::null_mut(),
            id: 0,
            emask: 0,
            cookie: core::ptr::null_mut(),
        }
    }
}

// `handle_ref`s should not move since they are inserted as nodes in linked lists
// and the kernel may write back to the non-node fields as well.
// TODO: add Unpin as a negative trait bound once the rustc feature is stabilized.
// impl !Unpin for handle_ref {}

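/// Safe owning wrapper around a kernel [`handle_ref`].
///
/// The inner [`handle_ref`] is boxed so that its address stays stable when the
/// `HandleRef` is moved; the kernel links the ref into intrusive lists and may
/// write back to its fields, so it must not be relocated while attached.
///
/// A minimal usage sketch (the attach step is assumed to be provided elsewhere,
/// e.g. by a handle-set wrapper, and is not part of this module):
///
/// ```ignore
/// let mut href = HandleRef::default();
/// href.set_id(1);
/// // ... attach `href` to a handle set, wait for events, inspect `href.emask()` ...
/// href.detach();
/// ```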
#[derive(Default)]
pub struct HandleRef {
    // Box the `handle_ref` so it doesn't get moved with the `HandleRef`
    inner: Box<handle_ref>,
    pub(crate) attached: bool,
}

impl HandleRef {
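    /// Detaches the ref from the handle set it is attached to, if any.
    /// Safe to call repeatedly; it is a no-op once the ref is detached.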
    pub fn detach(&mut self) {
        if self.attached {
            // Safety: `inner` was initialized and attached to a handle set
            unsafe { handle_set_detach_ref(&mut *self.inner) }
            self.attached = false;
        }
    }

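    /// Closes the underlying handle if it has not already been closed,
    /// then clears the stored handle pointer.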
    pub fn handle_close(&mut self) {
        if !self.inner.handle.is_null() {
            // Safety: `handle` is non-null so it wasn't closed
            unsafe { handle_close(self.inner.handle) };
            self.inner.handle = null_mut();
        }
    }

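    /// Decrements the reference count of the underlying handle.
    ///
    /// Panics if the handle pointer is null.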
    pub fn handle_decref(&mut self) {
        if self.inner.handle.is_null() {
            panic!("handle is null; can't decrease its reference count");
        }

        // Safety: `handle` is non-null so it wasn't closed
        unsafe { handle_decref(self.inner.handle) };
    }

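    /// Returns a mutable raw pointer to the inner `handle_ref`. The pointer
    /// stays valid for as long as this `HandleRef` is alive since the inner
    /// value is boxed.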
    pub fn as_mut_ptr(&mut self) -> *mut handle_ref {
        &mut *self.inner
    }

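    /// Returns the cookie pointer stored in the ref.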
    pub fn cookie(&self) -> *mut c_void {
        self.inner.cookie
    }

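    /// Stores an opaque cookie pointer in the ref.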
    pub fn set_cookie(&mut self, cookie: *mut c_void) {
        self.inner.cookie = cookie;
    }

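    /// Returns the event mask (`emask`) of the ref.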
    pub fn emask(&self) -> u32 {
        self.inner.emask
    }

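    /// Sets the event mask (`emask`) of the ref.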
    pub fn set_emask(&mut self, emask: u32) {
        self.inner.emask = emask;
    }

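    /// Returns the raw handle pointer; null if no handle is set or it has been closed.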
    pub fn handle(&mut self) -> *mut handle {
        self.inner.handle
    }

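    /// Returns the id assigned to the ref.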
    pub fn id(&mut self) -> u32 {
        self.inner.id
    }

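    /// Sets the id of the ref.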
    pub fn set_id(&mut self, id: u32) {
        self.inner.id = id;
    }
}

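// Ensure the ref is detached from its handle set before it is freed.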
impl Drop for HandleRef {
    fn drop(&mut self) {
        self.detach()
    }
}

// Safety: the kernel synchronizes operations on handle refs so they can be passed
// from one thread to another
unsafe impl Send for HandleRef {}

// Safety: the kernel synchronizes operations on handle refs so it is safe to share
// references between threads
unsafe impl Sync for HandleRef {}