//! linux_raw syscalls supporting `rustix::mm`.
//!
//! # Safety
//!
//! See the `rustix::backend` module documentation for details.
#![allow(unsafe_code)]
#![allow(clippy::undocumented_unsafe_blocks)]

use super::types::{
    Advice, MapFlags, MlockAllFlags, MlockFlags, MprotectFlags, MremapFlags, MsyncFlags, ProtFlags,
    UserfaultfdFlags,
};
use crate::backend::c;
#[cfg(target_pointer_width = "64")]
use crate::backend::conv::loff_t_from_u64;
use crate::backend::conv::{c_uint, no_fd, pass_usize, ret, ret_owned_fd, ret_void_star};
use crate::fd::{BorrowedFd, OwnedFd};
use crate::io;
use linux_raw_sys::general::MAP_ANONYMOUS;

#[inline]
pub(crate) fn madvise(addr: *mut c::c_void, len: usize, advice: Advice) -> io::Result<()> {
    unsafe {
        ret(syscall!(
            __NR_madvise,
            addr,
            pass_usize(len),
            c_uint(advice as c::c_uint)
        ))
    }
}

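/// # Safety
///
/// `msync` is primarily unsafe due to the `addr` parameter, as anything
/// working with memory pointed to by raw pointers is unsafe.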
#[inline]
pub(crate) unsafe fn msync(addr: *mut c::c_void, len: usize, flags: MsyncFlags) -> io::Result<()> {
    ret(syscall!(__NR_msync, addr, pass_usize(len), flags))
}

/// # Safety
///
/// `mmap` is primarily unsafe due to the `addr` parameter, as anything working
/// with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn mmap(
    addr: *mut c::c_void,
    length: usize,
    prot: ProtFlags,
    flags: MapFlags,
    fd: BorrowedFd<'_>,
    offset: u64,
) -> io::Result<*mut c::c_void> {
    #[cfg(target_pointer_width = "32")]
    {
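        // On 32-bit, a byte offset doesn't fit in a register, so use
        // `mmap2`, which takes the offset in units of 4096-byte pages. A
        // page number that doesn't fit in `usize` fails with `INVAL`.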
        ret_void_star(syscall!(
            __NR_mmap2,
            addr,
            pass_usize(length),
            prot,
            flags,
            fd,
            (offset / 4096)
                .try_into()
                .map(pass_usize)
                .map_err(|_| io::Errno::INVAL)?
        ))
    }
    #[cfg(target_pointer_width = "64")]
    {
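        // On 64-bit, plain `mmap` takes the offset in bytes.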
        ret_void_star(syscall!(
            __NR_mmap,
            addr,
            pass_usize(length),
            prot,
            flags,
            fd,
            loff_t_from_u64(offset)
        ))
    }
}

/// # Safety
///
/// `mmap_anonymous` is primarily unsafe due to the `addr` parameter, as
/// anything working with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn mmap_anonymous(
    addr: *mut c::c_void,
    length: usize,
    prot: ProtFlags,
    flags: MapFlags,
) -> io::Result<*mut c::c_void> {
    #[cfg(target_pointer_width = "32")]
    {
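        // `MapFlags` doesn't include `MAP_ANONYMOUS`; this function implies
        // it, so OR it in here and pass `no_fd()` (an fd of -1), which
        // anonymous mappings conventionally use.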
        ret_void_star(syscall!(
            __NR_mmap2,
            addr,
            pass_usize(length),
            prot,
            c_uint(flags.bits() | MAP_ANONYMOUS),
            no_fd(),
            pass_usize(0)
        ))
    }
    #[cfg(target_pointer_width = "64")]
    {
        ret_void_star(syscall!(
            __NR_mmap,
            addr,
            pass_usize(length),
            prot,
            c_uint(flags.bits() | MAP_ANONYMOUS),
            no_fd(),
            loff_t_from_u64(0)
        ))
    }
}

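/// # Safety
///
/// `mprotect` is primarily unsafe due to the `ptr` parameter, as anything
/// working with memory pointed to by raw pointers is unsafe.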
#[inline]
pub(crate) unsafe fn mprotect(
    ptr: *mut c::c_void,
    len: usize,
    flags: MprotectFlags,
) -> io::Result<()> {
    ret(syscall!(__NR_mprotect, ptr, pass_usize(len), flags))
}

/// # Safety
///
/// `munmap` is primarily unsafe due to the `addr` parameter, as anything
/// working with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn munmap(addr: *mut c::c_void, length: usize) -> io::Result<()> {
    ret(syscall!(__NR_munmap, addr, pass_usize(length)))
}

/// # Safety
///
/// `mremap` is primarily unsafe due to the `old_address` parameter, as
/// anything working with memory pointed to by raw pointers is unsafe.
#[inline]
pub(crate) unsafe fn mremap(
    old_address: *mut c::c_void,
    old_size: usize,
    new_size: usize,
    flags: MremapFlags,
) -> io::Result<*mut c::c_void> {
    ret_void_star(syscall!(
        __NR_mremap,
        old_address,
        pass_usize(old_size),
        pass_usize(new_size),
        flags
    ))
}

/// # Safety
///
/// `mremap_fixed` is primarily unsafe due to the `old_address` and
/// `new_address` parameters, as anything working with memory pointed to by raw
/// pointers is unsafe.
#[inline]
pub(crate) unsafe fn mremap_fixed(
    old_address: *mut c::c_void,
    old_size: usize,
    new_size: usize,
    flags: MremapFlags,
    new_address: *mut c::c_void,
) -> io::Result<*mut c::c_void> {
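    // The kernel consults the fifth argument only when `flags` requests it
    // (e.g. with `MREMAP_FIXED`).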
    ret_void_star(syscall!(
        __NR_mremap,
        old_address,
        pass_usize(old_size),
        pass_usize(new_size),
        flags,
        new_address
    ))
}

/// # Safety
///
/// `mlock` operates on raw pointers and may round out to the nearest page
/// boundaries.
#[inline]
pub(crate) unsafe fn mlock(addr: *mut c::c_void, length: usize) -> io::Result<()> {
    ret(syscall!(__NR_mlock, addr, pass_usize(length)))
}

/// # Safety
///
/// `mlock_with` operates on raw pointers and may round out to the nearest page
/// boundaries.
#[inline]
pub(crate) unsafe fn mlock_with(
    addr: *mut c::c_void,
    length: usize,
    flags: MlockFlags,
) -> io::Result<()> {
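    // `mlock2` (Linux 4.4) is `mlock` with an added flags argument.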
    ret(syscall!(__NR_mlock2, addr, pass_usize(length), flags))
}

/// # Safety
///
/// `munlock` operates on raw pointers and may round out to the nearest page
/// boundaries.
#[inline]
pub(crate) unsafe fn munlock(addr: *mut c::c_void, length: usize) -> io::Result<()> {
    ret(syscall!(__NR_munlock, addr, pass_usize(length)))
}

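/// # Safety
///
/// The `userfaultfd` call itself is benign, but the returned file descriptor
/// can be used to observe and resolve page faults in the process's memory,
/// which callers must handle with care.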
#[inline]
pub(crate) unsafe fn userfaultfd(flags: UserfaultfdFlags) -> io::Result<OwnedFd> {
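    // `userfaultfd` takes no pointer arguments and reads no user memory, so
    // `syscall_readonly` suffices.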
    ret_owned_fd(syscall_readonly!(__NR_userfaultfd, flags))
}

/// Locks all pages mapped into the address space of the calling process.
///
/// This includes the pages of the code, data, and stack segment, as well as
/// shared libraries, user space kernel data, shared memory, and memory-mapped
/// files. All mapped pages are guaranteed to be resident in RAM when the call
/// returns successfully; the pages are guaranteed to stay in RAM until later
/// unlocked.
#[inline]
pub(crate) fn mlockall(flags: MlockAllFlags) -> io::Result<()> {
    // When `mlockall` is used with `MCL_ONFAULT | MCL_FUTURE`, the ordering
    // of `mlockall` with respect to arbitrary loads may be significant: if a
    // load faults before the `mlockall`, the memory doesn't get locked, but
    // if the load and therefore the fault happens after, then it does. So to
    // be conservative in this regard, we use `syscall` instead of
    // `syscall_readonly`.
    unsafe { ret(syscall!(__NR_mlockall, flags)) }
}

/// Unlocks all pages mapped into the address space of the calling process.
#[inline]
pub(crate) fn munlockall() -> io::Result<()> {
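    // Unlocking pages doesn't have the load-ordering concern described in
    // `mlockall` above, so `syscall_readonly` is used.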
    unsafe { ret(syscall_readonly!(__NR_munlockall)) }
}