1 // SPDX-License-Identifier: GPL-2.0
2
3 //! Slices to user space memory regions.
4 //!
5 //! C header: [`include/linux/uaccess.h`](srctree/include/linux/uaccess.h)
6
7 use crate::{
8 alloc::{Allocator, Flags},
9 bindings,
10 dma::Coherent,
11 error::Result,
12 ffi::{c_char, c_void},
13 fs::file,
14 prelude::*,
15 ptr::KnownSize,
16 transmute::{AsBytes, FromBytes},
17 };
18 use core::mem::{size_of, MaybeUninit};
19
/// A pointer into userspace.
///
/// This is the Rust equivalent to C pointers tagged with `__user`.
//
// `#[repr(transparent)]` guarantees the same layout and ABI as the single
// `*mut c_void` field, so a `UserPtr` can be passed where C expects a raw
// `__user` pointer.
#[repr(transparent)]
#[derive(Copy, Clone, Zeroable)]
pub struct UserPtr(*mut c_void);
26
27 impl UserPtr {
28 /// Create a `UserPtr` from an integer representing the userspace address.
29 #[inline]
from_addr(addr: usize) -> Self30 pub fn from_addr(addr: usize) -> Self {
31 Self(addr as *mut c_void)
32 }
33
34 /// Create a `UserPtr` from a pointer representing the userspace address.
35 #[inline]
from_ptr(addr: *mut c_void) -> Self36 pub fn from_ptr(addr: *mut c_void) -> Self {
37 Self(addr)
38 }
39
40 /// Cast this userspace pointer to a raw const void pointer.
41 ///
42 /// It is up to the caller to use the returned pointer correctly.
43 #[inline]
as_const_ptr(self) -> *const c_void44 pub fn as_const_ptr(self) -> *const c_void {
45 self.0
46 }
47
48 /// Cast this userspace pointer to a raw mutable void pointer.
49 ///
50 /// It is up to the caller to use the returned pointer correctly.
51 #[inline]
as_mut_ptr(self) -> *mut c_void52 pub fn as_mut_ptr(self) -> *mut c_void {
53 self.0
54 }
55
56 /// Increment this user pointer by `add` bytes.
57 ///
58 /// This addition is wrapping, so wrapping around the address space does not result in a panic
59 /// even if `CONFIG_RUST_OVERFLOW_CHECKS` is enabled.
60 #[inline]
wrapping_byte_add(self, add: usize) -> UserPtr61 pub fn wrapping_byte_add(self, add: usize) -> UserPtr {
62 UserPtr(self.0.wrapping_byte_add(add))
63 }
64 }
65
/// A pointer to an area in userspace memory, which can be either read-only or read-write.
///
/// All methods on this struct are safe: attempting to read or write on bad addresses (either out of
/// the bound of the slice or unmapped addresses) will return [`EFAULT`]. Concurrent access,
/// *including data races to/from userspace memory*, is permitted, because fundamentally another
/// userspace thread/process could always be modifying memory at the same time (in the same way that
/// userspace Rust's [`std::io`] permits data races with the contents of files on disk). In the
/// presence of a race, the exact byte values read/written are unspecified but the operation is
/// well-defined. Kernelspace code should validate its copy of data after completing a read, and not
/// expect that multiple reads of the same address will return the same value.
///
/// These APIs are designed to make it difficult to accidentally write TOCTOU (time-of-check to
/// time-of-use) bugs. Every time a memory location is read, the reader's position is advanced by
/// the read length and the next read will start from there. This helps prevent accidentally reading
/// the same location twice and causing a TOCTOU bug.
///
/// Creating a [`UserSliceReader`] and/or [`UserSliceWriter`] consumes the `UserSlice`, helping
/// ensure that there aren't multiple readers or writers to the same location.
///
/// If double-fetching a memory location is necessary for some reason, then that is done by creating
/// multiple readers to the same memory location, e.g. using [`clone_reader`].
///
/// # Examples
///
/// Takes a region of userspace memory from the current process, and modify it by adding one to
/// every byte in the region.
///
/// ```no_run
/// use kernel::ffi::c_void;
/// use kernel::uaccess::{UserPtr, UserSlice};
///
/// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result {
///     let (read, mut write) = UserSlice::new(uptr, len).reader_writer();
///
///     let mut buf = KVec::new();
///     read.read_all(&mut buf, GFP_KERNEL)?;
///
///     for b in &mut buf {
///         *b = b.wrapping_add(1);
///     }
///
///     write.write_slice(&buf)?;
///     Ok(())
/// }
/// ```
///
/// Example illustrating a TOCTOU (time-of-check to time-of-use) bug.
///
/// ```no_run
/// use kernel::ffi::c_void;
/// use kernel::uaccess::{UserPtr, UserSlice};
///
/// /// Returns whether the data in this region is valid.
/// fn is_valid(uptr: UserPtr, len: usize) -> Result<bool> {
///     let read = UserSlice::new(uptr, len).reader();
///
///     let mut buf = KVec::new();
///     read.read_all(&mut buf, GFP_KERNEL)?;
///
///     todo!()
/// }
///
/// /// Returns the bytes behind this user pointer if they are valid.
/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<KVec<u8>> {
///     if !is_valid(uptr, len)? {
///         return Err(EINVAL);
///     }
///
///     let read = UserSlice::new(uptr, len).reader();
///
///     let mut buf = KVec::new();
///     read.read_all(&mut buf, GFP_KERNEL)?;
///
///     // THIS IS A BUG! The bytes could have changed since we checked them.
///     //
///     // To avoid this kind of bug, don't call `UserSlice::new` multiple
///     // times with the same address.
///     Ok(buf)
/// }
/// ```
///
/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
/// [`clone_reader`]: UserSliceReader::clone_reader
pub struct UserSlice {
    // Userspace base address of the region.
    ptr: UserPtr,
    // Length of the region in bytes.
    length: usize,
}
153
154 impl UserSlice {
155 /// Constructs a user slice from a raw pointer and a length in bytes.
156 ///
157 /// Constructing a [`UserSlice`] performs no checks on the provided address and length, it can
158 /// safely be constructed inside a kernel thread with no current userspace process. Reads and
159 /// writes wrap the kernel APIs `copy_from_user` and `copy_to_user`, which check the memory map
160 /// of the current process and enforce that the address range is within the user range (no
161 /// additional calls to `access_ok` are needed). Validity of the pointer is checked when you
162 /// attempt to read or write, not in the call to `UserSlice::new`.
163 ///
164 /// Callers must be careful to avoid time-of-check-time-of-use (TOCTOU) issues. The simplest way
165 /// is to create a single instance of [`UserSlice`] per user memory block as it reads each byte
166 /// at most once.
new(ptr: UserPtr, length: usize) -> Self167 pub fn new(ptr: UserPtr, length: usize) -> Self {
168 UserSlice { ptr, length }
169 }
170
171 /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
172 ///
173 /// Fails with [`EFAULT`] if the read happens on a bad address.
read_all<A: Allocator>(self, buf: &mut Vec<u8, A>, flags: Flags) -> Result174 pub fn read_all<A: Allocator>(self, buf: &mut Vec<u8, A>, flags: Flags) -> Result {
175 self.reader().read_all(buf, flags)
176 }
177
178 /// Constructs a [`UserSliceReader`].
reader(self) -> UserSliceReader179 pub fn reader(self) -> UserSliceReader {
180 UserSliceReader {
181 ptr: self.ptr,
182 length: self.length,
183 }
184 }
185
186 /// Constructs a [`UserSliceWriter`].
writer(self) -> UserSliceWriter187 pub fn writer(self) -> UserSliceWriter {
188 UserSliceWriter {
189 ptr: self.ptr,
190 length: self.length,
191 }
192 }
193
194 /// Constructs both a [`UserSliceReader`] and a [`UserSliceWriter`].
195 ///
196 /// Usually when this is used, you will first read the data, and then overwrite it afterwards.
reader_writer(self) -> (UserSliceReader, UserSliceWriter)197 pub fn reader_writer(self) -> (UserSliceReader, UserSliceWriter) {
198 (
199 UserSliceReader {
200 ptr: self.ptr,
201 length: self.length,
202 },
203 UserSliceWriter {
204 ptr: self.ptr,
205 length: self.length,
206 },
207 )
208 }
209 }
210
/// A reader for [`UserSlice`].
///
/// Used to incrementally read from the user slice.
pub struct UserSliceReader {
    // Current read position in userspace; advanced after each successful read.
    ptr: UserPtr,
    // Number of bytes remaining to be read from this reader.
    length: usize,
}
218
219 impl UserSliceReader {
220 /// Skip the provided number of bytes.
221 ///
222 /// Returns an error if skipping more than the length of the buffer.
skip(&mut self, num_skip: usize) -> Result223 pub fn skip(&mut self, num_skip: usize) -> Result {
224 // Update `self.length` first since that's the fallible part of this operation.
225 self.length = self.length.checked_sub(num_skip).ok_or(EFAULT)?;
226 self.ptr = self.ptr.wrapping_byte_add(num_skip);
227 Ok(())
228 }
229
230 /// Create a reader that can access the same range of data.
231 ///
232 /// Reading from the clone does not advance the current reader.
233 ///
234 /// The caller should take care to not introduce TOCTOU issues, as described in the
235 /// documentation for [`UserSlice`].
clone_reader(&self) -> UserSliceReader236 pub fn clone_reader(&self) -> UserSliceReader {
237 UserSliceReader {
238 ptr: self.ptr,
239 length: self.length,
240 }
241 }
242
243 /// Returns the number of bytes left to be read from this reader.
244 ///
245 /// Note that even reading less than this number of bytes may fail.
len(&self) -> usize246 pub fn len(&self) -> usize {
247 self.length
248 }
249
250 /// Returns `true` if no data is available in the io buffer.
is_empty(&self) -> bool251 pub fn is_empty(&self) -> bool {
252 self.length == 0
253 }
254
255 /// Reads raw data from the user slice into a kernel buffer.
256 ///
257 /// For a version that uses `&mut [u8]`, please see [`UserSliceReader::read_slice`].
258 ///
259 /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
260 /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
261 ///
262 /// # Guarantees
263 ///
264 /// After a successful call to this method, all bytes in `out` are initialized.
read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result265 pub fn read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result {
266 let len = out.len();
267 let out_ptr = out.as_mut_ptr().cast::<c_void>();
268 if len > self.length {
269 return Err(EFAULT);
270 }
271 // SAFETY: `out_ptr` points into a mutable slice of length `len`, so we may write
272 // that many bytes to it.
273 let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr.as_const_ptr(), len) };
274 if res != 0 {
275 return Err(EFAULT);
276 }
277 self.ptr = self.ptr.wrapping_byte_add(len);
278 self.length -= len;
279 Ok(())
280 }
281
282 /// Reads raw data from the user slice into a kernel buffer.
283 ///
284 /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
285 /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
read_slice(&mut self, out: &mut [u8]) -> Result286 pub fn read_slice(&mut self, out: &mut [u8]) -> Result {
287 // SAFETY: The types are compatible and `read_raw` doesn't write uninitialized bytes to
288 // `out`.
289 let out = unsafe { &mut *(core::ptr::from_mut(out) as *mut [MaybeUninit<u8>]) };
290 self.read_raw(out)
291 }
292
293 /// Reads raw data from the user slice into a kernel buffer partially.
294 ///
295 /// This is the same as [`Self::read_slice`] but considers the given `offset` into `out` and
296 /// truncates the read to the boundaries of `self` and `out`.
297 ///
298 /// On success, returns the number of bytes read.
read_slice_partial(&mut self, out: &mut [u8], offset: usize) -> Result<usize>299 pub fn read_slice_partial(&mut self, out: &mut [u8], offset: usize) -> Result<usize> {
300 let end = offset.saturating_add(self.len()).min(out.len());
301
302 let Some(dst) = out.get_mut(offset..end) else {
303 return Ok(0);
304 };
305
306 self.read_slice(dst)?;
307 Ok(dst.len())
308 }
309
310 /// Reads raw data from the user slice into a kernel buffer partially.
311 ///
312 /// This is the same as [`Self::read_slice_partial`] but updates the given [`file::Offset`] by
313 /// the number of bytes read.
314 ///
315 /// This is equivalent to C's `simple_write_to_buffer()`.
316 ///
317 /// On success, returns the number of bytes read.
read_slice_file(&mut self, out: &mut [u8], offset: &mut file::Offset) -> Result<usize>318 pub fn read_slice_file(&mut self, out: &mut [u8], offset: &mut file::Offset) -> Result<usize> {
319 if offset.is_negative() {
320 return Err(EINVAL);
321 }
322
323 let Ok(offset_index) = (*offset).try_into() else {
324 return Ok(0);
325 };
326
327 let read = self.read_slice_partial(out, offset_index)?;
328
329 // OVERFLOW: `offset + read <= data.len() <= isize::MAX <= Offset::MAX`
330 *offset += read as i64;
331
332 Ok(read)
333 }
334
335 /// Reads a value of the specified type.
336 ///
337 /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
338 /// bounds of this [`UserSliceReader`].
read<T: FromBytes>(&mut self) -> Result<T>339 pub fn read<T: FromBytes>(&mut self) -> Result<T> {
340 let len = size_of::<T>();
341 if len > self.length {
342 return Err(EFAULT);
343 }
344 let mut out: MaybeUninit<T> = MaybeUninit::uninit();
345 // SAFETY: The local variable `out` is valid for writing `size_of::<T>()` bytes.
346 //
347 // By using the _copy_from_user variant, we skip the check_object_size check that verifies
348 // the kernel pointer. This mirrors the logic on the C side that skips the check when the
349 // length is a compile-time constant.
350 let res = unsafe {
351 bindings::_copy_from_user(
352 out.as_mut_ptr().cast::<c_void>(),
353 self.ptr.as_const_ptr(),
354 len,
355 )
356 };
357 if res != 0 {
358 return Err(EFAULT);
359 }
360 self.ptr = self.ptr.wrapping_byte_add(len);
361 self.length -= len;
362 // SAFETY: The read above has initialized all bytes in `out`, and since `T` implements
363 // `FromBytes`, any bit-pattern is a valid value for this type.
364 Ok(unsafe { out.assume_init() })
365 }
366
367 /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
368 ///
369 /// Fails with [`EFAULT`] if the read happens on a bad address.
read_all<A: Allocator>(mut self, buf: &mut Vec<u8, A>, flags: Flags) -> Result370 pub fn read_all<A: Allocator>(mut self, buf: &mut Vec<u8, A>, flags: Flags) -> Result {
371 let len = self.length;
372 buf.reserve(len, flags)?;
373
374 // The call to `reserve` was successful, so the spare capacity is at least `len` bytes long.
375 self.read_raw(&mut buf.spare_capacity_mut()[..len])?;
376
377 // SAFETY: Since the call to `read_raw` was successful, so the next `len` bytes of the
378 // vector have been initialized.
379 unsafe { buf.inc_len(len) };
380 Ok(())
381 }
382
383 /// Read a NUL-terminated string from userspace and return it.
384 ///
385 /// The string is read into `buf` and a NUL-terminator is added if the end of `buf` is reached.
386 /// Since there must be space to add a NUL-terminator, the buffer must not be empty. The
387 /// returned `&CStr` points into `buf`.
388 ///
389 /// Fails with [`EFAULT`] if the read happens on a bad address (some data may have been
390 /// copied).
391 #[doc(alias = "strncpy_from_user")]
strcpy_into_buf<'buf>(self, buf: &'buf mut [u8]) -> Result<&'buf CStr>392 pub fn strcpy_into_buf<'buf>(self, buf: &'buf mut [u8]) -> Result<&'buf CStr> {
393 if buf.is_empty() {
394 return Err(EINVAL);
395 }
396
397 // SAFETY: The types are compatible and `strncpy_from_user` doesn't write uninitialized
398 // bytes to `buf`.
399 let mut dst = unsafe { &mut *(core::ptr::from_mut(buf) as *mut [MaybeUninit<u8>]) };
400
401 // We never read more than `self.length` bytes.
402 if dst.len() > self.length {
403 dst = &mut dst[..self.length];
404 }
405
406 let mut len = raw_strncpy_from_user(dst, self.ptr)?;
407 if len < dst.len() {
408 // Add one to include the NUL-terminator.
409 len += 1;
410 } else if len < buf.len() {
411 // This implies that `len == dst.len() < buf.len()`.
412 //
413 // This means that we could not fill the entire buffer, but we had to stop reading
414 // because we hit the `self.length` limit of this `UserSliceReader`. Since we did not
415 // fill the buffer, we treat this case as if we tried to read past the `self.length`
416 // limit and received a page fault, which is consistent with other `UserSliceReader`
417 // methods that also return page faults when you exceed `self.length`.
418 return Err(EFAULT);
419 } else {
420 // This implies that `len == buf.len()`.
421 //
422 // This means that we filled the buffer exactly. In this case, we add a NUL-terminator
423 // and return it. Unlike the `len < dst.len()` branch, don't modify `len` because it
424 // already represents the length including the NUL-terminator.
425 //
426 // SAFETY: Due to the check at the beginning, the buffer is not empty.
427 unsafe { *buf.last_mut().unwrap_unchecked() = 0 };
428 }
429
430 // This method consumes `self`, so it can only be called once, thus we do not need to
431 // update `self.length`. This sidesteps concerns such as whether `self.length` should be
432 // incremented by `len` or `len-1` in the `len == buf.len()` case.
433
434 // SAFETY: There are two cases:
435 // * If we hit the `len < dst.len()` case, then `raw_strncpy_from_user` guarantees that
436 // this slice contains exactly one NUL byte at the end of the string.
437 // * Otherwise, `raw_strncpy_from_user` guarantees that the string contained no NUL bytes,
438 // and we have since added a NUL byte at the end.
439 Ok(unsafe { CStr::from_bytes_with_nul_unchecked(&buf[..len]) })
440 }
441 }
442
/// A writer for [`UserSlice`].
///
/// Used to incrementally write into the user slice.
pub struct UserSliceWriter {
    // Current write position in userspace; advanced after each successful write.
    ptr: UserPtr,
    // Number of bytes of space remaining in this writer.
    length: usize,
}
450
451 impl UserSliceWriter {
452 /// Returns the amount of space remaining in this buffer.
453 ///
454 /// Note that even writing less than this number of bytes may fail.
len(&self) -> usize455 pub fn len(&self) -> usize {
456 self.length
457 }
458
459 /// Returns `true` if no more data can be written to this buffer.
is_empty(&self) -> bool460 pub fn is_empty(&self) -> bool {
461 self.length == 0
462 }
463
464 /// Low-level write from a raw pointer.
465 ///
466 /// # Safety
467 ///
468 /// The caller must ensure that `from` is valid for reads of `len` bytes.
write_raw(&mut self, from: *const u8, len: usize) -> Result469 unsafe fn write_raw(&mut self, from: *const u8, len: usize) -> Result {
470 if len > self.length {
471 return Err(EFAULT);
472 }
473
474 // SAFETY: Caller guarantees `from` is valid for `len` bytes (see this function's
475 // safety contract).
476 let res = unsafe { bindings::copy_to_user(self.ptr.as_mut_ptr(), from.cast(), len) };
477 if res != 0 {
478 return Err(EFAULT);
479 }
480 self.ptr = self.ptr.wrapping_byte_add(len);
481 self.length -= len;
482 Ok(())
483 }
484
485 /// Writes raw data to this user pointer from a kernel buffer.
486 ///
487 /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
488 /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
489 /// if it returns an error.
write_slice(&mut self, data: &[u8]) -> Result490 pub fn write_slice(&mut self, data: &[u8]) -> Result {
491 // SAFETY: `data` is a valid slice, so `data.as_ptr()` is valid for
492 // reading `data.len()` bytes.
493 unsafe { self.write_raw(data.as_ptr(), data.len()) }
494 }
495
496 /// Writes raw data to this user pointer from a DMA coherent allocation.
497 ///
498 /// Copies `count` bytes from `alloc` starting from `offset` into this userspace slice.
499 ///
500 /// # Errors
501 ///
502 /// - [`EOVERFLOW`]: `offset + count` overflows.
503 /// - [`ERANGE`]: `offset + count` exceeds the size of `alloc`, or `count` exceeds the
504 /// size of the user-space buffer.
505 /// - [`EFAULT`]: the write hits a bad address or goes out of bounds of this
506 /// [`UserSliceWriter`].
507 ///
508 /// This call may modify the associated userspace slice even if it returns an error.
509 ///
510 /// Note: The memory may be concurrently modified by hardware (e.g., DMA). In such cases,
511 /// the copied data may be inconsistent, but this does not cause undefined behavior.
512 ///
513 /// # Example
514 ///
515 /// Copy the first 256 bytes of a DMA coherent allocation into a userspace buffer:
516 ///
517 /// ```no_run
518 /// use kernel::uaccess::UserSliceWriter;
519 /// use kernel::dma::Coherent;
520 ///
521 /// fn copy_dma_to_user(
522 /// mut writer: UserSliceWriter,
523 /// alloc: &Coherent<[u8]>,
524 /// ) -> Result {
525 /// writer.write_dma(alloc, 0, 256)
526 /// }
527 /// ```
write_dma<T: KnownSize + AsBytes + ?Sized>( &mut self, alloc: &Coherent<T>, offset: usize, count: usize, ) -> Result528 pub fn write_dma<T: KnownSize + AsBytes + ?Sized>(
529 &mut self,
530 alloc: &Coherent<T>,
531 offset: usize,
532 count: usize,
533 ) -> Result {
534 let len = alloc.size();
535 if offset.checked_add(count).ok_or(EOVERFLOW)? > len {
536 return Err(ERANGE);
537 }
538
539 if count > self.len() {
540 return Err(ERANGE);
541 }
542
543 // SAFETY: `as_ptr()` returns a valid pointer to a memory region of `count()` bytes, as
544 // guaranteed by the `Coherent` invariants. The check above ensures `offset + count <= len`.
545 let src_ptr = unsafe { alloc.as_ptr().cast::<u8>().add(offset) };
546
547 // Note: Use `write_raw` instead of `write_slice` because the allocation is coherent
548 // memory that hardware may modify (e.g., DMA); we cannot form a `&[u8]` slice over
549 // such volatile memory.
550 //
551 // SAFETY: `src_ptr` points into the allocation and is valid for `count` bytes (see above).
552 unsafe { self.write_raw(src_ptr, count) }
553 }
554
555 /// Writes raw data to this user pointer from a kernel buffer partially.
556 ///
557 /// This is the same as [`Self::write_slice`] but considers the given `offset` into `data` and
558 /// truncates the write to the boundaries of `self` and `data`.
559 ///
560 /// On success, returns the number of bytes written.
write_slice_partial(&mut self, data: &[u8], offset: usize) -> Result<usize>561 pub fn write_slice_partial(&mut self, data: &[u8], offset: usize) -> Result<usize> {
562 let end = offset.saturating_add(self.len()).min(data.len());
563
564 let Some(src) = data.get(offset..end) else {
565 return Ok(0);
566 };
567
568 self.write_slice(src)?;
569 Ok(src.len())
570 }
571
572 /// Writes raw data to this user pointer from a kernel buffer partially.
573 ///
574 /// This is the same as [`Self::write_slice_partial`] but updates the given [`file::Offset`] by
575 /// the number of bytes written.
576 ///
577 /// This is equivalent to C's `simple_read_from_buffer()`.
578 ///
579 /// On success, returns the number of bytes written.
write_slice_file(&mut self, data: &[u8], offset: &mut file::Offset) -> Result<usize>580 pub fn write_slice_file(&mut self, data: &[u8], offset: &mut file::Offset) -> Result<usize> {
581 if offset.is_negative() {
582 return Err(EINVAL);
583 }
584
585 let Ok(offset_index) = (*offset).try_into() else {
586 return Ok(0);
587 };
588
589 let written = self.write_slice_partial(data, offset_index)?;
590
591 // OVERFLOW: `offset + written <= data.len() <= isize::MAX <= Offset::MAX`
592 *offset += written as i64;
593
594 Ok(written)
595 }
596
597 /// Writes the provided Rust value to this userspace pointer.
598 ///
599 /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
600 /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
601 /// if it returns an error.
write<T: AsBytes>(&mut self, value: &T) -> Result602 pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
603 let len = size_of::<T>();
604 if len > self.length {
605 return Err(EFAULT);
606 }
607 // SAFETY: The reference points to a value of type `T`, so it is valid for reading
608 // `size_of::<T>()` bytes.
609 //
610 // By using the _copy_to_user variant, we skip the check_object_size check that verifies the
611 // kernel pointer. This mirrors the logic on the C side that skips the check when the length
612 // is a compile-time constant.
613 let res = unsafe {
614 bindings::_copy_to_user(
615 self.ptr.as_mut_ptr(),
616 core::ptr::from_ref(value).cast::<c_void>(),
617 len,
618 )
619 };
620 if res != 0 {
621 return Err(EFAULT);
622 }
623 self.ptr = self.ptr.wrapping_byte_add(len);
624 self.length -= len;
625 Ok(())
626 }
627 }
628
629 /// Reads a nul-terminated string into `dst` and returns the length.
630 ///
631 /// This reads from userspace until a NUL byte is encountered, or until `dst.len()` bytes have been
632 /// read. Fails with [`EFAULT`] if a read happens on a bad address (some data may have been
633 /// copied). When the end of the buffer is encountered, no NUL byte is added, so the string is
634 /// *not* guaranteed to be NUL-terminated when `Ok(dst.len())` is returned.
635 ///
636 /// # Guarantees
637 ///
638 /// When this function returns `Ok(len)`, it is guaranteed that the first `len` bytes of `dst` are
639 /// initialized and non-zero. Furthermore, if `len < dst.len()`, then `dst[len]` is a NUL byte.
640 #[inline]
raw_strncpy_from_user(dst: &mut [MaybeUninit<u8>], src: UserPtr) -> Result<usize>641 fn raw_strncpy_from_user(dst: &mut [MaybeUninit<u8>], src: UserPtr) -> Result<usize> {
642 // CAST: Slice lengths are guaranteed to be `<= isize::MAX`.
643 let len = dst.len() as isize;
644
645 // SAFETY: `dst` is valid for writing `dst.len()` bytes.
646 let res = unsafe {
647 bindings::strncpy_from_user(
648 dst.as_mut_ptr().cast::<c_char>(),
649 src.as_const_ptr().cast::<c_char>(),
650 len,
651 )
652 };
653
654 if res < 0 {
655 return Err(Error::from_errno(res as i32));
656 }
657
658 #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
659 assert!(res <= len);
660
661 // GUARANTEES: `strncpy_from_user` was successful, so `dst` has contents in accordance with the
662 // guarantees of this function.
663 Ok(res as usize)
664 }
665