// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

use core::mem::{size_of, size_of_val, MaybeUninit};
use core::ops::Range;

use kernel::{
    bindings,
    fs::file::{File, FileDescriptorReservation},
    prelude::*,
    sync::{aref::ARef, Arc},
    transmute::{AsBytes, FromBytes},
    uaccess::UserSliceReader,
    uapi,
};

use crate::{
    deferred_close::DeferredFdCloser,
    defs::*,
    node::{Node, NodeRef},
    process::Process,
    DArc,
};

#[derive(Default)]
pub(crate) struct AllocationInfo {
    /// Range within the allocation where we can find the offsets to the object descriptors.
    pub(crate) offsets: Option<Range<usize>>,
    /// The target node of the transaction this allocation is associated with.
    /// Not set for replies.
    pub(crate) target_node: Option<NodeRef>,
    /// When this allocation is dropped, call `pending_oneway_finished` on the node.
    ///
    /// This is used to serialize oneway transactions on the same node. Binder guarantees that
    /// oneway transactions to the same node are delivered sequentially in the order they are sent.
    pub(crate) oneway_node: Option<DArc<Node>>,
    /// Zero the data in the buffer on free.
    pub(crate) clear_on_free: bool,
    /// List of files embedded in this transaction.
    file_list: FileList,
}

/// Represents an allocation that the kernel is currently using.
///
/// When allocations are idle, the range allocator holds the data related to them.
///
/// # Invariants
///
/// This allocation corresponds to an allocation in the range allocator, so the relevant pages are
/// marked in use in the page range.
pub(crate) struct Allocation {
    pub(crate) offset: usize,
    size: usize,
    pub(crate) ptr: usize,
    pub(crate) process: Arc<Process>,
    allocation_info: Option<AllocationInfo>,
    free_on_drop: bool,
    pub(crate) oneway_spam_detected: bool,
    #[allow(dead_code)]
    pub(crate) debug_id: usize,
}

impl Allocation {
    pub(crate) fn new(
        process: Arc<Process>,
        debug_id: usize,
        offset: usize,
        size: usize,
        ptr: usize,
        oneway_spam_detected: bool,
    ) -> Self {
        Self {
            process,
            offset,
            size,
            ptr,
            debug_id,
            oneway_spam_detected,
            allocation_info: None,
            free_on_drop: true,
        }
    }

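    /// Check that the region described by `offset` and `size` lies within this allocation,
    /// returning `EFAULT` if the addition overflows or the end of the region exceeds `self.size`.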
    fn size_check(&self, offset: usize, size: usize) -> Result {
        let overflow_fail = offset.checked_add(size).is_none();
        let cmp_size_fail = offset.wrapping_add(size) > self.size;
        if overflow_fail || cmp_size_fail {
            return Err(EFAULT);
        }
        Ok(())
    }

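    /// Copy `size` bytes from `reader` into this allocation, starting `offset` bytes into the
    /// allocation.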
    pub(crate) fn copy_into(
        &self,
        reader: &mut UserSliceReader,
        offset: usize,
        size: usize,
    ) -> Result {
        self.size_check(offset, size)?;

        // SAFETY: While this object exists, the range allocator will keep the range allocated, and
        // in turn, the pages will be marked as in use.
        unsafe {
            self.process
                .pages
                .copy_from_user_slice(reader, self.offset + offset, size)
        }
    }

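    /// Read a value of type `T` from the allocation at the given `offset`.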
    pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.size_check(offset, size_of::<T>())?;

        // SAFETY: While this object exists, the range allocator will keep the range allocated, and
        // in turn, the pages will be marked as in use.
        unsafe { self.process.pages.read(self.offset + offset) }
    }

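    /// Write the bytes of `obj` into the allocation at the given `offset`.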
    pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
        self.size_check(offset, size_of_val::<T>(obj))?;

        // SAFETY: While this object exists, the range allocator will keep the range allocated, and
        // in turn, the pages will be marked as in use.
        unsafe { self.process.pages.write(self.offset + offset, obj) }
    }

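    /// Overwrite the entire allocation with zero bytes.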
    pub(crate) fn fill_zero(&self) -> Result {
        // SAFETY: While this object exists, the range allocator will keep the range allocated, and
        // in turn, the pages will be marked as in use.
        unsafe { self.process.pages.fill_zero(self.offset, self.size) }
    }

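    /// Consume this object without freeing the underlying range: ownership of the buffer is
    /// handed back to the process so that it can be freed later.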
    pub(crate) fn keep_alive(mut self) {
        self.process
            .buffer_make_freeable(self.offset, self.allocation_info.take());
        self.free_on_drop = false;
    }

    pub(crate) fn set_info(&mut self, info: AllocationInfo) {
        self.allocation_info = Some(info);
    }

    pub(crate) fn get_or_init_info(&mut self) -> &mut AllocationInfo {
        self.allocation_info.get_or_insert_with(Default::default)
    }

    pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
        self.get_or_init_info().offsets = Some(offsets);
    }

    pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
        self.get_or_init_info().oneway_node = Some(oneway_node);
    }

    pub(crate) fn set_info_clear_on_drop(&mut self) {
        self.get_or_init_info().clear_on_free = true;
    }

    pub(crate) fn set_info_target_node(&mut self, target_node: NodeRef) {
        self.get_or_init_info().target_node = Some(target_node);
    }

    /// Reserve enough space to push at least `num_fds` fds.
    pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
        self.get_or_init_info()
            .file_list
            .files_to_translate
            .reserve(num_fds, GFP_KERNEL)?;

        Ok(())
    }

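    /// Record a file that should be translated into a file descriptor in the recipient process;
    /// `buffer_offset` is where the new fd number will be written in the buffer.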
    pub(crate) fn info_add_fd(
        &mut self,
        file: ARef<File>,
        buffer_offset: usize,
        close_on_free: bool,
    ) -> Result {
        self.get_or_init_info().file_list.files_to_translate.push(
            FileEntry {
                file,
                buffer_offset,
                close_on_free,
            },
            GFP_KERNEL,
        )?;

        Ok(())
    }

    pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
        self.get_or_init_info().file_list.close_on_free = cof.0;
    }

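    /// Reserve a file descriptor for every file recorded in the file list and write each reserved
    /// fd number into the buffer. The fds are not installed until `TranslatedFds::commit` is
    /// called.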
    pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
        let file_list = match self.allocation_info.as_mut() {
            Some(info) => &mut info.file_list,
            None => return Ok(TranslatedFds::new()),
        };

        let files = core::mem::take(&mut file_list.files_to_translate);

        let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
        let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;

        let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
        for file_info in files {
            let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
            let fd = res.reserved_fd();
            self.write::<u32>(file_info.buffer_offset, &fd)?;

            reservations.push(
                Reservation {
                    res,
                    file: file_info.file,
                },
                GFP_KERNEL,
            )?;
            if file_info.close_on_free {
                close_on_free.push(fd, GFP_KERNEL)?;
            }
        }

        Ok(TranslatedFds {
            reservations,
            close_on_free: FdsCloseOnFree(close_on_free),
        })
    }

    /// Should the looper return to userspace when freeing this allocation?
    pub(crate) fn looper_need_return_on_free(&self) -> bool {
        // Closing fds involves pushing task_work for execution when we return to userspace. Hence,
        // we should return to userspace asap if we are closing fds.
        match self.allocation_info {
            Some(ref info) => !info.file_list.close_on_free.is_empty(),
            None => false,
        }
    }
}

impl Drop for Allocation {
    fn drop(&mut self) {
        if !self.free_on_drop {
            return;
        }

        if let Some(mut info) = self.allocation_info.take() {
            if let Some(oneway_node) = info.oneway_node.as_ref() {
                oneway_node.pending_oneway_finished();
            }

            info.target_node = None;

            if let Some(offsets) = info.offsets.clone() {
                let view = AllocationView::new(self, offsets.start);
                for i in offsets.step_by(size_of::<usize>()) {
                    if view.cleanup_object(i).is_err() {
                        pr_warn!("Error cleaning up object at offset {}\n", i)
                    }
                }
            }

            for &fd in &info.file_list.close_on_free {
                let closer = match DeferredFdCloser::new(GFP_KERNEL) {
                    Ok(closer) => closer,
                    Err(kernel::alloc::AllocError) => {
                        // Ignore allocation failures.
                        break;
                    }
                };

                // Here, we ignore errors. The operation can fail if the fd is not valid, or if the
                // method is called from a kthread. However, this is always called from a syscall,
                // so the latter case cannot happen, and we don't care about the first case.
                let _ = closer.close_fd(fd);
            }

            if info.clear_on_free {
                if let Err(e) = self.fill_zero() {
                    pr_warn!("Failed to clear data on free: {:?}\n", e);
                }
            }
        }

        self.process.buffer_raw_free(self.ptr);
    }
}

/// A wrapper around `Allocation` that is being created.
///
/// If the allocation is destroyed while wrapped in this wrapper, then the allocation will be
/// considered to be part of a failed transaction. Successful transactions avoid that by calling
/// `success`, which skips the destructor.
#[repr(transparent)]
pub(crate) struct NewAllocation(pub(crate) Allocation);

impl NewAllocation {
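    /// Mark the transaction as successful, extracting the inner `Allocation` without running the
    /// failed-transaction cleanup.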
    pub(crate) fn success(self) -> Allocation {
        // This skips the destructor.
        //
        // SAFETY: This type is `#[repr(transparent)]`, so the layout matches.
        unsafe { core::mem::transmute(self) }
    }
}

impl core::ops::Deref for NewAllocation {
    type Target = Allocation;
    fn deref(&self) -> &Allocation {
        &self.0
    }
}

impl core::ops::DerefMut for NewAllocation {
    fn deref_mut(&mut self) -> &mut Allocation {
        &mut self.0
    }
}

/// A view into the beginning of an allocation.
///
/// All attempts to read or write outside of the view will fail. To intentionally access outside of
/// this view, use the `alloc` field of this struct directly.
pub(crate) struct AllocationView<'a> {
    pub(crate) alloc: &'a mut Allocation,
    limit: usize,
}

impl<'a> AllocationView<'a> {
    pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
        AllocationView { alloc, limit }
    }

    pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
        if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
            return Err(EINVAL);
        }
        self.alloc.read(offset)
    }

    pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
        if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
            return Err(EINVAL);
        }
        self.alloc.write(offset, obj)
    }

    pub(crate) fn copy_into(
        &self,
        reader: &mut UserSliceReader,
        offset: usize,
        size: usize,
    ) -> Result {
        if offset.checked_add(size).ok_or(EINVAL)? > self.limit {
            return Err(EINVAL);
        }
        self.alloc.copy_into(reader, offset, size)
    }

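    /// Translate a `flat_binder_object` from the sender into the recipient's view: if the
    /// recipient owns the node, write a (weak) binder object; otherwise, insert a handle into the
    /// recipient's handle table and write a (weak) handle object.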
    pub(crate) fn transfer_binder_object(
        &self,
        offset: usize,
        obj: &uapi::flat_binder_object,
        strong: bool,
        node_ref: NodeRef,
    ) -> Result {
        let mut newobj = FlatBinderObject::default();
        let node = node_ref.node.clone();
        if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
            // The receiving process is the owner of the node, so send it a binder object (instead
            // of a handle).
            let (ptr, cookie) = node.get_id();
            newobj.hdr.type_ = if strong {
                BINDER_TYPE_BINDER
            } else {
                BINDER_TYPE_WEAK_BINDER
            };
            newobj.flags = obj.flags;
            newobj.__bindgen_anon_1.binder = ptr as _;
            newobj.cookie = cookie as _;
            self.write(offset, &newobj)?;
            // Increment the user ref count on the node. It will be decremented as part of the
            // destruction of the buffer, when we see a binder or weak-binder object.
            node.update_refcount(true, 1, strong);
        } else {
            // The receiving process is different from the owner, so we need to insert a handle to
            // the binder object.
            let handle = self
                .alloc
                .process
                .as_arc_borrow()
                .insert_or_update_handle(node_ref, false)?;
            newobj.hdr.type_ = if strong {
                BINDER_TYPE_HANDLE
            } else {
                BINDER_TYPE_WEAK_HANDLE
            };
            newobj.flags = obj.flags;
            newobj.__bindgen_anon_1.handle = handle;
            if self.write(offset, &newobj).is_err() {
                // Decrement ref count on the handle we just created.
                let _ = self
                    .alloc
                    .process
                    .as_arc_borrow()
                    .update_ref(handle, false, strong);
                return Err(EINVAL);
            }
        }

        Ok(())
    }

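    /// Clean up the object whose buffer offset is stored at `index_offset`, undoing the refcount
    /// changes that were applied when the object was translated into this buffer.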
    fn cleanup_object(&self, index_offset: usize) -> Result {
        let offset = self.alloc.read(index_offset)?;
        let header = self.read::<BinderObjectHeader>(offset)?;
        match header.type_ {
            BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
                let obj = self.read::<FlatBinderObject>(offset)?;
                let strong = header.type_ == BINDER_TYPE_BINDER;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
                // populated.
                let ptr = unsafe { obj.__bindgen_anon_1.binder };
                let cookie = obj.cookie;
                self.alloc.process.update_node(ptr, cookie, strong);
                Ok(())
            }
            BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
                let obj = self.read::<FlatBinderObject>(offset)?;
                let strong = header.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
                // populated.
                let handle = unsafe { obj.__bindgen_anon_1.handle };
                self.alloc
                    .process
                    .as_arc_borrow()
                    .update_ref(handle, false, strong)
            }
            _ => Ok(()),
        }
    }
}

/// A binder object as it is serialized.
///
/// # Invariants
///
/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
/// types.
#[repr(C)]
pub(crate) union BinderObject {
    hdr: uapi::binder_object_header,
    fbo: uapi::flat_binder_object,
    fdo: uapi::binder_fd_object,
    bbo: uapi::binder_buffer_object,
    fdao: uapi::binder_fd_array_object,
}

/// A view into a `BinderObject` that can be used in a match statement.
pub(crate) enum BinderObjectRef<'a> {
    Binder(&'a mut uapi::flat_binder_object),
    Handle(&'a mut uapi::flat_binder_object),
    Fd(&'a mut uapi::binder_fd_object),
    Ptr(&'a mut uapi::binder_buffer_object),
    Fda(&'a mut uapi::binder_fd_array_object),
}

impl BinderObject {
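    /// Read a binder object from `reader`, advancing the reader by exactly the size of the object
    /// that was read.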
    pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
        let object = Self::read_from_inner(|slice| {
            let read_len = usize::min(slice.len(), reader.len());
            reader.clone_reader().read_slice(&mut slice[..read_len])?;
            Ok(())
        })?;

        // If we used an object type smaller than the largest object size, then we've read more
        // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
        // original reader. Now, we call `skip` so that the caller's reader is advanced by the
        // right amount.
        //
        // The `skip` call fails if the reader doesn't have `size` bytes available. This could
        // happen if the type header corresponds to an object type that is larger than the rest of
        // the reader.
        //
        // Any extra bytes beyond the size of the object are inaccessible after this call, so
        // reading them again from the `reader` later does not result in TOCTOU bugs.
        reader.skip(object.size())?;

        Ok(object)
    }

    /// Use the provided reader closure to construct a `BinderObject`.
    ///
    /// The closure should write the bytes for the object into the provided slice.
    pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
    where
        R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
    {
        let mut obj = MaybeUninit::<BinderObject>::zeroed();

        // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
        // and the byte array has an alignment requirement of one, so the pointer cast is okay.
        // Additionally, `obj` was initialized to zeros, so the byte array will not be
        // uninitialized.
        (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;

        // SAFETY: The entire object is initialized, so accessing this field is safe.
        let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
        if Self::type_to_size(type_).is_none() {
            // The value of `obj.hdr.type_` was invalid.
            return Err(EINVAL);
        }

        // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
        // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
        unsafe { Ok(obj.assume_init()) }
    }

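    /// View this object as the variant indicated by its header type, so that it can be used in a
    /// match statement.
    ///
    /// For instance (a sketch; `object` being a `BinderObject` that was read earlier):
    ///
    /// ```ignore
    /// match object.as_ref() {
    ///     BinderObjectRef::Fd(fdo) => { /* translate the fd object */ }
    ///     _ => { /* handle the other object types */ }
    /// }
    /// ```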
    pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
        use BinderObjectRef::*;
        // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
        // variants of this union accept all initialized bit patterns.
        unsafe {
            match self.hdr.type_ {
                BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
                BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
                BINDER_TYPE_FD => Fd(&mut self.fdo),
                BINDER_TYPE_PTR => Ptr(&mut self.bbo),
                BINDER_TYPE_FDA => Fda(&mut self.fdao),
                // SAFETY: By the type invariant, `self.hdr.type_` cannot have any value other
                // than the ones checked above.
                _ => core::hint::unreachable_unchecked(),
            }
        }
    }

    pub(crate) fn size(&self) -> usize {
        // SAFETY: The entire object is initialized, so accessing this field is safe.
        let type_ = unsafe { self.hdr.type_ };

        // SAFETY: The type invariants guarantee that the type field is correct.
        unsafe { Self::type_to_size(type_).unwrap_unchecked() }
    }

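    /// Return the serialized size of an object with the given header type, or `None` if the type
    /// is not one of the recognized binder object types.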
    fn type_to_size(type_: u32) -> Option<usize> {
        match type_ {
            BINDER_TYPE_WEAK_BINDER => Some(size_of::<uapi::flat_binder_object>()),
            BINDER_TYPE_BINDER => Some(size_of::<uapi::flat_binder_object>()),
            BINDER_TYPE_WEAK_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
            BINDER_TYPE_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
            BINDER_TYPE_FD => Some(size_of::<uapi::binder_fd_object>()),
            BINDER_TYPE_PTR => Some(size_of::<uapi::binder_buffer_object>()),
            BINDER_TYPE_FDA => Some(size_of::<uapi::binder_fd_array_object>()),
            _ => None,
        }
    }
}

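/// The files involved in a transaction: files still waiting to be translated into fds in the
/// recipient, and fds that must be closed when the allocation is freed.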
#[derive(Default)]
struct FileList {
    files_to_translate: KVec<FileEntry>,
    close_on_free: KVec<u32>,
}

struct FileEntry {
    /// The file for which a descriptor will be created in the recipient process.
    file: ARef<File>,
    /// The offset in the buffer where the file descriptor is stored.
    buffer_offset: usize,
    /// Whether this fd should be closed when the allocation is freed.
    close_on_free: bool,
}

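/// Fd reservations produced by `Allocation::translate_fds`, waiting to be either committed or
/// dropped.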
pub(crate) struct TranslatedFds {
    reservations: KVec<Reservation>,
    /// If commit is called, then these fds should be closed. (If commit is not called, then they
    /// shouldn't be closed.)
    close_on_free: FdsCloseOnFree,
}

struct Reservation {
    res: FileDescriptorReservation,
    file: ARef<File>,
}

impl TranslatedFds {
    pub(crate) fn new() -> Self {
        Self {
            reservations: KVec::new(),
            close_on_free: FdsCloseOnFree(KVec::new()),
        }
    }

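    /// Install each reserved fd, binding it to its file, and return the list of fds that should
    /// be closed when the allocation is freed.
    ///
    /// A caller is expected to drive the two-phase protocol roughly like this (a sketch; the
    /// surrounding transaction machinery lives elsewhere):
    ///
    /// ```ignore
    /// let translated = alloc.translate_fds()?;
    /// // ... queue the transaction; once it can no longer fail ...
    /// alloc.set_info_close_on_free(translated.commit());
    /// ```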
    pub(crate) fn commit(self) -> FdsCloseOnFree {
        for entry in self.reservations {
            entry.res.fd_install(entry.file);
        }

        self.close_on_free
    }
}

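/// The fds that should be closed when the allocation they were translated into is freed.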
pub(crate) struct FdsCloseOnFree(KVec<u32>);