xref: /linux/drivers/android/binder/thread.rs (revision d047248190d86a52164656d47bec9bfba61dc71e)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 // Copyright (C) 2025 Google LLC.
4 
5 //! This module defines the `Thread` type, which represents a userspace thread that is using
6 //! binder.
7 //!
8 //! The `Process` object stores all of the threads in an rb tree.
9 
10 use kernel::{
11     bindings,
12     fs::{File, LocalFile},
13     list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
14     prelude::*,
15     security,
16     seq_file::SeqFile,
17     seq_print,
18     sync::poll::{PollCondVar, PollTable},
19     sync::{Arc, SpinLock},
20     task::Task,
21     types::ARef,
22     uaccess::{UserPtr, UserSlice},
23     uapi,
24 };
25 
26 use crate::{
27     allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
28     defs::*,
29     error::BinderResult,
30     process::{GetWorkOrRegister, Process},
31     ptr_align,
32     stats::GLOBAL_STATS,
33     transaction::Transaction,
34     BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
35 };
36 
37 use core::{
38     mem::size_of,
39     sync::atomic::{AtomicU32, Ordering},
40 };
41 
42 fn is_aligned(value: usize, to: usize) -> bool {
43     value % to == 0
44 }
45 
46 /// Stores the layout of the scatter-gather entries. This is used during the `translate_object`
47 /// calls for a transaction and is discarded once the transaction data has been copied.
48 struct ScatterGatherState {
49     /// A struct that tracks the amount of unused buffer space.
50     unused_buffer_space: UnusedBufferSpace,
51     /// Scatter-gather entries to copy.
52     sg_entries: KVec<ScatterGatherEntry>,
53     /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
54     /// was processed and all of its ancestors. The array is in sorted order.
55     ancestors: KVec<usize>,
56 }
57 
58 /// This entry specifies an additional buffer that should be copied using the scatter-gather
59 /// mechanism.
60 struct ScatterGatherEntry {
61     /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
62     obj_index: usize,
63     /// Offset in target buffer.
64     offset: usize,
65     /// User address in source buffer.
66     sender_uaddr: usize,
67     /// Number of bytes to copy.
68     length: usize,
69     /// The minimum offset of the next fixup in this buffer.
70     fixup_min_offset: usize,
71     /// The offsets within this buffer that contain pointers which should be translated.
72     pointer_fixups: KVec<PointerFixupEntry>,
73 }
74 
75 /// This entry specifies that a fixup should happen at `target_offset` of the
76 /// buffer.
77 enum PointerFixupEntry {
78     /// A fixup for a `binder_buffer_object`.
79     Fixup {
80         /// The translated pointer to write.
81         pointer_value: u64,
82         /// The offset at which the value should be written. The offset is relative
83         /// to the original buffer.
84         target_offset: usize,
85     },
86     /// A skip for a `binder_fd_array_object`.
87     Skip {
88         /// The number of bytes to skip.
89         skip: usize,
90         /// The offset at which the skip should happen. The offset is relative
91         /// to the original buffer.
92         target_offset: usize,
93     },
94 }
95 
96 /// Return type of `validate_parent_fixup`.
97 struct ParentFixupInfo {
98     /// The index of the parent buffer in `sg_entries`.
99     parent_sg_index: usize,
100     /// The number of ancestors of the buffer.
101     ///
102     /// The buffer is considered an ancestor of itself, so this is always at
103     /// least one.
104     num_ancestors: usize,
105     /// New value of `fixup_min_offset` if this fixup is applied.
106     new_min_offset: usize,
107     /// The offset of the fixup in the target buffer.
108     target_offset: usize,
109 }
110 
111 impl ScatterGatherState {
112     /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
113     /// to access a region in its parent buffer. These accesses have various
114     /// restrictions, which this method verifies.
115     ///
116     /// The `parent_offset` and `length` arguments describe the offset and
117     /// length of the access in the parent buffer.
118     ///
119     /// # Detailed restrictions
120     ///
121     /// Obviously the fixup must be in-bounds for the parent buffer.
122     ///
123     /// For safety reasons, we only allow fixups inside a buffer to happen
124     /// at increasing offsets; additionally, we only allow fixup on the last
125     /// buffer object that was verified, or one of its parents.
126     ///
127     /// Example of what is allowed:
128     ///
129     /// A
130     ///   B (parent = A, offset = 0)
131     ///   C (parent = A, offset = 16)
132     ///     D (parent = C, offset = 0)
133     ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
134     ///
135     /// Examples of what is not allowed:
136     ///
137     /// Decreasing offsets within the same parent:
138     /// A
139     ///   C (parent = A, offset = 16)
140     ///   B (parent = A, offset = 0) // decreasing offset within A
141     ///
142     /// Referring to a parent that wasn't the last object or any of its parents:
143     /// A
144     ///   B (parent = A, offset = 0)
145     ///   C (parent = A, offset = 0)
146     ///   C (parent = A, offset = 16)
147     ///     D (parent = B, offset = 0) // B is not C or any of C's parents
148     fn validate_parent_fixup(
149         &self,
150         parent: usize,
151         parent_offset: usize,
152         length: usize,
153     ) -> Result<ParentFixupInfo> {
154         // Using `position` would also be correct, but `rposition` avoids
155         // quadratic running times.
156         let ancestors_i = self
157             .ancestors
158             .iter()
159             .copied()
160             .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
161             .ok_or(EINVAL)?;
162         let sg_idx = self.ancestors[ancestors_i];
163         let sg_entry = match self.sg_entries.get(sg_idx) {
164             Some(sg_entry) => sg_entry,
165             None => {
166                 pr_err!(
167                     "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
168                     ancestors_i,
169                     sg_idx,
170                     self.sg_entries.len()
171                 );
172                 return Err(EINVAL);
173             }
174         };
175         if sg_entry.fixup_min_offset > parent_offset {
176             pr_warn!(
177                 "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
178                 sg_entry.fixup_min_offset,
179                 parent_offset
180             );
181             return Err(EINVAL);
182         }
183         let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
184         if new_min_offset > sg_entry.length {
185             pr_warn!(
186                 "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
187                 new_min_offset,
188                 sg_entry.length
189             );
190             return Err(EINVAL);
191         }
192         let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
193         // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
194         // most `self.ancestors.len()`, which also fits in a usize.
195         Ok(ParentFixupInfo {
196             parent_sg_index: sg_idx,
197             num_ancestors: ancestors_i + 1,
198             new_min_offset,
199             target_offset,
200         })
201     }
202 }
203 
204 /// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
205 /// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
206 /// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
207 struct UnusedBufferSpace {
208     /// The start of the remaining space.
209     offset: usize,
210     /// The end of the remaining space.
211     limit: usize,
212 }
213 impl UnusedBufferSpace {
214     /// Claims the next `size` bytes from the unused buffer space, returning the offset of the
215     /// claimed chunk within the buffer.
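    ///
    /// For example, assuming `ptr_align` rounds sizes up to an 8-byte boundary: starting from
    /// `offset = 0` and `limit = 32`, `claim_next(12)` returns `Ok(0)` and advances `offset` to
    /// 16, and a following `claim_next(24)` would exceed `limit` and fail with `EINVAL`.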
216     fn claim_next(&mut self, size: usize) -> Result<usize> {
217         // We require every chunk to be aligned.
218         let size = ptr_align(size).ok_or(EINVAL)?;
219         let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
220 
221         if new_offset <= self.limit {
222             let offset = self.offset;
223             self.offset = new_offset;
224             Ok(offset)
225         } else {
226             Err(EINVAL)
227         }
228     }
229 }
230 
231 pub(crate) enum PushWorkRes {
232     Ok,
233     FailedDead(DLArc<dyn DeliverToRead>),
234 }
235 
236 impl PushWorkRes {
237     fn is_ok(&self) -> bool {
238         match self {
239             PushWorkRes::Ok => true,
240             PushWorkRes::FailedDead(_) => false,
241         }
242     }
243 }
244 
245 /// The fields of `Thread` protected by the spinlock.
246 struct InnerThread {
247     /// Determines the looper state of the thread. It is a bit-wise combination of the constants
248     /// prefixed with `LOOPER_`.
249     looper_flags: u32,
250 
251     /// Determines whether the looper should return.
252     looper_need_return: bool,
253 
254     /// Determines if thread is dead.
255     is_dead: bool,
256 
257     /// Work item used to deliver error codes to the thread that started a transaction. Stored here
258     /// so that it can be reused.
259     reply_work: DArc<ThreadError>,
260 
261     /// Work item used to deliver error codes to the current thread. Stored here so that it can be
262     /// reused.
263     return_work: DArc<ThreadError>,
264 
265     /// Determines whether the work list below should be processed. When set to false, `work_list`
266     /// is treated as if it were empty.
267     process_work_list: bool,
268     /// List of work items to deliver to userspace.
269     work_list: List<DTRWrap<dyn DeliverToRead>>,
270     current_transaction: Option<DArc<Transaction>>,
271 
272     /// Extended error information for this thread.
273     extended_error: ExtendedError,
274 }
275 
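// Bits stored in `InnerThread::looper_flags`. A thread that issues both `BC_REGISTER_LOOPER` and
// `BC_ENTER_LOOPER` ends up marked as `LOOPER_INVALID`.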
276 const LOOPER_REGISTERED: u32 = 0x01;
277 const LOOPER_ENTERED: u32 = 0x02;
278 const LOOPER_EXITED: u32 = 0x04;
279 const LOOPER_INVALID: u32 = 0x08;
280 const LOOPER_WAITING: u32 = 0x10;
281 const LOOPER_WAITING_PROC: u32 = 0x20;
282 const LOOPER_POLL: u32 = 0x40;
283 
284 impl InnerThread {
285     fn new() -> Result<Self> {
286         fn next_err_id() -> u32 {
287             static EE_ID: AtomicU32 = AtomicU32::new(0);
288             EE_ID.fetch_add(1, Ordering::Relaxed)
289         }
290 
291         Ok(Self {
292             looper_flags: 0,
293             looper_need_return: false,
294             is_dead: false,
295             process_work_list: false,
296             reply_work: ThreadError::try_new()?,
297             return_work: ThreadError::try_new()?,
298             work_list: List::new(),
299             current_transaction: None,
300             extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
301         })
302     }
303 
304     fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
305         if !self.process_work_list {
306             return None;
307         }
308 
309         let ret = self.work_list.pop_front();
310         self.process_work_list = !self.work_list.is_empty();
311         ret
312     }
313 
314     fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
315         if self.is_dead {
316             PushWorkRes::FailedDead(work)
317         } else {
318             self.work_list.push_back(work);
319             self.process_work_list = true;
320             PushWorkRes::Ok
321         }
322     }
323 
324     fn push_reply_work(&mut self, code: u32) {
325         if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
326             work.set_error_code(code);
327             self.push_work(work);
328         } else {
329             pr_warn!("Thread reply work is already in use.");
330         }
331     }
332 
333     fn push_return_work(&mut self, reply: u32) {
334         if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
335             work.set_error_code(reply);
336             self.push_work(work);
337         } else {
338             pr_warn!("Thread return work is already in use.");
339         }
340     }
341 
342     /// Used to push work items that do not need to be processed immediately and can wait until the
343     /// thread gets another work item.
344     fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
345         self.work_list.push_back(work);
346     }
347 
348     /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
349     /// (that it could respond to) but it has also issued a transaction, it must first wait for the
350     /// previously-issued transaction to complete.
351     ///
352     /// The `thread` parameter should be the thread containing this `InnerThread`.
353     fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
354         let transaction = self.current_transaction.take().ok_or(EINVAL)?;
355         if core::ptr::eq(thread, transaction.from.as_ref()) {
356             self.current_transaction = Some(transaction);
357             return Err(EINVAL);
358         }
359         // Find a new current transaction for this thread.
360         self.current_transaction = transaction.find_from(thread).cloned();
361         Ok(transaction)
362     }
363 
364     fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
365         match self.current_transaction.take() {
366             None => false,
367             Some(old) => {
368                 if !Arc::ptr_eq(transaction, &old) {
369                     self.current_transaction = Some(old);
370                     return false;
371                 }
372                 self.current_transaction = old.clone_next();
373                 true
374             }
375         }
376     }
377 
378     fn looper_enter(&mut self) {
379         self.looper_flags |= LOOPER_ENTERED;
380         if self.looper_flags & LOOPER_REGISTERED != 0 {
381             self.looper_flags |= LOOPER_INVALID;
382         }
383     }
384 
385     fn looper_register(&mut self, valid: bool) {
386         self.looper_flags |= LOOPER_REGISTERED;
387         if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
388             self.looper_flags |= LOOPER_INVALID;
389         }
390     }
391 
392     fn looper_exit(&mut self) {
393         self.looper_flags |= LOOPER_EXITED;
394     }
395 
396     /// Determines whether the thread is part of a pool, i.e., if it is a looper.
397     fn is_looper(&self) -> bool {
398         self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
399     }
400 
401     /// Determines whether the thread should attempt to fetch work items from the process queue.
402     /// This is generally the case when the thread is registered as a looper and not part of a
403     /// transaction stack. But if there is local work, we want to return to userspace before we
404     /// deliver any remote work.
405     fn should_use_process_work_queue(&self) -> bool {
406         self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
407     }
408 
409     fn poll(&mut self) -> u32 {
410         self.looper_flags |= LOOPER_POLL;
411         if self.process_work_list || self.looper_need_return {
412             bindings::POLLIN
413         } else {
414             0
415         }
416     }
417 }
418 
419 /// This represents a thread that's used with binder.
420 #[pin_data]
421 pub(crate) struct Thread {
422     pub(crate) id: i32,
423     pub(crate) process: Arc<Process>,
424     pub(crate) task: ARef<Task>,
425     #[pin]
426     inner: SpinLock<InnerThread>,
427     #[pin]
428     work_condvar: PollCondVar,
429     /// Used to insert this thread into the process' `ready_threads` list.
430     ///
431     /// INVARIANT: May never be used for any list other than `self.process.ready_threads`.
432     #[pin]
433     links: ListLinks,
434     #[pin]
435     links_track: AtomicTracker,
436 }
437 
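// These implementations allow a `ListArc<Thread>` to be inserted into the process'
// `ready_threads` list via the `links` field, with `links_track` tracking whether such a
// `ListArc` currently exists.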
438 kernel::list::impl_list_arc_safe! {
439     impl ListArcSafe<0> for Thread {
440         tracked_by links_track: AtomicTracker;
441     }
442 }
443 kernel::list::impl_list_item! {
444     impl ListItem<0> for Thread {
445         using ListLinks { self.links };
446     }
447 }
448 
449 impl Thread {
450     pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
451         let inner = InnerThread::new()?;
452 
453         Arc::pin_init(
454             try_pin_init!(Thread {
455                 id,
456                 process,
457                 task: ARef::from(&**kernel::current!()),
458                 inner <- kernel::new_spinlock!(inner, "Thread::inner"),
459                 work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
460                 links <- ListLinks::new(),
461                 links_track <- AtomicTracker::new(),
462             }),
463             GFP_KERNEL,
464         )
465     }
466 
467     #[inline(never)]
468     pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
469         let inner = self.inner.lock();
470 
471         if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
472             seq_print!(
473                 m,
474                 "  thread {}: l {:02x} need_return {}\n",
475                 self.id,
476                 inner.looper_flags,
477                 inner.looper_need_return,
478             );
479         }
480 
481         let mut t_opt = inner.current_transaction.as_ref();
482         while let Some(t) = t_opt {
483             if Arc::ptr_eq(&t.from, self) {
484                 t.debug_print_inner(m, "    outgoing transaction ");
485                 t_opt = t.from_parent.as_ref();
486             } else if Arc::ptr_eq(&t.to, &self.process) {
487                 t.debug_print_inner(m, "    incoming transaction ");
488                 t_opt = t.find_from(self);
489             } else {
490                 t.debug_print_inner(m, "    bad transaction ");
491                 t_opt = None;
492             }
493         }
494 
495         for work in &inner.work_list {
496             work.debug_print(m, "    ", "    pending transaction ")?;
497         }
498         Ok(())
499     }
500 
501     pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
502         let mut writer = data.writer();
503         let ee = self.inner.lock().extended_error;
504         writer.write(&ee)?;
505         Ok(())
506     }
507 
508     pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
509         self.inner.lock().current_transaction = Some(transaction);
510     }
511 
512     pub(crate) fn has_current_transaction(&self) -> bool {
513         self.inner.lock().current_transaction.is_some()
514     }
515 
516     /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
517     /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
518     /// signal); otherwise it returns `EAGAIN` to indicate that none is available.
519     fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
520         {
521             let mut inner = self.inner.lock();
522             if inner.looper_need_return {
523                 return Ok(inner.pop_work());
524             }
525         }
526 
527         // Try once if the caller does not want to wait.
528         if !wait {
529             return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
530         }
531 
532         // Loop waiting only on the local queue (i.e., not registering with the process queue).
533         let mut inner = self.inner.lock();
534         loop {
535             if let Some(work) = inner.pop_work() {
536                 return Ok(Some(work));
537             }
538 
539             inner.looper_flags |= LOOPER_WAITING;
540             let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
541             inner.looper_flags &= !LOOPER_WAITING;
542 
543             if signal_pending {
544                 return Err(EINTR);
545             }
546             if inner.looper_need_return {
547                 return Ok(None);
548             }
549         }
550     }
551 
552     /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
553     /// queue if none is available locally.
554     ///
555     /// This must only be called when the thread is not participating in a transaction chain. If it
556     /// is, the local version (`get_work_local`) should be used instead.
557     fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
558         // Try to get work from the thread's work queue, using only a local lock.
559         {
560             let mut inner = self.inner.lock();
561             if let Some(work) = inner.pop_work() {
562                 return Ok(Some(work));
563             }
564             if inner.looper_need_return {
565                 drop(inner);
566                 return Ok(self.process.get_work());
567             }
568         }
569 
570         // If the caller doesn't want to wait, try to grab work from the process queue.
571         //
572         // We know nothing will have been queued directly to the thread queue because it is not in
573         // a transaction and it is not in the process' ready list.
574         if !wait {
575             return self.process.get_work().ok_or(EAGAIN).map(Some);
576         }
577 
578         // Get work from the process queue. If none is available, atomically register as ready.
579         let reg = match self.process.get_work_or_register(self) {
580             GetWorkOrRegister::Work(work) => return Ok(Some(work)),
581             GetWorkOrRegister::Register(reg) => reg,
582         };
583 
584         let mut inner = self.inner.lock();
585         loop {
586             if let Some(work) = inner.pop_work() {
587                 return Ok(Some(work));
588             }
589 
590             inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
591             let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
592             inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
593 
594             if signal_pending || inner.looper_need_return {
595                 // We need to return now. We need to pull the thread off the list of ready threads
596                 // (by dropping `reg`), then check the state again after it's off the list to
597                 // ensure that something was not queued in the meantime. If something has been
598                 // queued, we just return it (instead of the error).
599                 drop(inner);
600                 drop(reg);
601 
602                 let res = match self.inner.lock().pop_work() {
603                     Some(work) => Ok(Some(work)),
604                     None if signal_pending => Err(EINTR),
605                     None => Ok(None),
606                 };
607                 return res;
608             }
609         }
610     }
611 
612     /// Push the provided work item to be delivered to user space via this thread.
613     ///
614     /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
615     pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
616         let sync = work.should_sync_wakeup();
617 
618         let res = self.inner.lock().push_work(work);
619 
620         if res.is_ok() {
621             if sync {
622                 self.work_condvar.notify_sync();
623             } else {
624                 self.work_condvar.notify_one();
625             }
626         }
627 
628         res
629     }
630 
631     /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
632     /// part of a thread pool) and is alive. Otherwise, pushes the work item to the process instead.
633     pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
634         let mut inner = self.inner.lock();
635         if inner.is_looper() && !inner.is_dead {
636             inner.push_work(work);
637             Ok(())
638         } else {
639             drop(inner);
640             self.process.push_work(work)
641         }
642     }
643 
644     pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
645         self.inner.lock().push_work_deferred(work);
646     }
647 
648     pub(crate) fn push_return_work(&self, reply: u32) {
649         self.inner.lock().push_return_work(reply);
650     }
651 
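    /// Translates a single object embedded in the transaction payload so that it is valid in the
    /// target process: binder and handle objects are resolved to node references and transferred,
    /// file descriptors are staged for installation in the target, and `BINDER_TYPE_PTR`/
    /// `BINDER_TYPE_FDA` objects record scatter-gather entries and pointer fixups in `sg_state`.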
652     fn translate_object(
653         &self,
654         obj_index: usize,
655         offset: usize,
656         object: BinderObjectRef<'_>,
657         view: &mut AllocationView<'_>,
658         allow_fds: bool,
659         sg_state: &mut ScatterGatherState,
660     ) -> BinderResult {
661         match object {
662             BinderObjectRef::Binder(obj) => {
663                 let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
664                 // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
665                 // representation.
666                 let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
667                 let cookie = obj.cookie as _;
668                 let flags = obj.flags as _;
669                 let node = self
670                     .process
671                     .as_arc_borrow()
672                     .get_node(ptr, cookie, flags, strong, self)?;
673                 security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
674                 view.transfer_binder_object(offset, obj, strong, node)?;
675             }
676             BinderObjectRef::Handle(obj) => {
677                 let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
678                 // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
679                 let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
680                 let node = self.process.get_node_from_handle(handle, strong)?;
681                 security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
682                 view.transfer_binder_object(offset, obj, strong, node)?;
683             }
684             BinderObjectRef::Fd(obj) => {
685                 if !allow_fds {
686                     return Err(EPERM.into());
687                 }
688 
689                 // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
690                 let fd = unsafe { obj.__bindgen_anon_1.fd };
691                 let file = LocalFile::fget(fd)?;
692                 // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
693                 // ioctl, so there are no active calls to `fdget_pos` on this thread.
694                 let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
695                 security::binder_transfer_file(
696                     &self.process.cred,
697                     &view.alloc.process.cred,
698                     &file,
699                 )?;
700 
701                 let mut obj_write = BinderFdObject::default();
702                 obj_write.hdr.type_ = BINDER_TYPE_FD;
703                 // This will be overwritten with the actual fd when the transaction is received.
704                 obj_write.__bindgen_anon_1.fd = u32::MAX;
705                 obj_write.cookie = obj.cookie;
706                 view.write::<BinderFdObject>(offset, &obj_write)?;
707 
708                 const FD_FIELD_OFFSET: usize =
709                     core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
710 
711                 let field_offset = offset + FD_FIELD_OFFSET;
712 
713                 view.alloc.info_add_fd(file, field_offset, false)?;
714             }
715             BinderObjectRef::Ptr(obj) => {
716                 let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
717                 let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
718                     Ok(alloc_offset) => alloc_offset,
719                     Err(err) => {
720                         pr_warn!(
721                             "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
722                             sg_state.unused_buffer_space.offset,
723                             sg_state.unused_buffer_space.limit,
724                             obj_length,
725                         );
726                         return Err(err.into());
727                     }
728                 };
729 
730                 let sg_state_idx = sg_state.sg_entries.len();
731                 sg_state.sg_entries.push(
732                     ScatterGatherEntry {
733                         obj_index,
734                         offset: alloc_offset,
735                         sender_uaddr: obj.buffer as _,
736                         length: obj_length,
737                         pointer_fixups: KVec::new(),
738                         fixup_min_offset: 0,
739                     },
740                     GFP_KERNEL,
741                 )?;
742 
743                 let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
744 
745                 if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
746                     sg_state.ancestors.clear();
747                     sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
748                 } else {
749                     // Another buffer also has a pointer to this buffer, and we need to fixup that
750                     // pointer too.
751 
752                     let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
753                     let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
754 
755                     let info = sg_state.validate_parent_fixup(
756                         parent_index,
757                         parent_offset,
758                         size_of::<u64>(),
759                     )?;
760 
761                     sg_state.ancestors.truncate(info.num_ancestors);
762                     sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
763 
764                     let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
765                         Some(parent_entry) => parent_entry,
766                         None => {
767                             pr_err!(
768                                 "validate_parent_fixup returned index out of bounds for sg.entries"
769                             );
770                             return Err(EINVAL.into());
771                         }
772                     };
773 
774                     parent_entry.fixup_min_offset = info.new_min_offset;
775                     parent_entry.pointer_fixups.push(
776                         PointerFixupEntry::Fixup {
777                             pointer_value: buffer_ptr_in_user_space,
778                             target_offset: info.target_offset,
779                         },
780                         GFP_KERNEL,
781                     )?;
782                 }
783 
784                 let mut obj_write = BinderBufferObject::default();
785                 obj_write.hdr.type_ = BINDER_TYPE_PTR;
786                 obj_write.flags = obj.flags;
787                 obj_write.buffer = buffer_ptr_in_user_space;
788                 obj_write.length = obj.length;
789                 obj_write.parent = obj.parent;
790                 obj_write.parent_offset = obj.parent_offset;
791                 view.write::<BinderBufferObject>(offset, &obj_write)?;
792             }
793             BinderObjectRef::Fda(obj) => {
794                 if !allow_fds {
795                     return Err(EPERM.into());
796                 }
797                 let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
798                 let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
799                 let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
800                 let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
801 
802                 if !is_aligned(parent_offset, size_of::<u32>()) {
803                     return Err(EINVAL.into());
804                 }
805 
806                 let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
807                 view.alloc.info_add_fd_reserve(num_fds)?;
808 
809                 sg_state.ancestors.truncate(info.num_ancestors);
810                 let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
811                     Some(parent_entry) => parent_entry,
812                     None => {
813                         pr_err!(
814                             "validate_parent_fixup returned index out of bounds for sg.entries"
815                         );
816                         return Err(EINVAL.into());
817                     }
818                 };
819 
820                 if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
821                     return Err(EINVAL.into());
822                 }
823 
824                 parent_entry.fixup_min_offset = info.new_min_offset;
825                 parent_entry
826                     .pointer_fixups
827                     .push(
828                         PointerFixupEntry::Skip {
829                             skip: fds_len,
830                             target_offset: info.target_offset,
831                         },
832                         GFP_KERNEL,
833                     )
834                     .map_err(|_| ENOMEM)?;
835 
836                 let fda_uaddr = parent_entry
837                     .sender_uaddr
838                     .checked_add(parent_offset)
839                     .ok_or(EINVAL)?;
840 
841                 let mut fda_bytes = KVec::new();
842                 UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
843                     .read_all(&mut fda_bytes, GFP_KERNEL)?;
844 
845                 if fds_len != fda_bytes.len() {
846                     pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
847                     return Err(EINVAL.into());
848                 }
849 
850                 for i in (0..fds_len).step_by(size_of::<u32>()) {
851                     let fd = {
852                         let mut fd_bytes = [0u8; size_of::<u32>()];
853                         fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
854                         u32::from_ne_bytes(fd_bytes)
855                     };
856 
857                     let file = LocalFile::fget(fd)?;
858                     // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
859                     // ioctl, so there are no active calls to `fdget_pos` on this thread.
860                     let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
861                     security::binder_transfer_file(
862                         &self.process.cred,
863                         &view.alloc.process.cred,
864                         &file,
865                     )?;
866 
867                     // The `validate_parent_fixup` call ensures that this addition will not
868                     // overflow.
869                     view.alloc.info_add_fd(file, info.target_offset + i, true)?;
870                 }
871                 drop(fda_bytes);
872 
873                 let mut obj_write = BinderFdArrayObject::default();
874                 obj_write.hdr.type_ = BINDER_TYPE_FDA;
875                 obj_write.num_fds = obj.num_fds;
876                 obj_write.parent = obj.parent;
877                 obj_write.parent_offset = obj.parent_offset;
878                 view.write::<BinderFdArrayObject>(offset, &obj_write)?;
879             }
880         }
881         Ok(())
882     }
883 
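    /// Copies the extra buffers described by `sg_state` from the sender into the target
    /// allocation, applying the pointer fixups (and the skips recorded for fd arrays) collected
    /// by `translate_object`.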
884     fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
885         for sg_entry in &mut sg_state.sg_entries {
886             let mut end_of_previous_fixup = sg_entry.offset;
887             let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
888 
889             let mut reader =
890                 UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
891             for fixup in &mut sg_entry.pointer_fixups {
892                 let (fixup_len, fixup_offset) = match fixup {
893                     PointerFixupEntry::Fixup { target_offset, .. } => {
894                         (size_of::<u64>(), *target_offset)
895                     }
896                     PointerFixupEntry::Skip {
897                         skip,
898                         target_offset,
899                     } => (*skip, *target_offset),
900                 };
901 
902                 let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
903                 if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
904                     pr_warn!(
905                         "Fixups oob {} {} {} {}",
906                         fixup_offset,
907                         end_of_previous_fixup,
908                         offset_end,
909                         target_offset_end
910                     );
911                     return Err(EINVAL.into());
912                 }
913 
914                 let copy_off = end_of_previous_fixup;
915                 let copy_len = fixup_offset - end_of_previous_fixup;
916                 if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
917                     pr_warn!("Failed copying into alloc: {:?}", err);
918                     return Err(err.into());
919                 }
920                 if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
921                     let res = alloc.write::<u64>(fixup_offset, pointer_value);
922                     if let Err(err) = res {
923                         pr_warn!("Failed copying ptr into alloc: {:?}", err);
924                         return Err(err.into());
925                     }
926                 }
927                 if let Err(err) = reader.skip(fixup_len) {
928                     pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
929                     return Err(err.into());
930                 }
931                 end_of_previous_fixup = target_offset_end;
932             }
933             let copy_off = end_of_previous_fixup;
934             let copy_len = offset_end - end_of_previous_fixup;
935             if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
936                 pr_warn!("Failed copying remainder into alloc: {:?}", err);
937                 return Err(err.into());
938             }
939         }
940         Ok(())
941     }
942 
943     /// This method copies the payload of a transaction into the target process.
944     ///
945     /// The resulting payload will have several different components, which will be stored next to
946     /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
947     /// and those objects have to be translated so that they make sense to the target process.
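    ///
    /// The allocation produced here is laid out as the transaction data, followed by the offsets
    /// array, then the extra scatter-gather buffers, and finally the security context (when one
    /// is attached), with each segment sized and aligned as computed below.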
948     pub(crate) fn copy_transaction_data(
949         &self,
950         to_process: Arc<Process>,
951         tr: &BinderTransactionDataSg,
952         debug_id: usize,
953         allow_fds: bool,
954         txn_security_ctx_offset: Option<&mut usize>,
955     ) -> BinderResult<NewAllocation> {
956         let trd = &tr.transaction_data;
957         let is_oneway = trd.flags & TF_ONE_WAY != 0;
958         let mut secctx = if let Some(offset) = txn_security_ctx_offset {
959             let secid = self.process.cred.get_secid();
960             let ctx = match security::SecurityCtx::from_secid(secid) {
961                 Ok(ctx) => ctx,
962                 Err(err) => {
963                     pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
964                     return Err(err.into());
965                 }
966             };
967             Some((offset, ctx))
968         } else {
969             None
970         };
971 
972         let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
973         let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
974         let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
975         let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
976         let aligned_secctx_size = match secctx.as_ref() {
977             Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
978             None => 0,
979         };
980 
981         if !is_aligned(offsets_size, size_of::<u64>()) {
982             return Err(EINVAL.into());
983         }
984         if !is_aligned(buffers_size, size_of::<u64>()) {
985             return Err(EINVAL.into());
986         }
987 
988         // This guarantees that at least `size_of::<u64>()` bytes will be allocated.
989         let len = usize::max(
990             aligned_data_size
991                 .checked_add(offsets_size)
992                 .and_then(|sum| sum.checked_add(buffers_size))
993                 .and_then(|sum| sum.checked_add(aligned_secctx_size))
994                 .ok_or(ENOMEM)?,
995             size_of::<u64>(),
996         );
997         let secctx_off = aligned_data_size + offsets_size + buffers_size;
998         let mut alloc =
999             match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
1000                 Ok(alloc) => alloc,
1001                 Err(err) => {
1002                     pr_warn!(
1003                         "Failed to allocate buffer. len:{}, is_oneway:{}",
1004                         len,
1005                         is_oneway
1006                     );
1007                     return Err(err);
1008                 }
1009             };
1010 
1011         // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
1012         // all bit-patterns.
1013         let trd_data_ptr = unsafe { &trd.data.ptr };
1014         let mut buffer_reader =
1015             UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
1016         let mut end_of_previous_object = 0;
1017         let mut sg_state = None;
1018 
1019         // Copy offsets if there are any.
1020         if offsets_size > 0 {
1021             {
1022                 let mut reader =
1023                     UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
1024                         .reader();
1025                 alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
1026             }
1027 
1028             let offsets_start = aligned_data_size;
1029             let offsets_end = aligned_data_size + offsets_size;
1030 
1031             // This state is used for BINDER_TYPE_PTR objects.
1032             let sg_state = sg_state.insert(ScatterGatherState {
1033                 unused_buffer_space: UnusedBufferSpace {
1034                     offset: offsets_end,
1035                     limit: offsets_end + buffers_size,
1036                 },
1037                 sg_entries: KVec::new(),
1038                 ancestors: KVec::new(),
1039             });
1040 
1041             // Traverse the objects specified.
1042             let mut view = AllocationView::new(&mut alloc, data_size);
1043             for (index, index_offset) in (offsets_start..offsets_end)
1044                 .step_by(size_of::<u64>())
1045                 .enumerate()
1046             {
1047                 let offset: usize = view
1048                     .alloc
1049                     .read::<u64>(index_offset)?
1050                     .try_into()
1051                     .map_err(|_| EINVAL)?;
1052 
1053                 if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
1054                     pr_warn!("Got transaction with invalid offset.");
1055                     return Err(EINVAL.into());
1056                 }
1057 
1058                 // Copy data between two objects.
1059                 if end_of_previous_object < offset {
1060                     view.copy_into(
1061                         &mut buffer_reader,
1062                         end_of_previous_object,
1063                         offset - end_of_previous_object,
1064                     )?;
1065                 }
1066 
1067                 let mut object = BinderObject::read_from(&mut buffer_reader)?;
1068 
1069                 match self.translate_object(
1070                     index,
1071                     offset,
1072                     object.as_ref(),
1073                     &mut view,
1074                     allow_fds,
1075                     sg_state,
1076                 ) {
1077                     Ok(()) => end_of_previous_object = offset + object.size(),
1078                     Err(err) => {
1079                         pr_warn!("Error while translating object.");
1080                         return Err(err);
1081                     }
1082                 }
1083 
1084                 // Update the indexes containing objects to clean up.
1085                 let offset_after_object = index_offset + size_of::<u64>();
1086                 view.alloc
1087                     .set_info_offsets(offsets_start..offset_after_object);
1088             }
1089         }
1090 
1091         // Copy remaining raw data.
1092         alloc.copy_into(
1093             &mut buffer_reader,
1094             end_of_previous_object,
1095             data_size - end_of_previous_object,
1096         )?;
1097 
1098         if let Some(sg_state) = sg_state.as_mut() {
1099             if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
1100                 pr_warn!("Failure in apply_sg: {:?}", err);
1101                 return Err(err);
1102             }
1103         }
1104 
1105         if let Some((off_out, secctx)) = secctx.as_mut() {
1106             if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
1107                 pr_warn!("Failed to write security context: {:?}", err);
1108                 return Err(err.into());
1109             }
1110             **off_out = secctx_off;
1111         }
1112         Ok(alloc)
1113     }
1114 
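    /// Walks the transaction stack starting at this thread, delivering `BR_DEAD_REPLY` for each
    /// pending transaction for as long as the thread that should receive the reply is itself
    /// dead.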
1115     fn unwind_transaction_stack(self: &Arc<Self>) {
1116         let mut thread = self.clone();
1117         while let Ok(transaction) = {
1118             let mut inner = thread.inner.lock();
1119             inner.pop_transaction_to_reply(thread.as_ref())
1120         } {
1121             let reply = Err(BR_DEAD_REPLY);
1122             if !transaction.from.deliver_single_reply(reply, &transaction) {
1123                 break;
1124             }
1125 
1126             thread = transaction.from.clone();
1127         }
1128     }
1129 
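    /// Delivers a reply (or error code) for `transaction` to this thread, which initiated it,
    /// and unwinds the rest of the transaction stack if this thread turns out to be dead.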
1130     pub(crate) fn deliver_reply(
1131         &self,
1132         reply: Result<DLArc<Transaction>, u32>,
1133         transaction: &DArc<Transaction>,
1134     ) {
1135         if self.deliver_single_reply(reply, transaction) {
1136             transaction.from.unwind_transaction_stack();
1137         }
1138     }
1139 
1140     /// Delivers a reply to the thread that started a transaction. The reply can either be a
1141     /// reply-transaction or an error code to be delivered instead.
1142     ///
1143     /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
1144     /// transaction stack by completing transactions for threads that are dead.
1145     fn deliver_single_reply(
1146         &self,
1147         reply: Result<DLArc<Transaction>, u32>,
1148         transaction: &DArc<Transaction>,
1149     ) -> bool {
1150         if let Ok(transaction) = &reply {
1151             transaction.set_outstanding(&mut self.process.inner.lock());
1152         }
1153 
1154         {
1155             let mut inner = self.inner.lock();
1156             if !inner.pop_transaction_replied(transaction) {
1157                 return false;
1158             }
1159 
1160             if inner.is_dead {
1161                 return true;
1162             }
1163 
1164             match reply {
1165                 Ok(work) => {
1166                     inner.push_work(work);
1167                 }
1168                 Err(code) => inner.push_reply_work(code),
1169             }
1170         }
1171 
1172         // Notify the thread now that we've released the inner lock.
1173         self.work_condvar.notify_sync();
1174         false
1175     }
1176 
1177     /// Determines if the given transaction is the current transaction for this thread.
1178     fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
1179         let inner = self.inner.lock();
1180         match &inner.current_transaction {
1181             None => false,
1182             Some(current) => Arc::ptr_eq(current, transaction),
1183         }
1184     }
1185 
1186     /// Determines the current top of the transaction stack. It fails if the top is in another
1187     /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
1188     /// [`None`] if the thread is not currently participating in a transaction stack.
1189     fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
1190         let inner = self.inner.lock();
1191         if let Some(cur) = &inner.current_transaction {
1192             if core::ptr::eq(self, cur.from.as_ref()) {
1193                 pr_warn!("got new transaction with bad transaction stack");
1194                 return Err(EINVAL);
1195             }
1196             Ok(Some(cur.clone()))
1197         } else {
1198             Ok(None)
1199         }
1200     }
1201 
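    /// Runs the transaction handler `inner` and, on failure, records the error in this thread's
    /// extended error state (when warranted) and queues the matching error return code for
    /// userspace.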
1202     fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
1203     where
1204         T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
1205     {
1206         if let Err(err) = inner(self, tr) {
1207             if err.should_pr_warn() {
1208                 let mut ee = self.inner.lock().extended_error;
1209                 ee.command = err.reply;
1210                 ee.param = err.as_errno();
1211                 pr_warn!(
1212                     "Transaction failed: {:?} my_pid:{}",
1213                     err,
1214                     self.process.pid_in_current_ns()
1215                 );
1216             }
1217 
1218             self.push_return_work(err.reply);
1219         }
1220     }
1221 
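    /// Handles a synchronous `BC_TRANSACTION`/`BC_TRANSACTION_SG`: looks up the target node,
    /// pushes the new transaction onto this thread's transaction stack, submits it, and queues a
    /// deferred `BR_TRANSACTION_COMPLETE` so that the reply is awaited before returning to
    /// userspace.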
1222     fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1223         // SAFETY: Handle's type has no invalid bit patterns.
1224         let handle = unsafe { tr.transaction_data.target.handle };
1225         let node_ref = self.process.get_transaction_node(handle)?;
1226         security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
1227         // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
1228         // could this happen?
1229         let top = self.top_of_transaction_stack()?;
1230         let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
1231         let completion = list_completion.clone_arc();
1232         let transaction = Transaction::new(node_ref, top, self, tr)?;
1233 
1234         // Check that the transaction stack hasn't changed while the lock was released, then update
1235         // it with the new transaction.
1236         {
1237             let mut inner = self.inner.lock();
1238             if !transaction.is_stacked_on(&inner.current_transaction) {
1239                 pr_warn!("Transaction stack changed during transaction!");
1240                 return Err(EINVAL.into());
1241             }
1242             inner.current_transaction = Some(transaction.clone_arc());
1243             // We push the completion as a deferred work so that we wait for the reply before
1244             // returning to userland.
1245             inner.push_work_deferred(list_completion);
1246         }
1247 
1248         if let Err(e) = transaction.submit() {
1249             completion.skip();
1250             // Define `transaction` first to drop it after `inner`.
1251             let transaction;
1252             let mut inner = self.inner.lock();
1253             transaction = inner.current_transaction.take().unwrap();
1254             inner.current_transaction = transaction.clone_next();
1255             Err(e)
1256         } else {
1257             Ok(())
1258         }
1259     }
1260 
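    /// Handles `BC_REPLY`/`BC_REPLY_SG` by popping the transaction this thread should reply to
    /// and delivering either the reply or `BR_FAILED_REPLY` to the thread that sent it.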
1261     fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1262         let orig = self.inner.lock().pop_transaction_to_reply(self)?;
1263         if !orig.from.is_current_transaction(&orig) {
1264             return Err(EINVAL.into());
1265         }
1266 
1267         // We need to complete the transaction even if we cannot complete building the reply.
1268         let out = (|| -> BinderResult<_> {
1269             let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
1270             let process = orig.from.process.clone();
1271             let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
1272             let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
1273             self.inner.lock().push_work(completion);
1274             orig.from.deliver_reply(Ok(reply), &orig);
1275             Ok(())
1276         })()
1277         .map_err(|mut err| {
1278             // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
1279             // the sender know that the transaction has completed (with an error in this case).
1280             pr_warn!(
1281                 "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
1282                 err
1283             );
1284             let reply = Err(BR_FAILED_REPLY);
1285             orig.from.deliver_reply(reply, &orig);
1286             err.reply = BR_TRANSACTION_COMPLETE;
1287             err
1288         });
1289 
1290         out
1291     }
1292 
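    /// Handles a oneway (`TF_ONE_WAY`) transaction: queues `BR_TRANSACTION_COMPLETE` (or
    /// `BR_ONEWAY_SPAM_SUSPECT` when oneway spam is suspected) for this thread and submits the
    /// transaction without expecting a reply.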
1293     fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1294         // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
1295         // union is okay.
1296         let handle = unsafe { tr.transaction_data.target.handle };
1297         let node_ref = self.process.get_transaction_node(handle)?;
1298         security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
1299         let transaction = Transaction::new(node_ref, None, self, tr)?;
1300         let code = if self.process.is_oneway_spam_detection_enabled()
1301             && transaction.oneway_spam_detected
1302         {
1303             BR_ONEWAY_SPAM_SUSPECT
1304         } else {
1305             BR_TRANSACTION_COMPLETE
1306         };
1307         let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
1308         let completion = list_completion.clone_arc();
1309         self.inner.lock().push_work(list_completion);
1310         match transaction.submit() {
1311             Ok(()) => Ok(()),
1312             Err(err) => {
1313                 completion.skip();
1314                 Err(err)
1315             }
1316         }
1317     }
1318 
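    /// Processes the commands in the write buffer of a `BINDER_WRITE_READ` ioctl, dispatching
    /// each `BC_*` command in turn. Processing stops early once a return work item becomes
    /// pending for this thread.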
1319     fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
1320         let write_start = req.write_buffer.wrapping_add(req.write_consumed);
1321         let write_len = req.write_size.saturating_sub(req.write_consumed);
1322         let mut reader =
1323             UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();
1324 
1325         while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
1326             let before = reader.len();
1327             let cmd = reader.read::<u32>()?;
1328             GLOBAL_STATS.inc_bc(cmd);
1329             self.process.stats.inc_bc(cmd);
1330             match cmd {
1331                 BC_TRANSACTION => {
1332                     let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
1333                     if tr.transaction_data.flags & TF_ONE_WAY != 0 {
1334                         self.transaction(&tr, Self::oneway_transaction_inner);
1335                     } else {
1336                         self.transaction(&tr, Self::transaction_inner);
1337                     }
1338                 }
1339                 BC_TRANSACTION_SG => {
1340                     let tr = reader.read::<BinderTransactionDataSg>()?;
1341                     if tr.transaction_data.flags & TF_ONE_WAY != 0 {
1342                         self.transaction(&tr, Self::oneway_transaction_inner);
1343                     } else {
1344                         self.transaction(&tr, Self::transaction_inner);
1345                     }
1346                 }
1347                 BC_REPLY => {
1348                     let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
1349                     self.transaction(&tr, Self::reply_inner)
1350                 }
1351                 BC_REPLY_SG => {
1352                     let tr = reader.read::<BinderTransactionDataSg>()?;
1353                     self.transaction(&tr, Self::reply_inner)
1354                 }
1355                 BC_FREE_BUFFER => {
1356                     let buffer = self.process.buffer_get(reader.read()?);
1357                     if let Some(buffer) = buffer {
1358                         if buffer.looper_need_return_on_free() {
1359                             self.inner.lock().looper_need_return = true;
1360                         }
1361                         drop(buffer);
1362                     }
1363                 }
1364                 BC_INCREFS => {
1365                     self.process
1366                         .as_arc_borrow()
1367                         .update_ref(reader.read()?, true, false)?
1368                 }
1369                 BC_ACQUIRE => {
1370                     self.process
1371                         .as_arc_borrow()
1372                         .update_ref(reader.read()?, true, true)?
1373                 }
1374                 BC_RELEASE => {
1375                     self.process
1376                         .as_arc_borrow()
1377                         .update_ref(reader.read()?, false, true)?
1378                 }
1379                 BC_DECREFS => {
1380                     self.process
1381                         .as_arc_borrow()
1382                         .update_ref(reader.read()?, false, false)?
1383                 }
1384                 BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
1385                 BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
1386                 BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
1387                 BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
1388                 BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
1389                 BC_REGISTER_LOOPER => {
1390                     let valid = self.process.register_thread();
1391                     self.inner.lock().looper_register(valid);
1392                 }
1393                 BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
1394                 BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
1395                 BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
1396                 BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
1397                 BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
1398 
1399                 // Fail if given an unknown command code.
1400                 // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
1401                 _ => return Err(EINVAL),
1402             }
1403             // Update the number of write bytes consumed.
1404             req.write_consumed += (before - reader.len()) as u64;
1405         }
1406 
1407         Ok(())
1408     }
1409 
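         /// Processes the read half of a `BINDER_WRITE_READ` request, popping work items and
         /// writing the corresponding `BR_*` return commands into the caller's read buffer.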
1410     fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
1411         let read_start = req.read_buffer.wrapping_add(req.read_consumed);
1412         let read_len = req.read_size.saturating_sub(req.read_consumed);
1413         let mut writer = BinderReturnWriter::new(
1414             UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
1415             self,
1416         );
1417         let (in_pool, use_proc_queue) = {
1418             let inner = self.inner.lock();
1419             (inner.is_looper(), inner.should_use_process_work_queue())
1420         };
1421 
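             // Choose the work source: the process-wide queue if this thread may serve it,
             // otherwise only this thread's own queue.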
1422         let getter = if use_proc_queue {
1423             Self::get_work
1424         } else {
1425             Self::get_work_local
1426         };
1427 
1428         // Reserve some room at the beginning of the read buffer so that we can send a
1429         // BR_SPAWN_LOOPER if we need to.
1430         let mut has_noop_placeholder = false;
1431         if req.read_consumed == 0 {
1432             if let Err(err) = writer.write_code(BR_NOOP) {
1433                 pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
1434                 return Err(err);
1435             }
1436             has_noop_placeholder = true;
1437         }
1438 
1439         // Loop doing work while there is room in the buffer.
1440         let initial_len = writer.len();
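             // Only block for new work if the caller asked to wait and nothing has been written
             // beyond the placeholder yet.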
1441         while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
1442             match getter(self, wait && initial_len == writer.len()) {
1443                 Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
1444                     Ok(true) => {}
1445                     Ok(false) => break,
1446                     Err(err) => {
1447                         return Err(err);
1448                     }
1449                 },
1450                 Ok(None) => {
1451                     break;
1452                 }
1453                 Err(err) => {
1454                     // Propagate the error if we haven't written anything else.
1455                     if err != EINTR && err != EAGAIN {
1456                         pr_warn!("Failure in work getter: {:?}", err);
1457                     }
1458                     if initial_len == writer.len() {
1459                         return Err(err);
1460                     } else {
1461                         break;
1462                     }
1463                 }
1464             }
1465         }
1466 
1467         req.read_consumed += read_len - writer.len() as u64;
1468 
1469         // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
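             // This overwrites the BR_NOOP placeholder written at the start of the buffer.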
1470         if has_noop_placeholder && in_pool && self.process.needs_thread() {
1471             let mut writer =
1472                 UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
1473                     .writer();
1474             writer.write(&BR_SPAWN_LOOPER)?;
1475         }
1476         Ok(())
1477     }
1478 
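         /// Handles the `BINDER_WRITE_READ` ioctl: drains the write buffer first, then fills the
         /// read buffer, and copies the consumed counts back to the caller.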
1479     pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
1480         let (mut reader, mut writer) = data.reader_writer();
1481         let mut req = reader.read::<BinderWriteRead>()?;
1482 
1483         // Go through the write buffer.
1484         let mut ret = Ok(());
1485         if req.write_size > 0 {
1486             ret = self.write(&mut req);
1487             if let Err(err) = ret {
1488                 pr_warn!(
1489                     "Write failure {:?} in pid:{}",
1490                     err,
1491                     self.process.pid_in_current_ns()
1492                 );
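                     // Report no read progress, but still copy the consumed counts back before
                     // returning the error.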
1493                 req.read_consumed = 0;
1494                 writer.write(&req)?;
1495                 self.inner.lock().looper_need_return = false;
1496                 return ret;
1497             }
1498         }
1499 
1500         // Go through the work queue.
1501         if req.read_size > 0 {
1502             ret = self.read(&mut req, wait);
1503             if ret.is_err() && ret != Err(EINTR) {
1504                 pr_warn!(
1505                     "Read failure {:?} in pid:{}",
1506                     ret,
1507                     self.process.pid_in_current_ns()
1508                 );
1509             }
1510         }
1511 
1512         // Write the request back so that the consumed fields are visible to the caller.
1513         writer.write(&req)?;
1514 
1515         self.inner.lock().looper_need_return = false;
1516 
1517         ret
1518     }
1519 
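         /// Registers a wait on this thread's work condvar and returns whether this thread serves
         /// the process-wide work queue, together with its current poll events.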
1520     pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
1521         table.register_wait(file, &self.work_condvar);
1522         let mut inner = self.inner.lock();
1523         (inner.should_use_process_work_queue(), inner.poll())
1524     }
1525 
1526     /// If a call to `get_work` or `get_work_local` is in progress, make it return immediately.
1527     pub(crate) fn exit_looper(&self) {
1528         let mut inner = self.inner.lock();
1529         let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
1530         if should_notify {
1531             inner.looper_need_return = true;
1532         }
1533         drop(inner);
1534 
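             // The wakeup, if needed, happens after the lock has been dropped.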
1535         if should_notify {
1536             self.work_condvar.notify_one();
1537         }
1538     }
1539 
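         /// Wakes this thread if it is polling for work and is currently serving the process
         /// work queue.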
1540     pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
1541         // Determine if we need to notify. This requires the lock.
1542         let inner = self.inner.lock();
1543         let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
1544         drop(inner);
1545 
1546         // Now that the lock is no longer held, notify the waiters if we have to.
1547         if notify {
1548             if sync {
1549                 self.work_condvar.notify_sync();
1550             } else {
1551                 self.work_condvar.notify_one();
1552             }
1553         }
1554     }
1555 
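         /// Marks this thread as dead, unwinds its transaction stack, and cancels any work still
         /// queued locally on it.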
1556     pub(crate) fn release(self: &Arc<Self>) {
1557         self.inner.lock().is_dead = true;
1558 
1559         //self.work_condvar.clear();
1560         self.unwind_transaction_stack();
1561 
1562         // Cancel all pending work items.
1563         while let Ok(Some(work)) = self.get_work_local(false) {
1564             work.into_arc().cancel();
1565         }
1566     }
1567 }
1568 
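     /// A work item that delivers a pending error code to the thread on its next read.
     /// A value of `BR_OK` means that no error is currently pending.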
1569 #[pin_data]
1570 struct ThreadError {
1571     error_code: AtomicU32,
1572     #[pin]
1573     links_track: AtomicTracker,
1574 }
1575 
1576 impl ThreadError {
1577     fn try_new() -> Result<DArc<Self>> {
1578         DTRWrap::arc_pin_init(pin_init!(Self {
1579             error_code: AtomicU32::new(BR_OK),
1580             links_track <- AtomicTracker::new(),
1581         }))
1582         .map(ListArc::into_arc)
1583     }
1584 
1585     fn set_error_code(&self, code: u32) {
1586         self.error_code.store(code, Ordering::Relaxed);
1587     }
1588 
1589     fn is_unused(&self) -> bool {
1590         self.error_code.load(Ordering::Relaxed) == BR_OK
1591     }
1592 }
1593 
1594 impl DeliverToRead for ThreadError {
1595     fn do_work(
1596         self: DArc<Self>,
1597         _thread: &Thread,
1598         writer: &mut BinderReturnWriter<'_>,
1599     ) -> Result<bool> {
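             // Report the stored code and reset it to BR_OK so this entry counts as unused again.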
1600         let code = self.error_code.load(Ordering::Relaxed);
1601         self.error_code.store(BR_OK, Ordering::Relaxed);
1602         writer.write_code(code)?;
1603         Ok(true)
1604     }
1605 
1606     fn cancel(self: DArc<Self>) {}
1607 
1608     fn should_sync_wakeup(&self) -> bool {
1609         false
1610     }
1611 
1612     fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
1613         seq_print!(
1614             m,
1615             "{}transaction error: {}\n",
1616             prefix,
1617             self.error_code.load(Ordering::Relaxed)
1618         );
1619         Ok(())
1620     }
1621 }
1622 
1623 kernel::list::impl_list_arc_safe! {
1624     impl ListArcSafe<0> for ThreadError {
1625         tracked_by links_track: AtomicTracker;
1626     }
1627 }
1628