xref: /linux/drivers/android/binder/thread.rs (revision 0923fd0419a1a2c8846e15deacac11b619e996d9)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 // Copyright (C) 2025 Google LLC.
4 
5 //! This module defines the `Thread` type, which represents a userspace thread that is using
6 //! binder.
7 //!
8 //! The `Process` object stores all of the threads in an rb tree.
9 
10 use kernel::{
11     bindings,
12     fs::{File, LocalFile},
13     list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
14     prelude::*,
15     security,
16     seq_file::SeqFile,
17     seq_print,
18     sync::atomic::{ordering::Relaxed, Atomic},
19     sync::poll::{PollCondVar, PollTable},
20     sync::{Arc, SpinLock},
21     task::Task,
22     types::ARef,
23     uaccess::{UserPtr, UserSlice},
24     uapi,
25 };
26 
27 use crate::{
28     allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
29     defs::*,
30     error::BinderResult,
31     process::{GetWorkOrRegister, Process},
32     ptr_align,
33     stats::GLOBAL_STATS,
34     transaction::Transaction,
35     BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
36 };
37 
38 use core::mem::size_of;
39 
40 fn is_aligned(value: usize, to: usize) -> bool {
41     value % to == 0
42 }
43 
44 /// Stores the layout of the scatter-gather entries. This is used during the `translate_object`
45 /// calls in `copy_transaction_data` and is discarded when translation completes.
46 struct ScatterGatherState {
47     /// A struct that tracks the amount of unused buffer space.
48     unused_buffer_space: UnusedBufferSpace,
49     /// Scatter-gather entries to copy.
50     sg_entries: KVec<ScatterGatherEntry>,
51     /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
52     /// was processed and all of its ancestors. The array is in sorted order.
53     ancestors: KVec<usize>,
54 }
55 
56 /// This entry specifies an additional buffer that should be copied using the scatter-gather
57 /// mechanism.
58 struct ScatterGatherEntry {
59     /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
60     obj_index: usize,
61     /// Offset in target buffer.
62     offset: usize,
63     /// User address in source buffer.
64     sender_uaddr: usize,
65     /// Number of bytes to copy.
66     length: usize,
67     /// The minimum offset of the next fixup in this buffer.
68     fixup_min_offset: usize,
69     /// The offsets within this buffer that contain pointers which should be translated.
70     pointer_fixups: KVec<PointerFixupEntry>,
71 }
72 
73 /// This entry specifies that a fixup should happen at `target_offset` of the
74 /// buffer.
75 enum PointerFixupEntry {
76     /// A fixup for a `binder_buffer_object`.
77     Fixup {
78         /// The translated pointer to write.
79         pointer_value: u64,
80         /// The offset at which the value should be written. The offset is relative
81         /// to the original buffer.
82         target_offset: usize,
83     },
84     /// A skip for a `binder_fd_array_object`.
85     Skip {
86         /// The number of bytes to skip.
87         skip: usize,
88         /// The offset at which the skip should happen. The offset is relative
89         /// to the original buffer.
90         target_offset: usize,
91     },
92 }
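
// An illustration (not driver code) of the two variants, with made-up values:
// a `Fixup` overwrites eight bytes at `target_offset` with the translated
// pointer, while a `Skip` marks a region that is not copied from the sender
// because it is filled in separately (the translated fd array).
//
//     let fixup = PointerFixupEntry::Fixup {
//         pointer_value: 0x7fff_0000_1000,
//         target_offset: 16,
//     };
//     let skip = PointerFixupEntry::Skip {
//         skip: 4 * size_of::<u32>(), // room for four fds
//         target_offset: 32,
//     };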
93 
94 /// Return type of `validate_parent_fixup`.
95 struct ParentFixupInfo {
96     /// The index of the parent buffer in `sg_entries`.
97     parent_sg_index: usize,
98     /// The number of ancestors of the buffer.
99     ///
100     /// The buffer is considered an ancestor of itself, so this is always at
101     /// least one.
102     num_ancestors: usize,
103     /// New value of `fixup_min_offset` if this fixup is applied.
104     new_min_offset: usize,
105     /// The offset of the fixup in the target buffer.
106     target_offset: usize,
107 }
108 
109 impl ScatterGatherState {
110     /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
111     /// to access a region in its parent buffer. These accesses have various
112     /// restrictions, which this method verifies.
113     ///
114     /// The `parent_offset` and `length` arguments describe the offset and
115     /// length of the access in the parent buffer.
116     ///
117     /// # Detailed restrictions
118     ///
119     /// Obviously the fixup must be in-bounds for the parent buffer.
120     ///
121     /// For safety reasons, we only allow fixups inside a buffer to happen
122     /// at increasing offsets; additionally, we only allow fixup on the last
123     /// buffer object that was verified, or one of its parents.
124     ///
125     /// Example of what is allowed:
126     ///
127     /// A
128     ///   B (parent = A, offset = 0)
129     ///   C (parent = A, offset = 16)
130     ///     D (parent = C, offset = 0)
131     ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
132     ///
133     /// Examples of what is not allowed:
134     ///
135     /// Decreasing offsets within the same parent:
136     /// A
137     ///   C (parent = A, offset = 16)
138     ///   B (parent = A, offset = 0) // decreasing offset within A
139     ///
140     /// Referring to a parent that wasn't the last object or any of its parents:
141     /// A
142     ///   B (parent = A, offset = 0)
143     ///   C (parent = A, offset = 16)
144     ///     D (parent = B, offset = 0) // B is not the last object (C)
145     ///                                //   or any of its ancestors
146     fn validate_parent_fixup(
147         &self,
148         parent: usize,
149         parent_offset: usize,
150         length: usize,
151     ) -> Result<ParentFixupInfo> {
152         // Using `position` would also be correct, but `rposition` avoids
153         // quadratic running times.
154         let ancestors_i = self
155             .ancestors
156             .iter()
157             .copied()
158             .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
159             .ok_or(EINVAL)?;
160         let sg_idx = self.ancestors[ancestors_i];
161         let sg_entry = match self.sg_entries.get(sg_idx) {
162             Some(sg_entry) => sg_entry,
163             None => {
164                 pr_err!(
165                     "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
166                     ancestors_i,
167                     sg_idx,
168                     self.sg_entries.len()
169                 );
170                 return Err(EINVAL);
171             }
172         };
173         if sg_entry.fixup_min_offset > parent_offset {
174             pr_warn!(
175                 "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
176                 sg_entry.fixup_min_offset,
177                 parent_offset
178             );
179             return Err(EINVAL);
180         }
181         let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
182         if new_min_offset > sg_entry.length {
183             pr_warn!(
184                 "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
185                 new_min_offset,
186                 sg_entry.length
187             );
188             return Err(EINVAL);
189         }
190         let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
191         // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
192         // most `self.ancestors.len()`, which also fits in a usize.
193         Ok(ParentFixupInfo {
194             parent_sg_index: sg_idx,
195             num_ancestors: ancestors_i + 1,
196             new_min_offset,
197             target_offset,
198         })
199     }
200 }
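
// A walk-through of the allowed example from the doc-comment above, assuming
// every pointer fixup is `size_of::<u64>()` = 8 bytes wide:
//
//     validate B (parent = A, offset = 0):  A.fixup_min_offset becomes 8,
//                                           ancestors = [A, B]
//     validate C (parent = A, offset = 16): A.fixup_min_offset becomes 24,
//                                           ancestors = [A, C]
//     validate D (parent = C, offset = 0):  C.fixup_min_offset becomes 8,
//                                           ancestors = [A, C, D]
//     validate E (parent = A, offset = 32): `rposition` finds A, and
//                                           32 >= A.fixup_min_offset (24),
//                                           so the fixup is accepted.
//
// Note that the `ancestors` updates happen in the caller (`translate_object`),
// which truncates to `num_ancestors` and then pushes the new entry.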
201 
202 /// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
203 /// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
204 /// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
205 struct UnusedBufferSpace {
206     /// The start of the remaining space.
207     offset: usize,
208     /// The end of the remaining space.
209     limit: usize,
210 }
211 impl UnusedBufferSpace {
212     /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
213     /// into the buffer is returned.
214     fn claim_next(&mut self, size: usize) -> Result<usize> {
215         // We require every chunk to be aligned.
216         let size = ptr_align(size).ok_or(EINVAL)?;
217         let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
218 
219         if new_offset <= self.limit {
220             let offset = self.offset;
221             self.offset = new_offset;
222             Ok(offset)
223         } else {
224             Err(EINVAL)
225         }
226     }
227 }
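
// A sketch of how claims are handed out, assuming `ptr_align` rounds up to an
// 8-byte boundary (offsets are illustrative; in practice the space starts at
// the end of the offsets section):
//
//     let mut space = UnusedBufferSpace { offset: 0, limit: 32 };
//     assert_eq!(space.claim_next(5).unwrap(), 0);  // 5 is rounded up to 8
//     assert_eq!(space.claim_next(8).unwrap(), 8);
//     assert_eq!(space.claim_next(16).unwrap(), 16); // exactly reaches the limit
//     assert!(space.claim_next(1).is_err());         // out of space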
228 
229 pub(crate) enum PushWorkRes {
230     Ok,
231     FailedDead(DLArc<dyn DeliverToRead>),
232 }
233 
234 impl PushWorkRes {
235     fn is_ok(&self) -> bool {
236         match self {
237             PushWorkRes::Ok => true,
238             PushWorkRes::FailedDead(_) => false,
239         }
240     }
241 }
242 
243 /// The fields of `Thread` protected by the spinlock.
244 struct InnerThread {
245     /// Determines the looper state of the thread. It is a bit-wise combination of the constants
246     /// prefixed with `LOOPER_`.
247     looper_flags: u32,
248 
249     /// Determines whether the looper should return.
250     looper_need_return: bool,
251 
252     /// Determines if thread is dead.
253     is_dead: bool,
254 
255     /// Work item used to deliver error codes to the thread that started a transaction. Stored here
256     /// so that it can be reused.
257     reply_work: DArc<ThreadError>,
258 
259     /// Work item used to deliver error codes to the current thread. Stored here so that it can be
260     /// reused.
261     return_work: DArc<ThreadError>,
262 
263     /// Determines whether the work list below should be processed. When set to false, `work_list`
264     /// is treated as if it were empty.
265     process_work_list: bool,
266     /// List of work items to deliver to userspace.
267     work_list: List<DTRWrap<dyn DeliverToRead>>,
268     current_transaction: Option<DArc<Transaction>>,
269 
270     /// Extended error information for this thread.
271     extended_error: ExtendedError,
272 }
273 
274 const LOOPER_REGISTERED: u32 = 0x01;
275 const LOOPER_ENTERED: u32 = 0x02;
276 const LOOPER_EXITED: u32 = 0x04;
277 const LOOPER_INVALID: u32 = 0x08;
278 const LOOPER_WAITING: u32 = 0x10;
279 const LOOPER_WAITING_PROC: u32 = 0x20;
280 const LOOPER_POLL: u32 = 0x40;
281 
282 impl InnerThread {
283     fn new() -> Result<Self> {
284         fn next_err_id() -> u32 {
285             static EE_ID: Atomic<u32> = Atomic::new(0);
286             EE_ID.fetch_add(1, Relaxed)
287         }
288 
289         Ok(Self {
290             looper_flags: 0,
291             looper_need_return: false,
292             is_dead: false,
293             process_work_list: false,
294             reply_work: ThreadError::try_new()?,
295             return_work: ThreadError::try_new()?,
296             work_list: List::new(),
297             current_transaction: None,
298             extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
299         })
300     }
301 
302     fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
303         if !self.process_work_list {
304             return None;
305         }
306 
307         let ret = self.work_list.pop_front();
308         self.process_work_list = !self.work_list.is_empty();
309         ret
310     }
311 
312     fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
313         if self.is_dead {
314             PushWorkRes::FailedDead(work)
315         } else {
316             self.work_list.push_back(work);
317             self.process_work_list = true;
318             PushWorkRes::Ok
319         }
320     }
321 
322     fn push_reply_work(&mut self, code: u32) {
323         if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
324             work.set_error_code(code);
325             self.push_work(work);
326         } else {
327             pr_warn!("Thread reply work is already in use.");
328         }
329     }
330 
331     fn push_return_work(&mut self, reply: u32) {
332         if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
333             work.set_error_code(reply);
334             self.push_work(work);
335         } else {
336             pr_warn!("Thread return work is already in use.");
337         }
338     }
339 
340     /// Used to push work items that do not need to be processed immediately and can wait until the
341     /// thread gets another work item.
342     fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
343         self.work_list.push_back(work);
344     }
345 
346     /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
347     /// (that it could respond to) but it has also issued a transaction, it must first wait for the
348     /// previously-issued transaction to complete.
349     ///
350     /// The `thread` parameter should be the thread containing this `InnerThread`.
351     fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
352         let transaction = self.current_transaction.take().ok_or(EINVAL)?;
353         if core::ptr::eq(thread, transaction.from.as_ref()) {
354             self.current_transaction = Some(transaction);
355             return Err(EINVAL);
356         }
357         // Find a new current transaction for this thread.
358         self.current_transaction = transaction.find_from(thread).cloned();
359         Ok(transaction)
360     }
361 
362     fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
363         match self.current_transaction.take() {
364             None => false,
365             Some(old) => {
366                 if !Arc::ptr_eq(transaction, &old) {
367                     self.current_transaction = Some(old);
368                     return false;
369                 }
370                 self.current_transaction = old.clone_next();
371                 true
372             }
373         }
374     }
375 
376     fn looper_enter(&mut self) {
377         self.looper_flags |= LOOPER_ENTERED;
378         if self.looper_flags & LOOPER_REGISTERED != 0 {
379             self.looper_flags |= LOOPER_INVALID;
380         }
381     }
382 
383     fn looper_register(&mut self, valid: bool) {
384         self.looper_flags |= LOOPER_REGISTERED;
385         if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
386             self.looper_flags |= LOOPER_INVALID;
387         }
388     }
389 
390     fn looper_exit(&mut self) {
391         self.looper_flags |= LOOPER_EXITED;
392     }
393 
394     /// Determines whether the thread is part of a pool, i.e., if it is a looper.
395     fn is_looper(&self) -> bool {
396         self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
397     }
398 
399     /// Determines whether the thread should attempt to fetch work items from the process queue.
400     /// This is generally the case when the thread is registered as a looper and not part of a
401     /// transaction stack. But if there is local work, we want to return to userspace before we
402     /// deliver any remote work.
403     fn should_use_process_work_queue(&self) -> bool {
404         self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
405     }
406 
407     fn poll(&mut self) -> u32 {
408         self.looper_flags |= LOOPER_POLL;
409         if self.process_work_list || self.looper_need_return {
410             bindings::POLLIN
411         } else {
412             0
413         }
414     }
415 }
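
// The looper flags implement a small state machine driven by userspace
// commands. An illustrative sequence on a fresh `InnerThread`:
//
//     inner.looper_register(true); // BC_REGISTER_LOOPER: REGISTERED
//     inner.looper_enter();        // BC_ENTER_LOOPER too: REGISTERED |
//                                  // ENTERED | INVALID, since a thread may
//                                  // only be one of the two kinds of looper
//     assert!(inner.is_looper());  // still counted as part of the pool
//     inner.looper_exit();         // BC_EXIT_LOOPER: EXITED is added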
416 
417 /// This represents a thread that's used with binder.
418 #[pin_data]
419 pub(crate) struct Thread {
420     pub(crate) id: i32,
421     pub(crate) process: Arc<Process>,
422     pub(crate) task: ARef<Task>,
423     #[pin]
424     inner: SpinLock<InnerThread>,
425     #[pin]
426     work_condvar: PollCondVar,
427     /// Used to insert this thread into the process' `ready_threads` list.
428     ///
429     /// INVARIANT: May never be used for any list other than `self.process.ready_threads`.
430     #[pin]
431     links: ListLinks,
432     #[pin]
433     links_track: AtomicTracker,
434 }
435 
436 kernel::list::impl_list_arc_safe! {
437     impl ListArcSafe<0> for Thread {
438         tracked_by links_track: AtomicTracker;
439     }
440 }
441 kernel::list::impl_list_item! {
442     impl ListItem<0> for Thread {
443         using ListLinks { self.links };
444     }
445 }
446 
447 impl Thread {
448     pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
449         let inner = InnerThread::new()?;
450 
451         Arc::pin_init(
452             try_pin_init!(Thread {
453                 id,
454                 process,
455                 task: ARef::from(&**kernel::current!()),
456                 inner <- kernel::new_spinlock!(inner, "Thread::inner"),
457                 work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
458                 links <- ListLinks::new(),
459                 links_track <- AtomicTracker::new(),
460             }),
461             GFP_KERNEL,
462         )
463     }
464 
465     #[inline(never)]
466     pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
467         let inner = self.inner.lock();
468 
469         if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
470             seq_print!(
471                 m,
472                 "  thread {}: l {:02x} need_return {}\n",
473                 self.id,
474                 inner.looper_flags,
475                 inner.looper_need_return,
476             );
477         }
478 
479         let mut t_opt = inner.current_transaction.as_ref();
480         while let Some(t) = t_opt {
481             if Arc::ptr_eq(&t.from, self) {
482                 t.debug_print_inner(m, "    outgoing transaction ");
483                 t_opt = t.from_parent.as_ref();
484             } else if Arc::ptr_eq(&t.to, &self.process) {
485                 t.debug_print_inner(m, "    incoming transaction ");
486                 t_opt = t.find_from(self);
487             } else {
488                 t.debug_print_inner(m, "    bad transaction ");
489                 t_opt = None;
490             }
491         }
492 
493         for work in &inner.work_list {
494             work.debug_print(m, "    ", "    pending transaction ")?;
495         }
496         Ok(())
497     }
498 
499     pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
500         let mut writer = data.writer();
501         let ee = self.inner.lock().extended_error;
502         writer.write(&ee)?;
503         Ok(())
504     }
505 
506     pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
507         self.inner.lock().current_transaction = Some(transaction);
508     }
509 
510     pub(crate) fn has_current_transaction(&self) -> bool {
511         self.inner.lock().current_transaction.is_some()
512     }
513 
514     /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
515     /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
516     /// signal); otherwise it returns `EAGAIN` to indicate that none is available.
517     fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
518         {
519             let mut inner = self.inner.lock();
520             if inner.looper_need_return {
521                 return Ok(inner.pop_work());
522             }
523         }
524 
525         // Try once if the caller does not want to wait.
526         if !wait {
527             return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
528         }
529 
530         // Loop waiting only on the local queue (i.e., not registering with the process queue).
531         let mut inner = self.inner.lock();
532         loop {
533             if let Some(work) = inner.pop_work() {
534                 return Ok(Some(work));
535             }
536 
537             inner.looper_flags |= LOOPER_WAITING;
538             let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
539             inner.looper_flags &= !LOOPER_WAITING;
540 
541             if signal_pending {
542                 return Err(EINTR);
543             }
544             if inner.looper_need_return {
545                 return Ok(None);
546             }
547         }
548     }
549 
550     /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
551     /// queue if none is available locally.
552     ///
553     /// This must only be called when the thread is not participating in a transaction chain. If it
554     /// is, the local version (`get_work_local`) should be used instead.
555     fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
556         // Try to get work from the thread's work queue, using only a local lock.
557         {
558             let mut inner = self.inner.lock();
559             if let Some(work) = inner.pop_work() {
560                 return Ok(Some(work));
561             }
562             if inner.looper_need_return {
563                 drop(inner);
564                 return Ok(self.process.get_work());
565             }
566         }
567 
568         // If the caller doesn't want to wait, try to grab work from the process queue.
569         //
570         // We know nothing will have been queued directly to the thread queue because it is not in
571         // a transaction and it is not in the process' ready list.
572         if !wait {
573             return self.process.get_work().ok_or(EAGAIN).map(Some);
574         }
575 
576         // Get work from the process queue. If none is available, atomically register as ready.
577         let reg = match self.process.get_work_or_register(self) {
578             GetWorkOrRegister::Work(work) => return Ok(Some(work)),
579             GetWorkOrRegister::Register(reg) => reg,
580         };
581 
582         let mut inner = self.inner.lock();
583         loop {
584             if let Some(work) = inner.pop_work() {
585                 return Ok(Some(work));
586             }
587 
588             inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
589             let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
590             inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
591 
592             if signal_pending || inner.looper_need_return {
593                 // We need to return now. We need to pull the thread off the list of ready threads
594                 // (by dropping `reg`), then check the state again after it's off the list to
595                 // ensure that something was not queued in the meantime. If something has been
596                 // queued, we just return it (instead of the error).
597                 drop(inner);
598                 drop(reg);
599 
600                 let res = match self.inner.lock().pop_work() {
601                     Some(work) => Ok(Some(work)),
602                     None if signal_pending => Err(EINTR),
603                     None => Ok(None),
604                 };
605                 return res;
606             }
607         }
608     }
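
    // The loop above follows the usual "register, sleep, deregister, recheck"
    // pattern. As pseudocode (a sketch of the control flow, not extra driver
    // logic):
    //
    //     loop {
    //         if let Some(work) = local_pop() { return work; }
    //         sleep_until_notified();
    //         if signal_pending || looper_need_return {
    //             leave_ready_list(); // drop `reg`
    //             // An enqueue can race with leaving the ready list, so check
    //             // one final time before reporting EINTR or "no work".
    //             return local_pop().map(Some).unwrap_or(eintr_or_none);
    //         }
    //     }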
609 
610     /// Push the provided work item to be delivered to user space via this thread.
611     ///
612     /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
613     pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
614         let sync = work.should_sync_wakeup();
615 
616         let res = self.inner.lock().push_work(work);
617 
618         if res.is_ok() {
619             if sync {
620                 self.work_condvar.notify_sync();
621             } else {
622                 self.work_condvar.notify_one();
623             }
624         }
625 
626         res
627     }
628 
629     /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
630     /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
631     pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
632         let mut inner = self.inner.lock();
633         if inner.is_looper() && !inner.is_dead {
634             inner.push_work(work);
635             Ok(())
636         } else {
637             drop(inner);
638             self.process.push_work(work)
639         }
640     }
641 
642     pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
643         self.inner.lock().push_work_deferred(work);
644     }
645 
646     pub(crate) fn push_return_work(&self, reply: u32) {
647         self.inner.lock().push_return_work(reply);
648     }
649 
650     fn translate_object(
651         &self,
652         obj_index: usize,
653         offset: usize,
654         object: BinderObjectRef<'_>,
655         view: &mut AllocationView<'_>,
656         allow_fds: bool,
657         sg_state: &mut ScatterGatherState,
658     ) -> BinderResult {
659         match object {
660             BinderObjectRef::Binder(obj) => {
661                 let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
662                 // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
663                 // representation.
664                 let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
665                 let cookie = obj.cookie as _;
666                 let flags = obj.flags as _;
667                 let node = self
668                     .process
669                     .as_arc_borrow()
670                     .get_node(ptr, cookie, flags, strong, self)?;
671                 security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
672                 view.transfer_binder_object(offset, obj, strong, node)?;
673             }
674             BinderObjectRef::Handle(obj) => {
675                 let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
676                 // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
677                 let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
678                 let node = self.process.get_node_from_handle(handle, strong)?;
679                 security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
680                 view.transfer_binder_object(offset, obj, strong, node)?;
681             }
682             BinderObjectRef::Fd(obj) => {
683                 if !allow_fds {
684                     return Err(EPERM.into());
685                 }
686 
687                 // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
688                 let fd = unsafe { obj.__bindgen_anon_1.fd };
689                 let file = LocalFile::fget(fd)?;
690                 // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
691                 // ioctl, so there are no active calls to `fdget_pos` on this thread.
692                 let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
693                 security::binder_transfer_file(
694                     &self.process.cred,
695                     &view.alloc.process.cred,
696                     &file,
697                 )?;
698 
699                 let mut obj_write = BinderFdObject::default();
700                 obj_write.hdr.type_ = BINDER_TYPE_FD;
701                 // This will be overwritten with the actual fd when the transaction is received.
702                 obj_write.__bindgen_anon_1.fd = u32::MAX;
703                 obj_write.cookie = obj.cookie;
704                 view.write::<BinderFdObject>(offset, &obj_write)?;
705 
706                 const FD_FIELD_OFFSET: usize =
707                     core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
708 
709                 let field_offset = offset + FD_FIELD_OFFSET;
710 
711                 view.alloc.info_add_fd(file, field_offset, false)?;
712             }
713             BinderObjectRef::Ptr(obj) => {
714                 let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
715                 let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
716                     Ok(alloc_offset) => alloc_offset,
717                     Err(err) => {
718                         pr_warn!(
719                             "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
720                             sg_state.unused_buffer_space.offset,
721                             sg_state.unused_buffer_space.limit,
722                             obj_length,
723                         );
724                         return Err(err.into());
725                     }
726                 };
727 
728                 let sg_state_idx = sg_state.sg_entries.len();
729                 sg_state.sg_entries.push(
730                     ScatterGatherEntry {
731                         obj_index,
732                         offset: alloc_offset,
733                         sender_uaddr: obj.buffer as _,
734                         length: obj_length,
735                         pointer_fixups: KVec::new(),
736                         fixup_min_offset: 0,
737                     },
738                     GFP_KERNEL,
739                 )?;
740 
741                 let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
742 
743                 if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
744                     sg_state.ancestors.clear();
745                     sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
746                 } else {
747                     // Another buffer also has a pointer to this buffer, and we need to fixup that
748                     // pointer too.
749 
750                     let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
751                     let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
752 
753                     let info = sg_state.validate_parent_fixup(
754                         parent_index,
755                         parent_offset,
756                         size_of::<u64>(),
757                     )?;
758 
759                     sg_state.ancestors.truncate(info.num_ancestors);
760                     sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
761 
762                     let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
763                         Some(parent_entry) => parent_entry,
764                         None => {
765                             pr_err!(
766                                 "validate_parent_fixup returned index out of bounds for sg_entries"
767                             );
768                             return Err(EINVAL.into());
769                         }
770                     };
771 
772                     parent_entry.fixup_min_offset = info.new_min_offset;
773                     parent_entry.pointer_fixups.push(
774                         PointerFixupEntry::Fixup {
775                             pointer_value: buffer_ptr_in_user_space,
776                             target_offset: info.target_offset,
777                         },
778                         GFP_KERNEL,
779                     )?;
780                 }
781 
782                 let mut obj_write = BinderBufferObject::default();
783                 obj_write.hdr.type_ = BINDER_TYPE_PTR;
784                 obj_write.flags = obj.flags;
785                 obj_write.buffer = buffer_ptr_in_user_space;
786                 obj_write.length = obj.length;
787                 obj_write.parent = obj.parent;
788                 obj_write.parent_offset = obj.parent_offset;
789                 view.write::<BinderBufferObject>(offset, &obj_write)?;
790             }
791             BinderObjectRef::Fda(obj) => {
792                 if !allow_fds {
793                     return Err(EPERM.into());
794                 }
795                 let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
796                 let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
797                 let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
798                 let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
799 
800                 if !is_aligned(parent_offset, size_of::<u32>()) {
801                     return Err(EINVAL.into());
802                 }
803 
804                 let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
805                 view.alloc.info_add_fd_reserve(num_fds)?;
806 
807                 sg_state.ancestors.truncate(info.num_ancestors);
808                 let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
809                     Some(parent_entry) => parent_entry,
810                     None => {
811                         pr_err!(
812                             "validate_parent_fixup returned index out of bounds for sg_entries"
813                         );
814                         return Err(EINVAL.into());
815                     }
816                 };
817 
818                 if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
819                     return Err(EINVAL.into());
820                 }
821 
822                 parent_entry.fixup_min_offset = info.new_min_offset;
823                 parent_entry
824                     .pointer_fixups
825                     .push(
826                         PointerFixupEntry::Skip {
827                             skip: fds_len,
828                             target_offset: info.target_offset,
829                         },
830                         GFP_KERNEL,
831                     )
832                     .map_err(|_| ENOMEM)?;
833 
834                 let fda_uaddr = parent_entry
835                     .sender_uaddr
836                     .checked_add(parent_offset)
837                     .ok_or(EINVAL)?;
838 
839                 let mut fda_bytes = KVec::new();
840                 UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
841                     .read_all(&mut fda_bytes, GFP_KERNEL)?;
842 
843                 if fds_len != fda_bytes.len() {
844                     pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
845                     return Err(EINVAL.into());
846                 }
847 
848                 for i in (0..fds_len).step_by(size_of::<u32>()) {
849                     let fd = {
850                         let mut fd_bytes = [0u8; size_of::<u32>()];
851                         fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
852                         u32::from_ne_bytes(fd_bytes)
853                     };
854 
855                     let file = LocalFile::fget(fd)?;
856                     // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
857                     // ioctl, so there are no active calls to `fdget_pos` on this thread.
858                     let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
859                     security::binder_transfer_file(
860                         &self.process.cred,
861                         &view.alloc.process.cred,
862                         &file,
863                     )?;
864 
865                     // The `validate_parent_fixup` call ensures that this addition will not
866                     // overflow.
867                     view.alloc.info_add_fd(file, info.target_offset + i, true)?;
868                 }
869                 drop(fda_bytes);
870 
871                 let mut obj_write = BinderFdArrayObject::default();
872                 obj_write.hdr.type_ = BINDER_TYPE_FDA;
873                 obj_write.num_fds = obj.num_fds;
874                 obj_write.parent = obj.parent;
875                 obj_write.parent_offset = obj.parent_offset;
876                 view.write::<BinderFdArrayObject>(offset, &obj_write)?;
877             }
878         }
879         Ok(())
880     }
881 
882     fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
883         for sg_entry in &mut sg_state.sg_entries {
884             let mut end_of_previous_fixup = sg_entry.offset;
885             let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
886 
887             let mut reader =
888                 UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
889             for fixup in &mut sg_entry.pointer_fixups {
890                 let (fixup_len, fixup_offset) = match fixup {
891                     PointerFixupEntry::Fixup { target_offset, .. } => {
892                         (size_of::<u64>(), *target_offset)
893                     }
894                     PointerFixupEntry::Skip {
895                         skip,
896                         target_offset,
897                     } => (*skip, *target_offset),
898                 };
899 
900                 let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
901                 if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
902                     pr_warn!(
903                         "Fixups oob {} {} {} {}",
904                         fixup_offset,
905                         end_of_previous_fixup,
906                         offset_end,
907                         target_offset_end
908                     );
909                     return Err(EINVAL.into());
910                 }
911 
912                 let copy_off = end_of_previous_fixup;
913                 let copy_len = fixup_offset - end_of_previous_fixup;
914                 if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
915                     pr_warn!("Failed copying into alloc: {:?}", err);
916                     return Err(err.into());
917                 }
918                 if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
919                     let res = alloc.write::<u64>(fixup_offset, pointer_value);
920                     if let Err(err) = res {
921                         pr_warn!("Failed copying ptr into alloc: {:?}", err);
922                         return Err(err.into());
923                     }
924                 }
925                 if let Err(err) = reader.skip(fixup_len) {
926                     pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
927                     return Err(err.into());
928                 }
929                 end_of_previous_fixup = target_offset_end;
930             }
931             let copy_off = end_of_previous_fixup;
932             let copy_len = offset_end - end_of_previous_fixup;
933             if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
934                 pr_warn!("Failed copying remainder into alloc: {:?}", err);
935                 return Err(err.into());
936             }
937         }
938         Ok(())
939     }
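
    // A worked example of the interleaving above: one entry with `offset = 0`
    // and `length = 24`, and a single pointer fixup at target offset 8 (all
    // values illustrative):
    //
    //     copy_into(off = 0, len = 8)     // sender bytes before the fixup
    //     write::<u64>(8, pointer_value)  // the translated pointer
    //     reader.skip(8)                  // the sender's original pointer
    //     copy_into(off = 16, len = 8)    // sender bytes after the last fixup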
940 
941     /// This method copies the payload of a transaction into the target process.
942     ///
943     /// The resulting payload will have several different components, which will be stored next to
944     /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
945     /// and those objects have to be translated so that they make sense to the target transaction.
946     pub(crate) fn copy_transaction_data(
947         &self,
948         to_process: Arc<Process>,
949         tr: &BinderTransactionDataSg,
950         debug_id: usize,
951         allow_fds: bool,
952         txn_security_ctx_offset: Option<&mut usize>,
953     ) -> BinderResult<NewAllocation> {
954         let trd = &tr.transaction_data;
955         let is_oneway = trd.flags & TF_ONE_WAY != 0;
956         let mut secctx = if let Some(offset) = txn_security_ctx_offset {
957             let secid = self.process.cred.get_secid();
958             let ctx = match security::SecurityCtx::from_secid(secid) {
959                 Ok(ctx) => ctx,
960                 Err(err) => {
961                     pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
962                     return Err(err.into());
963                 }
964             };
965             Some((offset, ctx))
966         } else {
967             None
968         };
969 
970         let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
971         let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
972         let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
973         let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
974         let aligned_secctx_size = match secctx.as_ref() {
975             Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
976             None => 0,
977         };
978 
979         if !is_aligned(offsets_size, size_of::<u64>()) {
980             return Err(EINVAL.into());
981         }
982         if !is_aligned(buffers_size, size_of::<u64>()) {
983             return Err(EINVAL.into());
984         }
985 
986         // This guarantees that at least `size_of::<u64>()` bytes will be allocated.
987         let len = usize::max(
988             aligned_data_size
989                 .checked_add(offsets_size)
990                 .and_then(|sum| sum.checked_add(buffers_size))
991                 .and_then(|sum| sum.checked_add(aligned_secctx_size))
992                 .ok_or(ENOMEM)?,
993             size_of::<u64>(),
994         );
995         let secctx_off = aligned_data_size + offsets_size + buffers_size;
996         let mut alloc =
997             match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
998                 Ok(alloc) => alloc,
999                 Err(err) => {
1000                     pr_warn!(
1001                         "Failed to allocate buffer. len:{}, is_oneway:{}",
1002                         len,
1003                         is_oneway
1004                     );
1005                     return Err(err);
1006                 }
1007             };
1008 
1009         // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
1010         // all bit-patterns.
1011         let trd_data_ptr = unsafe { &trd.data.ptr };
1012         let mut buffer_reader =
1013             UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
1014         let mut end_of_previous_object = 0;
1015         let mut sg_state = None;
1016 
1017         // Copy offsets if there are any.
1018         if offsets_size > 0 {
1019             {
1020                 let mut reader =
1021                     UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
1022                         .reader();
1023                 alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
1024             }
1025 
1026             let offsets_start = aligned_data_size;
1027             let offsets_end = aligned_data_size + offsets_size;
1028 
1029             // This state is used for BINDER_TYPE_PTR objects.
1030             let sg_state = sg_state.insert(ScatterGatherState {
1031                 unused_buffer_space: UnusedBufferSpace {
1032                     offset: offsets_end,
1033                     limit: offsets_end + buffers_size,
1034                 },
1035                 sg_entries: KVec::new(),
1036                 ancestors: KVec::new(),
1037             });
1038 
1039             // Traverse the objects specified.
1040             let mut view = AllocationView::new(&mut alloc, data_size);
1041             for (index, index_offset) in (offsets_start..offsets_end)
1042                 .step_by(size_of::<u64>())
1043                 .enumerate()
1044             {
1045                 let offset: usize = view
1046                     .alloc
1047                     .read::<u64>(index_offset)?
1048                     .try_into()
1049                     .map_err(|_| EINVAL)?;
1050 
1051                 if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
1052                     pr_warn!("Got transaction with invalid offset.");
1053                     return Err(EINVAL.into());
1054                 }
1055 
1056                 // Copy data between two objects.
1057                 if end_of_previous_object < offset {
1058                     view.copy_into(
1059                         &mut buffer_reader,
1060                         end_of_previous_object,
1061                         offset - end_of_previous_object,
1062                     )?;
1063                 }
1064 
1065                 let mut object = BinderObject::read_from(&mut buffer_reader)?;
1066 
1067                 match self.translate_object(
1068                     index,
1069                     offset,
1070                     object.as_ref(),
1071                     &mut view,
1072                     allow_fds,
1073                     sg_state,
1074                 ) {
1075                     Ok(()) => end_of_previous_object = offset + object.size(),
1076                     Err(err) => {
1077                         pr_warn!("Error while translating object.");
1078                         return Err(err);
1079                     }
1080                 }
1081 
1082                 // Update the indexes containing objects to clean up.
1083                 let offset_after_object = index_offset + size_of::<u64>();
1084                 view.alloc
1085                     .set_info_offsets(offsets_start..offset_after_object);
1086             }
1087         }
1088 
1089         // Copy remaining raw data.
1090         alloc.copy_into(
1091             &mut buffer_reader,
1092             end_of_previous_object,
1093             data_size - end_of_previous_object,
1094         )?;
1095 
1096         if let Some(sg_state) = sg_state.as_mut() {
1097             if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
1098                 pr_warn!("Failure in apply_sg: {:?}", err);
1099                 return Err(err);
1100             }
1101         }
1102 
1103         if let Some((off_out, secctx)) = secctx.as_mut() {
1104             if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
1105                 pr_warn!("Failed to write security context: {:?}", err);
1106                 return Err(err.into());
1107             }
1108             **off_out = secctx_off;
1109         }
1110         Ok(alloc)
1111     }
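
    // The allocation produced above is laid out as four consecutive regions:
    //
    //     [ data (aligned) | offsets | sg buffers | security context ]
    //
    // As an illustrative computation: data_size = 20, offsets_size = 16,
    // buffers_size = 32 and a 10-byte security context give
    // aligned_data_size = 24, secctx_off = 24 + 16 + 32 = 72, and
    // len = 72 + ptr_align(10) = 88, assuming `ptr_align` rounds up to a
    // multiple of 8.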
1112 
1113     fn unwind_transaction_stack(self: &Arc<Self>) {
1114         let mut thread = self.clone();
1115         while let Ok(transaction) = {
1116             let mut inner = thread.inner.lock();
1117             inner.pop_transaction_to_reply(thread.as_ref())
1118         } {
1119             let reply = Err(BR_DEAD_REPLY);
1120             if !transaction.from.deliver_single_reply(reply, &transaction) {
1121                 break;
1122             }
1123 
1124             thread = transaction.from.clone();
1125         }
1126     }
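
    // An illustrative unwind with a made-up call chain A -> B -> C, where C
    // exits while it still owes B a reply:
    //
    //     C pops the transaction it should have replied to and sends
    //     BR_DEAD_REPLY to its sender B. If B is itself dead,
    //     `deliver_single_reply` returns true and the loop repeats from B
    //     (now unwinding the A -> B transaction); otherwise the unwinding
    //     stops and B handles the dead reply.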
1127 
1128     pub(crate) fn deliver_reply(
1129         &self,
1130         reply: Result<DLArc<Transaction>, u32>,
1131         transaction: &DArc<Transaction>,
1132     ) {
1133         if self.deliver_single_reply(reply, transaction) {
1134             transaction.from.unwind_transaction_stack();
1135         }
1136     }
1137 
1138     /// Delivers a reply to the thread that started a transaction. The reply can either be a
1139     /// reply-transaction or an error code to be delivered instead.
1140     ///
1141     /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
1142     /// transaction stack by completing transactions for threads that are dead.
1143     fn deliver_single_reply(
1144         &self,
1145         reply: Result<DLArc<Transaction>, u32>,
1146         transaction: &DArc<Transaction>,
1147     ) -> bool {
1148         if let Ok(transaction) = &reply {
1149             transaction.set_outstanding(&mut self.process.inner.lock());
1150         }
1151 
1152         {
1153             let mut inner = self.inner.lock();
1154             if !inner.pop_transaction_replied(transaction) {
1155                 return false;
1156             }
1157 
1158             if inner.is_dead {
1159                 return true;
1160             }
1161 
1162             match reply {
1163                 Ok(work) => {
1164                     inner.push_work(work);
1165                 }
1166                 Err(code) => inner.push_reply_work(code),
1167             }
1168         }
1169 
1170         // Notify the thread now that we've released the inner lock.
1171         self.work_condvar.notify_sync();
1172         false
1173     }
1174 
1175     /// Determines if the given transaction is the current transaction for this thread.
1176     fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
1177         let inner = self.inner.lock();
1178         match &inner.current_transaction {
1179             None => false,
1180             Some(current) => Arc::ptr_eq(current, transaction),
1181         }
1182     }
1183 
1184     /// Determines the current top of the transaction stack. It fails if the top is in another
1185     /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
1186     /// [`None`] if the thread is not currently participating in a transaction stack.
1187     fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
1188         let inner = self.inner.lock();
1189         if let Some(cur) = &inner.current_transaction {
1190             if core::ptr::eq(self, cur.from.as_ref()) {
1191                 pr_warn!("got new transaction with bad transaction stack");
1192                 return Err(EINVAL);
1193             }
1194             Ok(Some(cur.clone()))
1195         } else {
1196             Ok(None)
1197         }
1198     }
1199 
1200     fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
1201     where
1202         T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
1203     {
1204         if let Err(err) = inner(self, tr) {
1205             if err.should_pr_warn() {
1206                 let mut ee = self.inner.lock().extended_error;
1207                 ee.command = err.reply;
1208                 ee.param = err.as_errno();
1209                 pr_warn!(
1210                     "Transaction failed: {:?} my_pid:{}",
1211                     err,
1212                     self.process.pid_in_current_ns()
1213                 );
1214             }
1215 
1216             self.push_return_work(err.reply);
1217         }
1218     }
1219 
1220     fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1221         // SAFETY: Handle's type has no invalid bit patterns.
1222         let handle = unsafe { tr.transaction_data.target.handle };
1223         let node_ref = self.process.get_transaction_node(handle)?;
1224         security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
1225         // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
1226         // could this happen?
1227         let top = self.top_of_transaction_stack()?;
1228         let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
1229         let completion = list_completion.clone_arc();
1230         let transaction = Transaction::new(node_ref, top, self, tr)?;
1231 
1232         // Check that the transaction stack hasn't changed while the lock was released, then update
1233         // it with the new transaction.
1234         {
1235             let mut inner = self.inner.lock();
1236             if !transaction.is_stacked_on(&inner.current_transaction) {
1237                 pr_warn!("Transaction stack changed during transaction!");
1238                 return Err(EINVAL.into());
1239             }
1240             inner.current_transaction = Some(transaction.clone_arc());
1241             // We push the completion as a deferred work so that we wait for the reply before
1242             // returning to userland.
1243             inner.push_work_deferred(list_completion);
1244         }
1245 
1246         if let Err(e) = transaction.submit() {
1247             completion.skip();
1248             // Define `transaction` first to drop it after `inner`.
1249             let transaction;
1250             let mut inner = self.inner.lock();
1251             transaction = inner.current_transaction.take().unwrap();
1252             inner.current_transaction = transaction.clone_next();
1253             Err(e)
1254         } else {
1255             Ok(())
1256         }
1257     }
1258 
1259     fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1260         let orig = self.inner.lock().pop_transaction_to_reply(self)?;
1261         if !orig.from.is_current_transaction(&orig) {
1262             return Err(EINVAL.into());
1263         }
1264 
1265         // We need to complete the transaction even if we cannot complete building the reply.
1266         let out = (|| -> BinderResult<_> {
1267             let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
1268             let process = orig.from.process.clone();
1269             let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
1270             let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
1271             self.inner.lock().push_work(completion);
1272             orig.from.deliver_reply(Ok(reply), &orig);
1273             Ok(())
1274         })()
1275         .map_err(|mut err| {
1276             // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
1277             // the sender know that the transaction has completed (with an error in this case).
1278             pr_warn!(
1279                 "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
1280                 err
1281             );
1282             let reply = Err(BR_FAILED_REPLY);
1283             orig.from.deliver_reply(reply, &orig);
1284             err.reply = BR_TRANSACTION_COMPLETE;
1285             err
1286         });
1287 
1288         out
1289     }
1290 
1291     fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1292         // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
1293         // union is okay.
1294         let handle = unsafe { tr.transaction_data.target.handle };
1295         let node_ref = self.process.get_transaction_node(handle)?;
1296         security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
1297         let transaction = Transaction::new(node_ref, None, self, tr)?;
1298         let code = if self.process.is_oneway_spam_detection_enabled()
1299             && transaction.oneway_spam_detected
1300         {
1301             BR_ONEWAY_SPAM_SUSPECT
1302         } else {
1303             BR_TRANSACTION_COMPLETE
1304         };
1305         let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
1306         let completion = list_completion.clone_arc();
1307         self.inner.lock().push_work(list_completion);
1308         match transaction.submit() {
1309             Ok(()) => Ok(()),
1310             Err(err) => {
1311                 completion.skip();
1312                 Err(err)
1313             }
1314         }
1315     }
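
    // Seen from the sender's write buffer, a submission that lands here is a
    // command word followed by the transaction payload (a sketch; the handle
    // and method code are made up):
    //
    //     [ BC_TRANSACTION : u32,
    //       binder_transaction_data {
    //           target.handle: 1,
    //           code: SOME_METHOD,    // hypothetical userspace method code
    //           flags: TF_ONE_WAY,    // oneway: no reply, so a completion
    //                                 // code is queued right away instead
    //           data_size, offsets_size,
    //           data.ptr { buffer, offsets },
    //           ..
    //       } ]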
1316 
1317     fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
1318         let write_start = req.write_buffer.wrapping_add(req.write_consumed);
1319         let write_len = req.write_size.saturating_sub(req.write_consumed);
1320         let mut reader =
1321             UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();
1322 
1323         while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
1324             let before = reader.len();
1325             let cmd = reader.read::<u32>()?;
1326             GLOBAL_STATS.inc_bc(cmd);
1327             self.process.stats.inc_bc(cmd);
1328             match cmd {
1329                 BC_TRANSACTION => {
1330                     let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
1331                     if tr.transaction_data.flags & TF_ONE_WAY != 0 {
1332                         self.transaction(&tr, Self::oneway_transaction_inner);
1333                     } else {
1334                         self.transaction(&tr, Self::transaction_inner);
1335                     }
1336                 }
1337                 BC_TRANSACTION_SG => {
1338                     let tr = reader.read::<BinderTransactionDataSg>()?;
1339                     if tr.transaction_data.flags & TF_ONE_WAY != 0 {
1340                         self.transaction(&tr, Self::oneway_transaction_inner);
1341                     } else {
1342                         self.transaction(&tr, Self::transaction_inner);
1343                     }
1344                 }
1345                 BC_REPLY => {
1346                     let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
1347                     self.transaction(&tr, Self::reply_inner)
1348                 }
1349                 BC_REPLY_SG => {
1350                     let tr = reader.read::<BinderTransactionDataSg>()?;
1351                     self.transaction(&tr, Self::reply_inner)
1352                 }
1353                 BC_FREE_BUFFER => {
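                         // The buffer is returned to the allocator when `buffer` is dropped.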
1354                     let buffer = self.process.buffer_get(reader.read()?);
1355                     if let Some(buffer) = buffer {
1356                         if buffer.looper_need_return_on_free() {
1357                             self.inner.lock().looper_need_return = true;
1358                         }
1359                         drop(buffer);
1360                     }
1361                 }
1362                 BC_INCREFS => {
1363                     self.process
1364                         .as_arc_borrow()
1365                         .update_ref(reader.read()?, true, false)?
1366                 }
1367                 BC_ACQUIRE => {
1368                     self.process
1369                         .as_arc_borrow()
1370                         .update_ref(reader.read()?, true, true)?
1371                 }
1372                 BC_RELEASE => {
1373                     self.process
1374                         .as_arc_borrow()
1375                         .update_ref(reader.read()?, false, true)?
1376                 }
1377                 BC_DECREFS => {
1378                     self.process
1379                         .as_arc_borrow()
1380                         .update_ref(reader.read()?, false, false)?
1381                 }
1382                 BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
1383                 BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
1384                 BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
1385                 BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
1386                 BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
1387                 BC_REGISTER_LOOPER => {
1388                     let valid = self.process.register_thread();
1389                     self.inner.lock().looper_register(valid);
1390                 }
1391                 BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
1392                 BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
1393                 BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
1394                 BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
1395                 BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
1396 
1397                 // Fail if given an unknown command code.
1398                 // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
1399                 _ => return Err(EINVAL),
1400             }
1401             // Update the number of write bytes consumed.
1402             req.write_consumed += (before - reader.len()) as u64;
1403         }
1404 
1405         Ok(())
1406     }
1407 
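         /// Fills the read half of a `BINDER_WRITE_READ` request with work items, optionally
         /// blocking until work becomes available.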
1408     fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
1409         let read_start = req.read_buffer.wrapping_add(req.read_consumed);
1410         let read_len = req.read_size.saturating_sub(req.read_consumed);
1411         let mut writer = BinderReturnWriter::new(
1412             UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
1413             self,
1414         );
1415         let (in_pool, use_proc_queue) = {
1416             let inner = self.inner.lock();
1417             (inner.is_looper(), inner.should_use_process_work_queue())
1418         };
1419 
1420         let getter = if use_proc_queue {
1421             Self::get_work
1422         } else {
1423             Self::get_work_local
1424         };
1425 
1426         // Reserve some room at the beginning of the read buffer so that we can send a
1427         // BR_SPAWN_LOOPER if we need to.
1428         let mut has_noop_placeholder = false;
1429         if req.read_consumed == 0 {
1430             if let Err(err) = writer.write_code(BR_NOOP) {
1431                 pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
1432                 return Err(err);
1433             }
1434             has_noop_placeholder = true;
1435         }
1436 
1437         // Loop doing work while there is room in the buffer.
1438         let initial_len = writer.len();
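             // A single work item needs at most a `binder_transaction_data_secctx` plus a
             // 4-byte return code.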
1439         while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
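                 // Block waiting for work only if nothing has been written to the buffer yet.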
1440             match getter(self, wait && initial_len == writer.len()) {
1441                 Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
1442                     Ok(true) => {}
1443                     Ok(false) => break,
1444                     Err(err) => {
1445                         return Err(err);
1446                     }
1447                 },
1448                 Ok(None) => {
1449                     break;
1450                 }
1451                 Err(err) => {
1452                     if err != EINTR && err != EAGAIN {
1453                         pr_warn!("Failure in work getter: {:?}", err);
1454                     }
1455                     // Propagate the error only if we haven't written anything yet.
1456                     if initial_len == writer.len() {
1457                         return Err(err);
1458                     } else {
1459                         break;
1460                     }
1461                 }
1462             }
1463         }
1464 
1465         req.read_consumed += read_len - writer.len() as u64;
1466 
1467         // Overwrite the BR_NOOP placeholder with BR_SPAWN_LOOPER if the process needs more threads for its pool.
1468         if has_noop_placeholder && in_pool && self.process.needs_thread() {
1469             let mut writer =
1470                 UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
1471                     .writer();
1472             writer.write(&BR_SPAWN_LOOPER)?;
1473         }
1474         Ok(())
1475     }
1476 
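         /// Entry point for the `BINDER_WRITE_READ` ioctl: drains the write buffer, fills the
         /// read buffer, and writes the updated consumed counts back to userspace.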
1477     pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
1478         let (mut reader, mut writer) = data.reader_writer();
1479         let mut req = reader.read::<BinderWriteRead>()?;
1480 
1481         // Go through the write buffer.
1482         let mut ret = Ok(());
1483         if req.write_size > 0 {
1484             ret = self.write(&mut req);
1485             if let Err(err) = ret {
1486                 pr_warn!(
1487                     "Write failure {:?} in pid:{}",
1488                     err,
1489                     self.process.pid_in_current_ns()
1490                 );
1491                 req.read_consumed = 0;
1492                 writer.write(&req)?;
1493                 self.inner.lock().looper_need_return = false;
1494                 return ret;
1495             }
1496         }
1497 
1498         // Go through the work queue.
1499         if req.read_size > 0 {
1500             ret = self.read(&mut req, wait);
1501             if ret.is_err() && ret != Err(EINTR) {
1502                 pr_warn!(
1503                     "Read failure {:?} in pid:{}",
1504                     ret,
1505                     self.process.pid_in_current_ns()
1506                 );
1507             }
1508         }
1509 
1510         // Write the request back so that the consumed fields are visible to the caller.
1511         writer.write(&req)?;
1512 
1513         self.inner.lock().looper_need_return = false;
1514 
1515         ret
1516     }
1517 
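         /// Registers this thread's condition variable with the poll table, returning whether
         /// the thread uses the process-wide work queue and the current poll mask.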
1518     pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
1519         table.register_wait(file, &self.work_condvar);
1520         let mut inner = self.inner.lock();
1521         (inner.should_use_process_work_queue(), inner.poll())
1522     }
1523 
1524     /// Make any in-progress call to `get_work` or `get_work_local` return immediately.
1525     pub(crate) fn exit_looper(&self) {
1526         let mut inner = self.inner.lock();
1527         let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
1528         if should_notify {
1529             inner.looper_need_return = true;
1530         }
1531         drop(inner);
1532 
1533         if should_notify {
1534             self.work_condvar.notify_one();
1535         }
1536     }
1537 
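         /// Wakes this thread up if it is polling and could service an item from the process
         /// work queue.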
1538     pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
1539         // Determine if we need to notify. This requires the lock.
1540         let inner = self.inner.lock();
1541         let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
1542         drop(inner);
1543 
1544         // Now that the lock is no longer held, notify the waiters if we have to.
1545         if notify {
1546             if sync {
1547                 self.work_condvar.notify_sync();
1548             } else {
1549                 self.work_condvar.notify_one();
1550             }
1551         }
1552     }
1553 
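         /// Tears the thread down: marks it as dead, unwinds its transaction stack, and
         /// cancels any work items still queued locally.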
1554     pub(crate) fn release(self: &Arc<Self>) {
1555         self.inner.lock().is_dead = true;
1556 
1558         self.unwind_transaction_stack();
1559 
1560         // Cancel all pending work items.
1561         while let Ok(Some(work)) = self.get_work_local(false) {
1562             work.into_arc().cancel();
1563         }
1564     }
1565 }
1566 
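     /// A work item that delivers a pending error code to the thread that owns it. The slot
     /// counts as unused while the stored code is `BR_OK`.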
1567 #[pin_data]
1568 struct ThreadError {
1569     error_code: Atomic<u32>,
1570     #[pin]
1571     links_track: AtomicTracker,
1572 }
1573 
1574 impl ThreadError {
1575     fn try_new() -> Result<DArc<Self>> {
1576         DTRWrap::arc_pin_init(pin_init!(Self {
1577             error_code: Atomic::new(BR_OK),
1578             links_track <- AtomicTracker::new(),
1579         }))
1580         .map(ListArc::into_arc)
1581     }
1582 
1583     fn set_error_code(&self, code: u32) {
1584         self.error_code.store(code, Relaxed);
1585     }
1586 
1587     fn is_unused(&self) -> bool {
1588         self.error_code.load(Relaxed) == BR_OK
1589     }
1590 }
1591 
1592 impl DeliverToRead for ThreadError {
1593     fn do_work(
1594         self: DArc<Self>,
1595         _thread: &Thread,
1596         writer: &mut BinderReturnWriter<'_>,
1597     ) -> Result<bool> {
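             // Deliver the pending code and reset the slot to `BR_OK` so it can be reused.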
1598         let code = self.error_code.load(Relaxed);
1599         self.error_code.store(BR_OK, Relaxed);
1600         writer.write_code(code)?;
1601         Ok(true)
1602     }
1603 
1604     fn cancel(self: DArc<Self>) {}
1605 
1606     fn should_sync_wakeup(&self) -> bool {
1607         false
1608     }
1609 
1610     fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
1611         seq_print!(
1612             m,
1613             "{}transaction error: {}\n",
1614             prefix,
1615             self.error_code.load(Relaxed)
1616         );
1617         Ok(())
1618     }
1619 }
1620 
1621 kernel::list::impl_list_arc_safe! {
1622     impl ListArcSafe<0> for ThreadError {
1623         tracked_by links_track: AtomicTracker;
1624     }
1625 }
1626