xref: /linux/drivers/android/binder/thread.rs (revision 944e3f7562c55fa37ebcdd58e5f60f296c81a854)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 // Copyright (C) 2025 Google LLC.
4 
5 //! This module defines the `Thread` type, which represents a userspace thread that is using
6 //! binder.
7 //!
8 //! The `Process` object stores all of the threads in an rb tree.
9 
10 use kernel::{
11     bindings,
12     fs::{File, LocalFile},
13     list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
14     prelude::*,
15     security,
16     seq_file::SeqFile,
17     seq_print,
18     sync::atomic::{ordering::Relaxed, Atomic},
19     sync::poll::{PollCondVar, PollTable},
20     sync::{Arc, SpinLock},
21     task::Task,
22     types::ARef,
23     uaccess::UserSlice,
24     uapi,
25 };
26 
27 use crate::{
28     allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
29     defs::*,
30     error::BinderResult,
31     process::{GetWorkOrRegister, Process},
32     ptr_align,
33     stats::GLOBAL_STATS,
34     transaction::Transaction,
35     BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
36 };
37 
38 use core::mem::size_of;
39 
/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
/// call and is discarded when it returns.
struct ScatterGatherState {
    /// A struct that tracks the amount of unused buffer space.
    unused_buffer_space: UnusedBufferSpace,
    /// Scatter-gather entries to copy.
    sg_entries: KVec<ScatterGatherEntry>,
    /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
    /// was processed and all of its ancestors. The array is in sorted order.
    ancestors: KVec<usize>,
}
51 
/// This entry specifies an additional buffer that should be copied using the scatter-gather
/// mechanism.
struct ScatterGatherEntry {
    /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
    obj_index: usize,
    /// Offset in target buffer.
    offset: usize,
    /// User address in source buffer.
    sender_uaddr: usize,
    /// Number of bytes to copy.
    length: usize,
    /// The minimum offset of the next fixup in this buffer.
    ///
    /// Fixups within one buffer must be applied at strictly increasing offsets.
    fixup_min_offset: usize,
    /// The offsets within this buffer that contain pointers which should be translated.
    pointer_fixups: KVec<PointerFixupEntry>,
}
68 
/// This entry specifies that a fixup should happen at `target_offset` of the
/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
/// and is applied later. Otherwise if `skip` is zero, then the size of the
/// fixup is `size_of::<u64>()` and `pointer_value` is written to the buffer.
struct PointerFixupEntry {
    /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
    skip: usize,
    /// The translated pointer to write when `skip` is zero.
    pointer_value: u64,
    /// The offset at which the value should be written. The offset is relative
    /// to the original buffer.
    target_offset: usize,
}
82 
/// Return type of `validate_parent_fixup`, describing where and how a
/// parent-buffer fixup may be applied.
struct ParentFixupInfo {
    /// The index of the parent buffer in `sg_entries`.
    parent_sg_index: usize,
    /// The number of ancestors of the buffer.
    ///
    /// The buffer is considered an ancestor of itself, so this is always at
    /// least one.
    num_ancestors: usize,
    /// New value of `fixup_min_offset` if this fixup is applied.
    new_min_offset: usize,
    /// The offset of the fixup in the target buffer.
    target_offset: usize,
}
97 
impl ScatterGatherState {
    /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
    /// to access a region in its parent buffer. These accesses have various
    /// restrictions, which this method verifies.
    ///
    /// The `parent_offset` and `length` arguments describe the offset and
    /// length of the access in the parent buffer.
    ///
    /// # Detailed restrictions
    ///
    /// Obviously the fixup must be in-bounds for the parent buffer.
    ///
    /// For safety reasons, we only allow fixups inside a buffer to happen
    /// at increasing offsets; additionally, we only allow fixup on the last
    /// buffer object that was verified, or one of its parents.
    ///
    /// Example of what is allowed:
    ///
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = C, offset = 0)
    ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
    ///
    /// Examples of what is not allowed:
    ///
    /// Decreasing offsets within the same parent:
    /// A
    ///   C (parent = A, offset = 16)
    ///   B (parent = A, offset = 0) // decreasing offset within A
    ///
    /// Referring to a parent that wasn't the last object or any of its parents:
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = B, offset = 0) // B is not C or any of C's parents
    fn validate_parent_fixup(
        &self,
        parent: usize,
        parent_offset: usize,
        length: usize,
    ) -> Result<ParentFixupInfo> {
        // Using `position` would also be correct, but `rposition` avoids
        // quadratic running times.
        let ancestors_i = self
            .ancestors
            .iter()
            .copied()
            .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
            .ok_or(EINVAL)?;
        let sg_idx = self.ancestors[ancestors_i];
        let sg_entry = match self.sg_entries.get(sg_idx) {
            Some(sg_entry) => sg_entry,
            None => {
                // `ancestors` only holds indices pushed after a successful
                // `sg_entries` push, so this indicates a driver bug.
                pr_err!(
                    "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
                    ancestors_i,
                    sg_idx,
                    self.sg_entries.len()
                );
                return Err(EINVAL);
            }
        };
        // Fixups within one buffer must happen at increasing offsets.
        if sg_entry.fixup_min_offset > parent_offset {
            pr_warn!(
                "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
                sg_entry.fixup_min_offset,
                parent_offset
            );
            return Err(EINVAL);
        }
        // The access must be in-bounds for the parent buffer.
        let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
        if new_min_offset > sg_entry.length {
            pr_warn!(
                "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
                new_min_offset,
                sg_entry.length
            );
            return Err(EINVAL);
        }
        let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
        // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
        // most `self.ancestors.len()`, which also fits in a usize.
        Ok(ParentFixupInfo {
            parent_sg_index: sg_idx,
            num_ancestors: ancestors_i + 1,
            new_min_offset,
            target_offset,
        })
    }
}
190 
/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
struct UnusedBufferSpace {
    /// The start of the remaining space.
    offset: usize,
    /// The end of the remaining space.
    limit: usize,
}
200 impl UnusedBufferSpace {
201     /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
202     /// into the buffer is returned.
203     fn claim_next(&mut self, size: usize) -> Result<usize> {
204         // We require every chunk to be aligned.
205         let size = ptr_align(size).ok_or(EINVAL)?;
206         let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
207 
208         if new_offset <= self.limit {
209             let offset = self.offset;
210             self.offset = new_offset;
211             Ok(offset)
212         } else {
213             Err(EINVAL)
214         }
215     }
216 }
217 
/// The result of pushing a work item onto a thread's queue.
pub(crate) enum PushWorkRes {
    /// The work item was queued successfully.
    Ok,
    /// The thread is dead; ownership of the work item is handed back to the caller.
    FailedDead(DLArc<dyn DeliverToRead>),
}
222 
223 impl PushWorkRes {
224     fn is_ok(&self) -> bool {
225         match self {
226             PushWorkRes::Ok => true,
227             PushWorkRes::FailedDead(_) => false,
228         }
229     }
230 }
231 
/// The fields of `Thread` protected by the spinlock.
struct InnerThread {
    /// Determines the looper state of the thread. It is a bit-wise combination of the constants
    /// prefixed with `LOOPER_`.
    looper_flags: u32,

    /// Determines whether the looper should return.
    looper_need_return: bool,

    /// Determines if thread is dead.
    is_dead: bool,

    /// Work item used to deliver error codes to the thread that started a transaction. Stored here
    /// so that it can be reused.
    reply_work: DArc<ThreadError>,

    /// Work item used to deliver error codes to the current thread. Stored here so that it can be
    /// reused.
    return_work: DArc<ThreadError>,

    /// Determines whether the work list below should be processed. When set to false, `work_list`
    /// is treated as if it were empty.
    process_work_list: bool,
    /// List of work items to deliver to userspace.
    work_list: List<DTRWrap<dyn DeliverToRead>>,
    /// The transaction this thread is currently working on, if any.
    current_transaction: Option<DArc<Transaction>>,

    /// Extended error information for this thread.
    extended_error: ExtendedError,
}
262 
/// The thread registered itself as a looper (see `InnerThread::looper_register`).
const LOOPER_REGISTERED: u32 = 0x01;
/// The thread entered the looper (see `InnerThread::looper_enter`).
const LOOPER_ENTERED: u32 = 0x02;
/// The thread exited the looper.
const LOOPER_EXITED: u32 = 0x04;
/// The looper state is invalid, e.g. the thread both registered and entered.
const LOOPER_INVALID: u32 = 0x08;
/// Set while the thread is blocked waiting for work on its own queue.
const LOOPER_WAITING: u32 = 0x10;
/// Set (together with `LOOPER_WAITING`) while the thread also waits on the process queue.
const LOOPER_WAITING_PROC: u32 = 0x20;
/// The thread has used the poll interface at least once.
const LOOPER_POLL: u32 = 0x40;
270 
impl InnerThread {
    fn new() -> Result<Self> {
        // Generates a unique id for this thread's extended error information.
        fn next_err_id() -> u32 {
            static EE_ID: Atomic<u32> = Atomic::new(0);
            EE_ID.fetch_add(1, Relaxed)
        }

        Ok(Self {
            looper_flags: 0,
            looper_need_return: false,
            is_dead: false,
            process_work_list: false,
            reply_work: ThreadError::try_new()?,
            return_work: ThreadError::try_new()?,
            work_list: List::new(),
            current_transaction: None,
            extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
        })
    }

    /// Pops the next work item from the thread-local queue, honouring the
    /// `process_work_list` flag: when it is false, the queue is treated as empty.
    fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
        if !self.process_work_list {
            return None;
        }

        let ret = self.work_list.pop_front();
        // Keep processing only while there are more items queued.
        self.process_work_list = !self.work_list.is_empty();
        ret
    }

    /// Queues a work item and marks the queue for processing. Fails and hands
    /// the item back if the thread is dead.
    fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        if self.is_dead {
            PushWorkRes::FailedDead(work)
        } else {
            self.work_list.push_back(work);
            self.process_work_list = true;
            PushWorkRes::Ok
        }
    }

    /// Queues the reusable `reply_work` item with the given error code, unless
    /// it is already queued somewhere (in which case `try_from_arc` fails).
    fn push_reply_work(&mut self, code: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
            work.set_error_code(code);
            self.push_work(work);
        } else {
            pr_warn!("Thread reply work is already in use.");
        }
    }

    /// Queues the reusable `return_work` item with the given return code, unless
    /// it is already queued somewhere (in which case `try_from_arc` fails).
    fn push_return_work(&mut self, reply: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
            work.set_error_code(reply);
            self.push_work(work);
        } else {
            pr_warn!("Thread return work is already in use.");
        }
    }

    /// Used to push work items that do not need to be processed immediately and can wait until the
    /// thread gets another work item.
    ///
    /// Note that `process_work_list` is deliberately left unchanged here.
    fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
        self.work_list.push_back(work);
    }

    /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
    /// (that it could respond to) but it has also issued a transaction, it must first wait for the
    /// previously-issued transaction to complete.
    ///
    /// The `thread` parameter should be the thread containing this `ThreadInner`.
    fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
        let transaction = self.current_transaction.take().ok_or(EINVAL)?;
        // A thread cannot reply to a transaction it sent itself; put it back.
        if core::ptr::eq(thread, transaction.from.as_ref()) {
            self.current_transaction = Some(transaction);
            return Err(EINVAL);
        }
        // Find a new current transaction for this thread.
        self.current_transaction = transaction.find_from(thread).cloned();
        Ok(transaction)
    }

    /// Pops `transaction` from the thread if it is the current one. Returns whether it was
    /// popped; when it is not the current transaction, the state is left unchanged.
    fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
        match self.current_transaction.take() {
            None => false,
            Some(old) => {
                if !Arc::ptr_eq(transaction, &old) {
                    self.current_transaction = Some(old);
                    return false;
                }
                self.current_transaction = old.clone_next();
                true
            }
        }
    }

    /// Marks the thread as having entered the looper. Entering after registering
    /// is an invalid combination.
    fn looper_enter(&mut self) {
        self.looper_flags |= LOOPER_ENTERED;
        if self.looper_flags & LOOPER_REGISTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    /// Marks the thread as registered with the looper pool. Registering when not
    /// `valid` or after entering is an invalid combination.
    fn looper_register(&mut self, valid: bool) {
        self.looper_flags |= LOOPER_REGISTERED;
        if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    /// Marks the thread as having exited the looper.
    fn looper_exit(&mut self) {
        self.looper_flags |= LOOPER_EXITED;
    }

    /// Determines whether the thread is part of a pool, i.e., if it is a looper.
    fn is_looper(&self) -> bool {
        self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
    }

    /// Determines whether the thread should attempt to fetch work items from the process queue.
    /// This is generally the case when the thread is registered as a looper and not part of a
    /// transaction stack. But if there is local work, we want to return to userspace before we
    /// deliver any remote work.
    fn should_use_process_work_queue(&self) -> bool {
        self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
    }

    /// Records that the thread uses the poll interface and returns the poll mask:
    /// `POLLIN` when there is work to read, otherwise 0.
    fn poll(&mut self) -> u32 {
        self.looper_flags |= LOOPER_POLL;
        if self.process_work_list || self.looper_need_return {
            bindings::POLLIN
        } else {
            0
        }
    }
}
405 
/// This represents a thread that's used with binder.
#[pin_data]
pub(crate) struct Thread {
    /// The id of this thread, unique within its process.
    pub(crate) id: i32,
    /// The process this thread belongs to.
    pub(crate) process: Arc<Process>,
    /// The underlying kernel task of this thread.
    pub(crate) task: ARef<Task>,
    /// State protected by the spinlock; see `InnerThread`.
    #[pin]
    inner: SpinLock<InnerThread>,
    /// Condition variable the thread sleeps on while waiting for work; also
    /// supports the poll interface.
    #[pin]
    work_condvar: PollCondVar,
    /// Used to insert this thread into the process' `ready_threads` list.
    ///
    /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
    #[pin]
    links: ListLinks,
    /// Tracks whether this thread is currently inserted into a list.
    #[pin]
    links_track: AtomicTracker,
}
424 
// A `Thread` may be inserted into at most one list at a time; membership is
// tracked atomically by `links_track`.
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Thread {
        tracked_by links_track: AtomicTracker;
    }
}
// List insertion uses the `links` field (reserved for `ready_threads`).
kernel::list::impl_list_item! {
    impl ListItem<0> for Thread {
        using ListLinks { self.links };
    }
}
435 
436 impl Thread {
    /// Creates a new binder `Thread` with the given id, owned by `process` and
    /// bound to the current kernel task.
    pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
        let inner = InnerThread::new()?;

        Arc::pin_init(
            try_pin_init!(Thread {
                id,
                process,
                task: ARef::from(&**kernel::current!()),
                inner <- kernel::new_spinlock!(inner, "Thread::inner"),
                work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
                links <- ListLinks::new(),
                links_track <- AtomicTracker::new(),
            }),
            GFP_KERNEL,
        )
    }
453 
    /// Prints this thread's state to the seq file for debugging.
    ///
    /// When `print_all` is false, the thread header is only printed if the
    /// thread has a current transaction or pending work. Also prints the
    /// transaction stack and any queued work items.
    #[inline(never)]
    pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
        let inner = self.inner.lock();

        if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
            seq_print!(
                m,
                "  thread {}: l {:02x} need_return {}\n",
                self.id,
                inner.looper_flags,
                inner.looper_need_return,
            );
        }

        // Walk the transaction stack starting at the current transaction,
        // classifying each entry relative to this thread/process.
        let mut t_opt = inner.current_transaction.as_ref();
        while let Some(t) = t_opt {
            if Arc::ptr_eq(&t.from, self) {
                // Sent by this thread; continue with the transaction it replies to.
                t.debug_print_inner(m, "    outgoing transaction ");
                t_opt = t.from_parent.as_ref();
            } else if Arc::ptr_eq(&t.to, &self.process) {
                // Addressed to this process; continue with the next one from this thread.
                t.debug_print_inner(m, "    incoming transaction ");
                t_opt = t.find_from(self);
            } else {
                // Neither from this thread nor to this process: stop the walk.
                t.debug_print_inner(m, "    bad transaction ");
                t_opt = None;
            }
        }

        for work in &inner.work_list {
            work.debug_print(m, "    ", "    pending transaction ")?;
        }
        Ok(())
    }
487 
488     pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
489         let mut writer = data.writer();
490         let ee = self.inner.lock().extended_error;
491         writer.write(&ee)?;
492         Ok(())
493     }
494 
495     pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
496         self.inner.lock().current_transaction = Some(transaction);
497     }
498 
499     pub(crate) fn has_current_transaction(&self) -> bool {
500         self.inner.lock().current_transaction.is_some()
501     }
502 
    /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
    /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
    /// signal); otherwise it returns indicating that none is available.
    fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        // If the looper needs to return, hand back whatever is queued (possibly
        // nothing) without blocking.
        {
            let mut inner = self.inner.lock();
            if inner.looper_need_return {
                return Ok(inner.pop_work());
            }
        }

        // Try once if the caller does not want to wait.
        if !wait {
            return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
        }

        // Loop waiting only on the local queue (i.e., not registering with the process queue).
        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            // Flag that we are sleeping; the condvar releases the lock while waiting.
            inner.looper_flags |= LOOPER_WAITING;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !LOOPER_WAITING;

            if signal_pending {
                return Err(EINTR);
            }
            if inner.looper_need_return {
                return Ok(None);
            }
        }
    }
538 
    /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
    /// queue if none is available locally.
    ///
    /// This must only be called when the thread is not participating in a transaction chain. If it
    /// is, the local version (`get_work_local`) should be used instead.
    fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        // Try to get work from the thread's work queue, using only a local lock.
        {
            let mut inner = self.inner.lock();
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }
            if inner.looper_need_return {
                // Must not hold the thread lock while taking the process lock.
                drop(inner);
                return Ok(self.process.get_work());
            }
        }

        // If the caller doesn't want to wait, try to grab work from the process queue.
        //
        // We know nothing will have been queued directly to the thread queue because it is not in
        // a transaction and it is not in the process' ready list.
        if !wait {
            return self.process.get_work().ok_or(EAGAIN).map(Some);
        }

        // Get work from the process queue. If none is available, atomically register as ready.
        let reg = match self.process.get_work_or_register(self) {
            GetWorkOrRegister::Work(work) => return Ok(Some(work)),
            GetWorkOrRegister::Register(reg) => reg,
        };

        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            // Flag that we are sleeping as a ready thread of the process.
            inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);

            if signal_pending || inner.looper_need_return {
                // We need to return now. We need to pull the thread off the list of ready threads
                // (by dropping `reg`), then check the state again after it's off the list to
                // ensure that something was not queued in the meantime. If something has been
                // queued, we just return it (instead of the error).
                drop(inner);
                drop(reg);

                let res = match self.inner.lock().pop_work() {
                    Some(work) => Ok(Some(work)),
                    None if signal_pending => Err(EINTR),
                    None => Ok(None),
                };
                return res;
            }
        }
    }
598 
599     /// Push the provided work item to be delivered to user space via this thread.
600     ///
601     /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
602     pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
603         let sync = work.should_sync_wakeup();
604 
605         let res = self.inner.lock().push_work(work);
606 
607         if res.is_ok() {
608             if sync {
609                 self.work_condvar.notify_sync();
610             } else {
611                 self.work_condvar.notify_one();
612             }
613         }
614 
615         res
616     }
617 
618     /// Attempts to push to given work item to the thread if it's a looper thread (i.e., if it's
619     /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
620     pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
621         let mut inner = self.inner.lock();
622         if inner.is_looper() && !inner.is_dead {
623             inner.push_work(work);
624             Ok(())
625         } else {
626             drop(inner);
627             self.process.push_work(work)
628         }
629     }
630 
631     pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
632         self.inner.lock().push_work_deferred(work);
633     }
634 
635     pub(crate) fn push_return_work(&self, reply: u32) {
636         self.inner.lock().push_return_work(reply);
637     }
638 
639     fn translate_object(
640         &self,
641         obj_index: usize,
642         offset: usize,
643         object: BinderObjectRef<'_>,
644         view: &mut AllocationView<'_>,
645         allow_fds: bool,
646         sg_state: &mut ScatterGatherState,
647     ) -> BinderResult {
648         match object {
649             BinderObjectRef::Binder(obj) => {
650                 let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
651                 // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
652                 // representation.
653                 let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
654                 let cookie = obj.cookie as _;
655                 let flags = obj.flags as _;
656                 let node = self
657                     .process
658                     .as_arc_borrow()
659                     .get_node(ptr, cookie, flags, strong, self)?;
660                 security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
661                 view.transfer_binder_object(offset, obj, strong, node)?;
662             }
663             BinderObjectRef::Handle(obj) => {
664                 let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
665                 // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
666                 let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
667                 let node = self.process.get_node_from_handle(handle, strong)?;
668                 security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
669                 view.transfer_binder_object(offset, obj, strong, node)?;
670             }
671             BinderObjectRef::Fd(obj) => {
672                 if !allow_fds {
673                     return Err(EPERM.into());
674                 }
675 
676                 // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
677                 let fd = unsafe { obj.__bindgen_anon_1.fd };
678                 let file = LocalFile::fget(fd)?;
679                 // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
680                 // ioctl, so there are no active calls to `fdget_pos` on this thread.
681                 let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
682                 security::binder_transfer_file(
683                     &self.process.cred,
684                     &view.alloc.process.cred,
685                     &file,
686                 )?;
687 
688                 let mut obj_write = BinderFdObject::default();
689                 obj_write.hdr.type_ = BINDER_TYPE_FD;
690                 // This will be overwritten with the actual fd when the transaction is received.
691                 obj_write.__bindgen_anon_1.fd = u32::MAX;
692                 obj_write.cookie = obj.cookie;
693                 view.write::<BinderFdObject>(offset, &obj_write)?;
694 
695                 const FD_FIELD_OFFSET: usize =
696                     core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
697 
698                 let field_offset = offset + FD_FIELD_OFFSET;
699 
700                 view.alloc.info_add_fd(file, field_offset, false)?;
701             }
702             BinderObjectRef::Ptr(obj) => {
703                 let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
704                 let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
705                     Ok(alloc_offset) => alloc_offset,
706                     Err(err) => {
707                         pr_warn!(
708                             "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
709                             sg_state.unused_buffer_space.offset,
710                             sg_state.unused_buffer_space.limit,
711                             obj_length,
712                         );
713                         return Err(err.into());
714                     }
715                 };
716 
717                 let sg_state_idx = sg_state.sg_entries.len();
718                 sg_state.sg_entries.push(
719                     ScatterGatherEntry {
720                         obj_index,
721                         offset: alloc_offset,
722                         sender_uaddr: obj.buffer as _,
723                         length: obj_length,
724                         pointer_fixups: KVec::new(),
725                         fixup_min_offset: 0,
726                     },
727                     GFP_KERNEL,
728                 )?;
729 
730                 let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
731 
732                 if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
733                     sg_state.ancestors.clear();
734                     sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
735                 } else {
736                     // Another buffer also has a pointer to this buffer, and we need to fixup that
737                     // pointer too.
738 
739                     let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
740                     let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
741 
742                     let info = sg_state.validate_parent_fixup(
743                         parent_index,
744                         parent_offset,
745                         size_of::<u64>(),
746                     )?;
747 
748                     sg_state.ancestors.truncate(info.num_ancestors);
749                     sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
750 
751                     let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
752                         Some(parent_entry) => parent_entry,
753                         None => {
754                             pr_err!(
755                                 "validate_parent_fixup returned index out of bounds for sg.entries"
756                             );
757                             return Err(EINVAL.into());
758                         }
759                     };
760 
761                     parent_entry.fixup_min_offset = info.new_min_offset;
762                     parent_entry.pointer_fixups.push(
763                         PointerFixupEntry {
764                             skip: 0,
765                             pointer_value: buffer_ptr_in_user_space,
766                             target_offset: info.target_offset,
767                         },
768                         GFP_KERNEL,
769                     )?;
770                 }
771 
772                 let mut obj_write = BinderBufferObject::default();
773                 obj_write.hdr.type_ = BINDER_TYPE_PTR;
774                 obj_write.flags = obj.flags;
775                 obj_write.buffer = buffer_ptr_in_user_space;
776                 obj_write.length = obj.length;
777                 obj_write.parent = obj.parent;
778                 obj_write.parent_offset = obj.parent_offset;
779                 view.write::<BinderBufferObject>(offset, &obj_write)?;
780             }
781             BinderObjectRef::Fda(obj) => {
782                 if !allow_fds {
783                     return Err(EPERM.into());
784                 }
785                 let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
786                 let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
787                 let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
788                 let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
789 
790                 let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
791                 view.alloc.info_add_fd_reserve(num_fds)?;
792 
793                 sg_state.ancestors.truncate(info.num_ancestors);
794                 let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
795                     Some(parent_entry) => parent_entry,
796                     None => {
797                         pr_err!(
798                             "validate_parent_fixup returned index out of bounds for sg.entries"
799                         );
800                         return Err(EINVAL.into());
801                     }
802                 };
803 
804                 parent_entry.fixup_min_offset = info.new_min_offset;
805                 parent_entry
806                     .pointer_fixups
807                     .push(
808                         PointerFixupEntry {
809                             skip: fds_len,
810                             pointer_value: 0,
811                             target_offset: info.target_offset,
812                         },
813                         GFP_KERNEL,
814                     )
815                     .map_err(|_| ENOMEM)?;
816 
817                 let fda_uaddr = parent_entry
818                     .sender_uaddr
819                     .checked_add(parent_offset)
820                     .ok_or(EINVAL)?;
821                 let mut fda_bytes = KVec::new();
822                 UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
823                     .read_all(&mut fda_bytes, GFP_KERNEL)?;
824 
825                 if fds_len != fda_bytes.len() {
826                     pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
827                     return Err(EINVAL.into());
828                 }
829 
830                 for i in (0..fds_len).step_by(size_of::<u32>()) {
831                     let fd = {
832                         let mut fd_bytes = [0u8; size_of::<u32>()];
833                         fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
834                         u32::from_ne_bytes(fd_bytes)
835                     };
836 
837                     let file = LocalFile::fget(fd)?;
838                     // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
839                     // ioctl, so there are no active calls to `fdget_pos` on this thread.
840                     let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
841                     security::binder_transfer_file(
842                         &self.process.cred,
843                         &view.alloc.process.cred,
844                         &file,
845                     )?;
846 
                    // The `validate_parent_fixup` call ensures that this addition will not
848                     // overflow.
849                     view.alloc.info_add_fd(file, info.target_offset + i, true)?;
850                 }
851                 drop(fda_bytes);
852 
853                 let mut obj_write = BinderFdArrayObject::default();
854                 obj_write.hdr.type_ = BINDER_TYPE_FDA;
855                 obj_write.num_fds = obj.num_fds;
856                 obj_write.parent = obj.parent;
857                 obj_write.parent_offset = obj.parent_offset;
858                 view.write::<BinderFdArrayObject>(offset, &obj_write)?;
859             }
860         }
861         Ok(())
862     }
863 
864     fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
865         for sg_entry in &mut sg_state.sg_entries {
866             let mut end_of_previous_fixup = sg_entry.offset;
867             let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
868 
869             let mut reader =
870                 UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
871             for fixup in &mut sg_entry.pointer_fixups {
872                 let fixup_len = if fixup.skip == 0 {
873                     size_of::<u64>()
874                 } else {
875                     fixup.skip
876                 };
877 
878                 let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
879                 if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
880                     pr_warn!(
881                         "Fixups oob {} {} {} {}",
882                         fixup.target_offset,
883                         end_of_previous_fixup,
884                         offset_end,
885                         target_offset_end
886                     );
887                     return Err(EINVAL.into());
888                 }
889 
890                 let copy_off = end_of_previous_fixup;
891                 let copy_len = fixup.target_offset - end_of_previous_fixup;
892                 if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
893                     pr_warn!("Failed copying into alloc: {:?}", err);
894                     return Err(err.into());
895                 }
896                 if fixup.skip == 0 {
897                     let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
898                     if let Err(err) = res {
899                         pr_warn!("Failed copying ptr into alloc: {:?}", err);
900                         return Err(err.into());
901                     }
902                 }
903                 if let Err(err) = reader.skip(fixup_len) {
904                     pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
905                     return Err(err.into());
906                 }
907                 end_of_previous_fixup = target_offset_end;
908             }
909             let copy_off = end_of_previous_fixup;
910             let copy_len = offset_end - end_of_previous_fixup;
911             if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
912                 pr_warn!("Failed copying remainder into alloc: {:?}", err);
913                 return Err(err.into());
914             }
915         }
916         Ok(())
917     }
918 
    /// This method copies the payload of a transaction into the target process.
    ///
    /// The resulting payload will have several different components, which will be stored next to
    /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
    /// and those objects have to be translated so that they make sense to the target transaction.
    ///
    /// The allocation is laid out as: the transaction data, then the offsets array, then any
    /// scatter-gather buffers, and finally the security context (when one was requested via
    /// `txn_security_ctx_offset`), with each section aligned via `ptr_align`.
    pub(crate) fn copy_transaction_data(
        &self,
        to_process: Arc<Process>,
        tr: &BinderTransactionDataSg,
        debug_id: usize,
        allow_fds: bool,
        txn_security_ctx_offset: Option<&mut usize>,
    ) -> BinderResult<NewAllocation> {
        let trd = &tr.transaction_data;
        let is_oneway = trd.flags & TF_ONE_WAY != 0;
        // Look up the sender's security context up front if the target requested one; its offset
        // within the allocation is reported back through `txn_security_ctx_offset` at the end.
        let mut secctx = if let Some(offset) = txn_security_ctx_offset {
            let secid = self.process.cred.get_secid();
            let ctx = match security::SecurityCtx::from_secid(secid) {
                Ok(ctx) => ctx,
                Err(err) => {
                    pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
                    return Err(err.into());
                }
            };
            Some((offset, ctx))
        } else {
            None
        };

        // Validate and align the sizes of each section of the payload.
        let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
        let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
        let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
        let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
        let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
        let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
        let aligned_secctx_size = match secctx.as_ref() {
            Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
            None => 0,
        };

        // This guarantees that at least `sizeof(usize)` bytes will be allocated.
        let len = usize::max(
            aligned_data_size
                .checked_add(aligned_offsets_size)
                .and_then(|sum| sum.checked_add(aligned_buffers_size))
                .and_then(|sum| sum.checked_add(aligned_secctx_size))
                .ok_or(ENOMEM)?,
            size_of::<usize>(),
        );
        // The security context goes after the data, offsets, and scatter-gather sections.
        let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
        let mut alloc =
            match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
                Ok(alloc) => alloc,
                Err(err) => {
                    pr_warn!(
                        "Failed to allocate buffer. len:{}, is_oneway:{}",
                        len,
                        is_oneway
                    );
                    return Err(err);
                }
            };

        // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
        // all bit-patterns.
        let trd_data_ptr = unsafe { &trd.data.ptr };
        let mut buffer_reader =
            UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
        // Offset (into the data section) just past the last object copied so far; used both to
        // reject out-of-order offsets and to copy the raw bytes between objects.
        let mut end_of_previous_object = 0;
        let mut sg_state = None;

        // Copy offsets if there are any.
        if offsets_size > 0 {
            {
                let mut reader =
                    UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
                        .reader();
                alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
            }

            let offsets_start = aligned_data_size;
            let offsets_end = aligned_data_size + aligned_offsets_size;

            // This state is used for BINDER_TYPE_PTR objects. The scatter-gather buffers occupy
            // the space between the end of the offsets section and the end of the allocation.
            let sg_state = sg_state.insert(ScatterGatherState {
                unused_buffer_space: UnusedBufferSpace {
                    offset: offsets_end,
                    limit: len,
                },
                sg_entries: KVec::new(),
                ancestors: KVec::new(),
            });

            // Traverse the objects specified.
            let mut view = AllocationView::new(&mut alloc, data_size);
            for (index, index_offset) in (offsets_start..offsets_end)
                .step_by(size_of::<usize>())
                .enumerate()
            {
                // Each entry in the offsets array points at an object within the data section.
                let offset = view.alloc.read(index_offset)?;

                // Offsets must be in increasing order and objects must not overlap.
                if offset < end_of_previous_object {
                    pr_warn!("Got transaction with invalid offset.");
                    return Err(EINVAL.into());
                }

                // Copy data between two objects.
                if end_of_previous_object < offset {
                    view.copy_into(
                        &mut buffer_reader,
                        end_of_previous_object,
                        offset - end_of_previous_object,
                    )?;
                }

                let mut object = BinderObject::read_from(&mut buffer_reader)?;

                match self.translate_object(
                    index,
                    offset,
                    object.as_ref(),
                    &mut view,
                    allow_fds,
                    sg_state,
                ) {
                    Ok(()) => end_of_previous_object = offset + object.size(),
                    Err(err) => {
                        pr_warn!("Error while translating object.");
                        return Err(err);
                    }
                }

                // Update the indexes containing objects to clean up.
                let offset_after_object = index_offset + size_of::<usize>();
                view.alloc
                    .set_info_offsets(offsets_start..offset_after_object);
            }
        }

        // Copy remaining raw data.
        alloc.copy_into(
            &mut buffer_reader,
            end_of_previous_object,
            data_size - end_of_previous_object,
        )?;

        // Copy the scatter-gather buffers and apply the recorded pointer fixups.
        if let Some(sg_state) = sg_state.as_mut() {
            if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
                pr_warn!("Failure in apply_sg: {:?}", err);
                return Err(err);
            }
        }

        // Write the security context and report its offset back to the caller.
        if let Some((off_out, secctx)) = secctx.as_mut() {
            if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
                pr_warn!("Failed to write security context: {:?}", err);
                return Err(err.into());
            }
            **off_out = secctx_off;
        }
        Ok(alloc)
    }
1081 
1082     fn unwind_transaction_stack(self: &Arc<Self>) {
1083         let mut thread = self.clone();
1084         while let Ok(transaction) = {
1085             let mut inner = thread.inner.lock();
1086             inner.pop_transaction_to_reply(thread.as_ref())
1087         } {
1088             let reply = Err(BR_DEAD_REPLY);
1089             if !transaction.from.deliver_single_reply(reply, &transaction) {
1090                 break;
1091             }
1092 
1093             thread = transaction.from.clone();
1094         }
1095     }
1096 
1097     pub(crate) fn deliver_reply(
1098         &self,
1099         reply: Result<DLArc<Transaction>, u32>,
1100         transaction: &DArc<Transaction>,
1101     ) {
1102         if self.deliver_single_reply(reply, transaction) {
1103             transaction.from.unwind_transaction_stack();
1104         }
1105     }
1106 
1107     /// Delivers a reply to the thread that started a transaction. The reply can either be a
1108     /// reply-transaction or an error code to be delivered instead.
1109     ///
1110     /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
1111     /// transaction stack by completing transactions for threads that are dead.
1112     fn deliver_single_reply(
1113         &self,
1114         reply: Result<DLArc<Transaction>, u32>,
1115         transaction: &DArc<Transaction>,
1116     ) -> bool {
1117         if let Ok(transaction) = &reply {
1118             transaction.set_outstanding(&mut self.process.inner.lock());
1119         }
1120 
1121         {
1122             let mut inner = self.inner.lock();
1123             if !inner.pop_transaction_replied(transaction) {
1124                 return false;
1125             }
1126 
1127             if inner.is_dead {
1128                 return true;
1129             }
1130 
1131             match reply {
1132                 Ok(work) => {
1133                     inner.push_work(work);
1134                 }
1135                 Err(code) => inner.push_reply_work(code),
1136             }
1137         }
1138 
1139         // Notify the thread now that we've released the inner lock.
1140         self.work_condvar.notify_sync();
1141         false
1142     }
1143 
1144     /// Determines if the given transaction is the current transaction for this thread.
1145     fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
1146         let inner = self.inner.lock();
1147         match &inner.current_transaction {
1148             None => false,
1149             Some(current) => Arc::ptr_eq(current, transaction),
1150         }
1151     }
1152 
1153     /// Determines the current top of the transaction stack. It fails if the top is in another
1154     /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
1155     /// [`None`] if the thread is not currently participating in a transaction stack.
1156     fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
1157         let inner = self.inner.lock();
1158         if let Some(cur) = &inner.current_transaction {
1159             if core::ptr::eq(self, cur.from.as_ref()) {
1160                 pr_warn!("got new transaction with bad transaction stack");
1161                 return Err(EINVAL);
1162             }
1163             Ok(Some(cur.clone()))
1164         } else {
1165             Ok(None)
1166         }
1167     }
1168 
1169     fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
1170     where
1171         T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
1172     {
1173         if let Err(err) = inner(self, tr) {
1174             if err.should_pr_warn() {
1175                 let mut ee = self.inner.lock().extended_error;
1176                 ee.command = err.reply;
1177                 ee.param = err.as_errno();
1178                 pr_warn!(
1179                     "Transaction failed: {:?} my_pid:{}",
1180                     err,
1181                     self.process.pid_in_current_ns()
1182                 );
1183             }
1184 
1185             self.push_return_work(err.reply);
1186         }
1187     }
1188 
    /// Handles a non-oneway `BC_TRANSACTION`/`BC_TRANSACTION_SG`: looks up the target node from
    /// the handle, performs the LSM check, builds the transaction, pushes it onto this thread's
    /// transaction stack, and submits it to the target.
    fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        // SAFETY: Handle's type has no invalid bit patterns.
        let handle = unsafe { tr.transaction_data.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
        // could this happen?
        let top = self.top_of_transaction_stack()?;
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
        let completion = list_completion.clone_arc();
        let transaction = Transaction::new(node_ref, top, self, tr)?;

        // Check that the transaction stack hasn't changed while the lock was released, then update
        // it with the new transaction.
        {
            let mut inner = self.inner.lock();
            if !transaction.is_stacked_on(&inner.current_transaction) {
                pr_warn!("Transaction stack changed during transaction!");
                return Err(EINVAL.into());
            }
            inner.current_transaction = Some(transaction.clone_arc());
            // We push the completion as a deferred work so that we wait for the reply before
            // returning to userland.
            inner.push_work_deferred(list_completion);
        }

        if let Err(e) = transaction.submit() {
            // Submission failed: skip the queued completion and pop the transaction we just
            // pushed, restoring the previous top of the stack.
            completion.skip();
            // Define `transaction` first to drop it after `inner`.
            let transaction;
            let mut inner = self.inner.lock();
            transaction = inner.current_transaction.take().unwrap();
            inner.current_transaction = transaction.clone_next();
            Err(e)
        } else {
            Ok(())
        }
    }
1227 
1228     fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1229         let orig = self.inner.lock().pop_transaction_to_reply(self)?;
1230         if !orig.from.is_current_transaction(&orig) {
1231             return Err(EINVAL.into());
1232         }
1233 
1234         // We need to complete the transaction even if we cannot complete building the reply.
1235         let out = (|| -> BinderResult<_> {
1236             let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
1237             let process = orig.from.process.clone();
1238             let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
1239             let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
1240             self.inner.lock().push_work(completion);
1241             orig.from.deliver_reply(Ok(reply), &orig);
1242             Ok(())
1243         })()
1244         .map_err(|mut err| {
1245             // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
1246             // the sender know that the transaction has completed (with an error in this case).
1247             pr_warn!(
1248                 "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
1249                 err
1250             );
1251             let reply = Err(BR_FAILED_REPLY);
1252             orig.from.deliver_reply(reply, &orig);
1253             err.reply = BR_TRANSACTION_COMPLETE;
1254             err
1255         });
1256 
1257         out
1258     }
1259 
1260     fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1261         // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
1262         // union is okay.
1263         let handle = unsafe { tr.transaction_data.target.handle };
1264         let node_ref = self.process.get_transaction_node(handle)?;
1265         security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
1266         let transaction = Transaction::new(node_ref, None, self, tr)?;
1267         let code = if self.process.is_oneway_spam_detection_enabled()
1268             && transaction.oneway_spam_detected
1269         {
1270             BR_ONEWAY_SPAM_SUSPECT
1271         } else {
1272             BR_TRANSACTION_COMPLETE
1273         };
1274         let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
1275         let completion = list_completion.clone_arc();
1276         self.inner.lock().push_work(list_completion);
1277         match transaction.submit() {
1278             Ok(()) => Ok(()),
1279             Err(err) => {
1280                 completion.skip();
1281                 Err(err)
1282             }
1283         }
1284     }
1285 
    /// Processes the write half of a `BINDER_WRITE_READ` ioctl.
    ///
    /// Reads `BC_*` commands one at a time from the userspace write buffer, starting at
    /// `write_consumed`, and dispatches each one. `req.write_consumed` is advanced after every
    /// command so userspace can resume correctly if processing stops partway through. The loop
    /// also stops once return work becomes pending for this thread.
    fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
        let write_start = req.write_buffer.wrapping_add(req.write_consumed);
        let write_len = req.write_size.saturating_sub(req.write_consumed);
        let mut reader =
            UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();

        while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
            // Remember the reader position so we can account for how many bytes this command
            // consumed.
            let before = reader.len();
            let cmd = reader.read::<u32>()?;
            // Update command statistics, both global and per-process.
            GLOBAL_STATS.inc_bc(cmd);
            self.process.stats.inc_bc(cmd);
            match cmd {
                BC_TRANSACTION => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_TRANSACTION_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_REPLY => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_REPLY_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_FREE_BUFFER => {
                    let buffer = self.process.buffer_get(reader.read()?);
                    if let Some(buffer) = buffer {
                        if buffer.looper_need_return_on_free() {
                            self.inner.lock().looper_need_return = true;
                        }
                        drop(buffer);
                    }
                }
                BC_INCREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, false)?
                }
                BC_ACQUIRE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, true)?
                }
                BC_RELEASE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, true)?
                }
                BC_DECREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, false)?
                }
                BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
                BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
                BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
                BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
                BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
                BC_REGISTER_LOOPER => {
                    let valid = self.process.register_thread();
                    self.inner.lock().looper_register(valid);
                }
                BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
                BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
                BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
                BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
                BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,

                // Fail if given an unknown error code.
                // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
                _ => return Err(EINVAL),
            }
            // Update the number of write bytes consumed.
            req.write_consumed += (before - reader.len()) as u64;
        }

        Ok(())
    }
1376 
    /// Processes the read half of a `BINDER_WRITE_READ` ioctl.
    ///
    /// Pops work items — from the process-wide queue or this thread's local queue depending on
    /// looper state — and serializes them into the userspace read buffer until the buffer cannot
    /// hold another maximum-size item, no more work is available, or a work item requests that we
    /// stop. If `wait` is true, only the first fetch (before anything has been written) may
    /// block. `req.read_consumed` is advanced by the number of bytes written.
    fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
        let read_start = req.read_buffer.wrapping_add(req.read_consumed);
        let read_len = req.read_size.saturating_sub(req.read_consumed);
        let mut writer = BinderReturnWriter::new(
            UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
            self,
        );
        let (in_pool, use_proc_queue) = {
            let inner = self.inner.lock();
            (inner.is_looper(), inner.should_use_process_work_queue())
        };

        // Select which queue work items are fetched from.
        let getter = if use_proc_queue {
            Self::get_work
        } else {
            Self::get_work_local
        };

        // Reserve some room at the beginning of the read buffer so that we can send a
        // BR_SPAWN_LOOPER if we need to.
        let mut has_noop_placeholder = false;
        if req.read_consumed == 0 {
            if let Err(err) = writer.write_code(BR_NOOP) {
                pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
                return Err(err);
            }
            has_noop_placeholder = true;
        }

        // Loop doing work while there is room in the buffer.
        let initial_len = writer.len();
        while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
            // Only allow blocking while nothing has been written into the buffer yet.
            match getter(self, wait && initial_len == writer.len()) {
                Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
                    Ok(true) => {}
                    Ok(false) => break,
                    Err(err) => {
                        return Err(err);
                    }
                },
                Ok(None) => {
                    break;
                }
                Err(err) => {
                    // Propagate the error if we haven't written anything else.
                    if err != EINTR && err != EAGAIN {
                        pr_warn!("Failure in work getter: {:?}", err);
                    }
                    if initial_len == writer.len() {
                        return Err(err);
                    } else {
                        break;
                    }
                }
            }
        }

        req.read_consumed += read_len - writer.len() as u64;

        // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
        if has_noop_placeholder && in_pool && self.process.needs_thread() {
            let mut writer =
                UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
                    .writer();
            writer.write(&BR_SPAWN_LOOPER)?;
        }
        Ok(())
    }
1445 
1446     pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
1447         let (mut reader, mut writer) = data.reader_writer();
1448         let mut req = reader.read::<BinderWriteRead>()?;
1449 
1450         // Go through the write buffer.
1451         let mut ret = Ok(());
1452         if req.write_size > 0 {
1453             ret = self.write(&mut req);
1454             if let Err(err) = ret {
1455                 pr_warn!(
1456                     "Write failure {:?} in pid:{}",
1457                     err,
1458                     self.process.pid_in_current_ns()
1459                 );
1460                 req.read_consumed = 0;
1461                 writer.write(&req)?;
1462                 self.inner.lock().looper_need_return = false;
1463                 return ret;
1464             }
1465         }
1466 
1467         // Go through the work queue.
1468         if req.read_size > 0 {
1469             ret = self.read(&mut req, wait);
1470             if ret.is_err() && ret != Err(EINTR) {
1471                 pr_warn!(
1472                     "Read failure {:?} in pid:{}",
1473                     ret,
1474                     self.process.pid_in_current_ns()
1475                 );
1476             }
1477         }
1478 
1479         // Write the request back so that the consumed fields are visible to the caller.
1480         writer.write(&req)?;
1481 
1482         self.inner.lock().looper_need_return = false;
1483 
1484         ret
1485     }
1486 
1487     pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
1488         table.register_wait(file, &self.work_condvar);
1489         let mut inner = self.inner.lock();
1490         (inner.should_use_process_work_queue(), inner.poll())
1491     }
1492 
1493     /// Make the call to `get_work` or `get_work_local` return immediately, if any.
1494     pub(crate) fn exit_looper(&self) {
1495         let mut inner = self.inner.lock();
1496         let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
1497         if should_notify {
1498             inner.looper_need_return = true;
1499         }
1500         drop(inner);
1501 
1502         if should_notify {
1503             self.work_condvar.notify_one();
1504         }
1505     }
1506 
1507     pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
1508         // Determine if we need to notify. This requires the lock.
1509         let inner = self.inner.lock();
1510         let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
1511         drop(inner);
1512 
1513         // Now that the lock is no longer held, notify the waiters if we have to.
1514         if notify {
1515             if sync {
1516                 self.work_condvar.notify_sync();
1517             } else {
1518                 self.work_condvar.notify_one();
1519             }
1520         }
1521     }
1522 
1523     pub(crate) fn release(self: &Arc<Self>) {
1524         self.inner.lock().is_dead = true;
1525 
1526         //self.work_condvar.clear();
1527         self.unwind_transaction_stack();
1528 
1529         // Cancel all pending work items.
1530         while let Ok(Some(work)) = self.get_work_local(false) {
1531             work.into_arc().cancel();
1532         }
1533     }
1534 }
1535 
/// A deliverable work item that carries a pending error code to report to
/// userspace.
///
/// An `error_code` of `BR_OK` marks the item as unused (see `is_unused`).
#[pin_data]
struct ThreadError {
    // The `BR_*` code to deliver; `BR_OK` means no error is currently pending.
    error_code: Atomic<u32>,
    // Tracks whether this item is currently inserted in a delivery list.
    #[pin]
    links_track: AtomicTracker,
}
1542 
1543 impl ThreadError {
1544     fn try_new() -> Result<DArc<Self>> {
1545         DTRWrap::arc_pin_init(pin_init!(Self {
1546             error_code: Atomic::new(BR_OK),
1547             links_track <- AtomicTracker::new(),
1548         }))
1549         .map(ListArc::into_arc)
1550     }
1551 
1552     fn set_error_code(&self, code: u32) {
1553         self.error_code.store(code, Relaxed);
1554     }
1555 
1556     fn is_unused(&self) -> bool {
1557         self.error_code.load(Relaxed) == BR_OK
1558     }
1559 }
1560 
1561 impl DeliverToRead for ThreadError {
1562     fn do_work(
1563         self: DArc<Self>,
1564         _thread: &Thread,
1565         writer: &mut BinderReturnWriter<'_>,
1566     ) -> Result<bool> {
1567         let code = self.error_code.load(Relaxed);
1568         self.error_code.store(BR_OK, Relaxed);
1569         writer.write_code(code)?;
1570         Ok(true)
1571     }
1572 
1573     fn cancel(self: DArc<Self>) {}
1574 
1575     fn should_sync_wakeup(&self) -> bool {
1576         false
1577     }
1578 
1579     fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
1580         seq_print!(
1581             m,
1582             "{}transaction error: {}\n",
1583             prefix,
1584             self.error_code.load(Relaxed)
1585         );
1586         Ok(())
1587     }
1588 }
1589 
// `ThreadError` can be inserted into a `List`; whether it is currently tracked
// by a list is recorded in the struct's `links_track` atomic.
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for ThreadError {
        tracked_by links_track: AtomicTracker;
    }
}
1595