xref: /linux/drivers/android/binder/process.rs (revision 68a052239fc4b351e961f698b824f7654a346091)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 // Copyright (C) 2025 Google LLC.
4 
5 //! This module defines the `Process` type, which represents a process using a particular binder
6 //! context.
7 //!
8 //! The `Process` object keeps track of all of the resources that this process owns in the binder
9 //! context.
10 //!
11 //! There is one `Process` object for each binder fd that a process has opened, so processes using
12 //! several binder contexts have several `Process` objects. This ensures that the contexts are
13 //! fully separated.
14 
15 use core::mem::take;
16 
17 use kernel::{
18     bindings,
19     cred::Credential,
20     error::Error,
21     fs::file::{self, File},
22     list::{List, ListArc, ListArcField, ListLinks},
23     mm,
24     prelude::*,
25     rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
26     seq_file::SeqFile,
27     seq_print,
28     sync::poll::PollTable,
29     sync::{
30         lock::{spinlock::SpinLockBackend, Guard},
31         Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
32     },
33     task::Task,
34     types::ARef,
35     uaccess::{UserSlice, UserSliceReader},
36     uapi,
37     workqueue::{self, Work},
38 };
39 
40 use crate::{
41     allocation::{Allocation, AllocationInfo, NewAllocation},
42     context::Context,
43     defs::*,
44     error::{BinderError, BinderResult},
45     node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
46     page_range::ShrinkablePageRange,
47     range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
48     stats::BinderStats,
49     thread::{PushWorkRes, Thread},
50     BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
51 };
52 
53 #[path = "freeze.rs"]
54 mod freeze;
55 use self::freeze::{FreezeCookie, FreezeListener};
56 
57 struct Mapping {
58     address: usize,
59     alloc: RangeAllocator<AllocationInfo>,
60 }
61 
62 impl Mapping {
63     fn new(address: usize, size: usize) -> Self {
64         Self {
65             address,
66             alloc: RangeAllocator::new(size),
67         }
68     }
69 }
70 
71 // Bitflags for `defer_work`.
72 const PROC_DEFER_FLUSH: u8 = 1;
73 const PROC_DEFER_RELEASE: u8 = 2;
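// The bits are set when deferred work is requested and tested in the `workqueue::WorkItem`
// implementation for `Process` below. A minimal sketch of the requesting side (hypothetical
// helper; the real setter is outside this excerpt):
//
//     fn request_deferred_work(process: &Process, flags: u8) {
//         process.inner.lock().defer_work |= flags;
//         // ...then enqueue the process on the workqueue so that `run` picks up the bits,
//         // clears them, and performs the flush/release.
//     }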
74 
75 /// The fields of `Process` protected by the spinlock.
76 pub(crate) struct ProcessInner {
77     is_manager: bool,
78     pub(crate) is_dead: bool,
79     threads: RBTree<i32, Arc<Thread>>,
80     /// INVARIANT: Threads pushed to this list must be owned by this process.
81     ready_threads: List<Thread>,
82     nodes: RBTree<u64, DArc<Node>>,
83     mapping: Option<Mapping>,
84     work: List<DTRWrap<dyn DeliverToRead>>,
85     delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
86 
87     /// The number of requested threads that haven't registered yet.
88     requested_thread_count: u32,
89     /// The maximum number of threads used by the process thread pool.
90     max_threads: u32,
91     /// The number of threads that have started and registered with the thread pool.
92     started_thread_count: u32,
93 
94     /// Bitmap of deferred work to do.
95     defer_work: u8,
96 
97     /// The number of outstanding transactions that must complete before processes
98     /// waiting in `freeze_wait` are woken up.
99     outstanding_txns: u32,
100     /// Process is frozen and unable to service binder transactions.
101     pub(crate) is_frozen: bool,
102     /// Process received sync transactions since last frozen.
103     pub(crate) sync_recv: bool,
104     /// Process received async transactions since last frozen.
105     pub(crate) async_recv: bool,
106     pub(crate) binderfs_file: Option<BinderfsProcFile>,
107     /// Whether oneway spam detection is enabled.
108     oneway_spam_detection_enabled: bool,
109 }
110 
111 impl ProcessInner {
112     fn new() -> Self {
113         Self {
114             is_manager: false,
115             is_dead: false,
116             threads: RBTree::new(),
117             ready_threads: List::new(),
118             mapping: None,
119             nodes: RBTree::new(),
120             work: List::new(),
121             delivered_deaths: List::new(),
122             requested_thread_count: 0,
123             max_threads: 0,
124             started_thread_count: 0,
125             defer_work: 0,
126             outstanding_txns: 0,
127             is_frozen: false,
128             sync_recv: false,
129             async_recv: false,
130             binderfs_file: None,
131             oneway_spam_detection_enabled: false,
132         }
133     }
134 
135     /// Schedule the work item for execution on this process.
136     ///
137     /// If any threads are ready for work, then the work item is given directly to that thread and
138     /// it is woken up. Otherwise, it is pushed to the process work list.
139     ///
140     /// This call can fail only if the process is dead. In this case, the work item is returned to
141     /// the caller so that the caller can drop it after releasing the inner process lock. This is
142     /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
143     /// taken while holding the inner process lock.
144     pub(crate) fn push_work(
145         &mut self,
146         work: DLArc<dyn DeliverToRead>,
147     ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
148         // Try to find a ready thread to which to push the work.
149         if let Some(thread) = self.ready_threads.pop_front() {
150             // Push to thread while holding state lock. This prevents the thread from giving up
151             // (for example, because of a signal) when we're about to deliver work.
152             match thread.push_work(work) {
153                 PushWorkRes::Ok => Ok(()),
154                 PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
155             }
156         } else if self.is_dead {
157             Err((BinderError::new_dead(), work))
158         } else {
159             let sync = work.should_sync_wakeup();
160 
161             // Didn't find a thread waiting for proc work; this can happen
162             // in two scenarios:
163             // 1. All threads are busy handling transactions
164             //    In that case, one of those threads should call back into
165             //    the kernel driver soon and pick up this work.
166             // 2. Threads are using the (e)poll interface, in which case
167             //    they may be blocked on the waitqueue without having been
168             //    added to waiting_threads. For this case, we just iterate
169             //    over all threads not handling transaction work, and
170             //    wake them all up. We wake all because we don't know whether
171             //    a thread that called into (e)poll is handling non-binder
172             //    work currently.
173             self.work.push_back(work);
174 
175             // Wake up polling threads, if any.
176             for thread in self.threads.values() {
177                 thread.notify_if_poll_ready(sync);
178             }
179 
180             Ok(())
181         }
182     }
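    // A caller-side sketch of the error contract described above: the returned work item must
    // only be dropped after the `ProcessInner` guard is gone (illustration only;
    // `Process::push_work` further down in this file follows the same pattern):
    //
    //     let res = process.inner.lock().push_work(work);
    //     if let Err((err, work)) = res {
    //         // The temporary guard has already been dropped here, so running the
    //         // destructor of `work` cannot deadlock on the inner lock.
    //         drop(work);
    //         return Err(err);
    //     }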
183 
184     pub(crate) fn remove_node(&mut self, ptr: u64) {
185         self.nodes.remove(&ptr);
186     }
187 
188     /// Updates the reference count on the given node.
189     pub(crate) fn update_node_refcount(
190         &mut self,
191         node: &DArc<Node>,
192         inc: bool,
193         strong: bool,
194         count: usize,
195         othread: Option<&Thread>,
196     ) {
197         let push = node.update_refcount_locked(inc, strong, count, self);
198 
199         // If we decided that we need to push work, push either to the process or to a thread if
200         // one is specified.
201         if let Some(node) = push {
202             if let Some(thread) = othread {
203                 thread.push_work_deferred(node);
204             } else {
205                 let _ = self.push_work(node);
206                 // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
207                 // that case, it doesn't care about the notification.
208             }
209         }
210     }
211 
212     pub(crate) fn new_node_ref(
213         &mut self,
214         node: DArc<Node>,
215         strong: bool,
216         thread: Option<&Thread>,
217     ) -> NodeRef {
218         self.update_node_refcount(&node, true, strong, 1, thread);
219         let strong_count = if strong { 1 } else { 0 };
220         NodeRef::new(node, strong_count, 1 - strong_count)
221     }
222 
223     pub(crate) fn new_node_ref_with_thread(
224         &mut self,
225         node: DArc<Node>,
226         strong: bool,
227         thread: &Thread,
228         wrapper: Option<CritIncrWrapper>,
229     ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
230         let push = match wrapper {
231             None => node
232                 .incr_refcount_allow_zero2one(strong, self)?
233                 .map(|node| node as _),
234             Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
235         };
236         if let Some(node) = push {
237             thread.push_work_deferred(node);
238         }
239         let strong_count = if strong { 1 } else { 0 };
240         Ok(NodeRef::new(node, strong_count, 1 - strong_count))
241     }
242 
243     /// Returns an existing node with the given pointer and cookie, if one exists.
244     ///
245     /// Returns an error if a node with the given pointer but a different cookie exists.
246     fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
247         match self.nodes.get(&ptr) {
248             None => Ok(None),
249             Some(node) => {
250                 let (_, node_cookie) = node.get_id();
251                 if node_cookie == cookie {
252                     Ok(Some(node.clone()))
253                 } else {
254                     Err(EINVAL)
255                 }
256             }
257         }
258     }
259 
260     fn register_thread(&mut self) -> bool {
261         if self.requested_thread_count == 0 {
262             return false;
263         }
264 
265         self.requested_thread_count -= 1;
266         self.started_thread_count += 1;
267         true
268     }
269 
270     /// Finds a delivered death notification with the given cookie, removes it from the
271     /// process's delivered list, and returns it.
272     fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
273         let mut cursor = self.delivered_deaths.cursor_front();
274         while let Some(next) = cursor.peek_next() {
275             if next.cookie == cookie {
276                 return Some(next.remove().into_arc());
277             }
278             cursor.move_next();
279         }
280         None
281     }
282 
283     pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
284         if let Some(death) = ListArc::try_from_arc_or_drop(death) {
285             self.delivered_deaths.push_back(death);
286         } else {
287             pr_warn!("Notification added to `delivered_deaths` twice.");
288         }
289     }
290 
291     pub(crate) fn add_outstanding_txn(&mut self) {
292         self.outstanding_txns += 1;
293     }
294 
295     fn txns_pending_locked(&self) -> bool {
296         if self.outstanding_txns > 0 {
297             return true;
298         }
299         for thread in self.threads.values() {
300             if thread.has_current_transaction() {
301                 return true;
302             }
303         }
304         false
305     }
306 }
307 
308 /// Used to keep track of a node that this process has a handle to.
309 #[pin_data]
310 pub(crate) struct NodeRefInfo {
311     debug_id: usize,
312     /// The refcount that this process owns to the node.
313     node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
314     death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
315     /// Cookie of the active freeze listener for this node.
316     freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
317     /// Used to store this `NodeRefInfo` in the node's `refs` list.
318     #[pin]
319     links: ListLinks<{ Self::LIST_NODE }>,
320     /// The handle for this `NodeRefInfo`.
321     handle: u32,
322     /// The process that has a handle to the node.
323     pub(crate) process: Arc<Process>,
324 }
325 
326 impl NodeRefInfo {
327     /// The id used for the `Node::refs` list.
328     pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
329     /// The id used for the `ListArc` in `ProcessNodeRefs`.
330     const LIST_PROC: u64 = 0xd703a5263dcc8650;
331 
332     fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
333         pin_init!(Self {
334             debug_id: super::next_debug_id(),
335             node_ref: ListArcField::new(node_ref),
336             death: ListArcField::new(None),
337             freeze: ListArcField::new(None),
338             links <- ListLinks::new(),
339             handle,
340             process,
341         })
342     }
343 
344     kernel::list::define_list_arc_field_getter! {
345         pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
346         pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
347         pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
348         pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
349     }
350 }
351 
352 kernel::list::impl_list_arc_safe! {
353     impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
354     impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
355 }
356 kernel::list::impl_list_item! {
357     impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
358         using ListLinks { self.links };
359     }
360 }
361 
362 /// Keeps track of references this process has to nodes owned by other processes.
363 ///
364 /// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
365 /// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
366 /// extra costs should be eliminated.
367 struct ProcessNodeRefs {
368     /// Used to look up nodes using the 32-bit id that this process knows it by.
369     by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
370     /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
371     /// the underlying `Node` struct as returned by `Node::global_id`.
372     by_node: RBTree<usize, u32>,
373     /// Used to look up a `FreezeListener` by cookie.
374     ///
375     /// There might be multiple freeze listeners for the same node, but at most one of them is
376     /// active.
377     freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
378 }
379 
380 impl ProcessNodeRefs {
381     fn new() -> Self {
382         Self {
383             by_handle: RBTree::new(),
384             by_node: RBTree::new(),
385             freeze_listeners: RBTree::new(),
386         }
387     }
388 }
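// Looking up a reference by node rather than by handle is a two-step walk over both trees. A
// minimal sketch of the pattern used by `insert_or_update_handle` below (illustration only):
//
//     let mut refs = process.node_refs.lock();
//     if let Some(handle) = refs.by_node.get(&node.global_id()) {
//         let info = refs.by_handle.get_mut(handle).unwrap();
//         // `info` is the `NodeRefInfo` describing this process's reference to the node.
//     }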
389 
390 /// A process using binder.
391 ///
392 /// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
393 /// that a process has opened, so processes using several binder contexts have several `Process`
394 /// objects. This ensures that the contexts are fully separated.
395 #[pin_data]
396 pub(crate) struct Process {
397     pub(crate) ctx: Arc<Context>,
398 
399     // The task leader (process).
400     pub(crate) task: ARef<Task>,
401 
402     // Credential associated with file when `Process` is created.
403     pub(crate) cred: ARef<Credential>,
404 
405     #[pin]
406     pub(crate) inner: SpinLock<ProcessInner>,
407 
408     #[pin]
409     pub(crate) pages: ShrinkablePageRange,
410 
411     // Waitqueue of processes waiting for all outstanding transactions to be
412     // processed.
413     #[pin]
414     freeze_wait: CondVar,
415 
416     // Node references are protected by a separate lock to avoid recursive lock acquisition
417     // when incrementing/decrementing a node owned by another process.
418     #[pin]
419     node_refs: Mutex<ProcessNodeRefs>,
420 
421     // Work node for deferred work item.
422     #[pin]
423     defer_work: Work<Process>,
424 
425     // Links for process list in Context.
426     #[pin]
427     links: ListLinks,
428 
429     pub(crate) stats: BinderStats,
430 }
431 
432 kernel::impl_has_work! {
433     impl HasWork<Process> for Process { self.defer_work }
434 }
435 
436 kernel::list::impl_list_arc_safe! {
437     impl ListArcSafe<0> for Process { untracked; }
438 }
439 kernel::list::impl_list_item! {
440     impl ListItem<0> for Process {
441         using ListLinks { self.links };
442     }
443 }
444 
445 impl workqueue::WorkItem for Process {
446     type Pointer = Arc<Process>;
447 
448     fn run(me: Arc<Self>) {
449         let defer;
450         {
451             let mut inner = me.inner.lock();
452             defer = inner.defer_work;
453             inner.defer_work = 0;
454         }
455 
456         if defer & PROC_DEFER_FLUSH != 0 {
457             me.deferred_flush();
458         }
459         if defer & PROC_DEFER_RELEASE != 0 {
460             me.deferred_release();
461         }
462     }
463 }
464 
465 impl Process {
466     fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
467         let current = kernel::current!();
468         let list_process = ListArc::pin_init::<Error>(
469             try_pin_init!(Process {
470                 ctx,
471                 cred,
472                 inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
473                 pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
474                 node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
475                 freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
476                 task: current.group_leader().into(),
477                 defer_work <- kernel::new_work!("Process::defer_work"),
478                 links <- ListLinks::new(),
479                 stats: BinderStats::new(),
480             }),
481             GFP_KERNEL,
482         )?;
483 
484         let process = list_process.clone_arc();
485         process.ctx.register_process(list_process);
486 
487         Ok(process)
488     }
489 
490     pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
491         self.task.tgid_nr_ns(None)
492     }
493 
494     #[inline(never)]
495     pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
496         seq_print!(m, "proc {}\n", self.pid_in_current_ns());
497         seq_print!(m, "context {}\n", &*ctx.name);
498 
499         let inner = self.inner.lock();
500         seq_print!(m, "  threads: {}\n", inner.threads.iter().count());
501         seq_print!(
502             m,
503             "  requested threads: {}+{}/{}\n",
504             inner.requested_thread_count,
505             inner.started_thread_count,
506             inner.max_threads,
507         );
508         if let Some(mapping) = &inner.mapping {
509             seq_print!(
510                 m,
511                 "  free oneway space: {}\n",
512                 mapping.alloc.free_oneway_space()
513             );
514             seq_print!(m, "  buffers: {}\n", mapping.alloc.count_buffers());
515         }
516         seq_print!(
517             m,
518             "  outstanding transactions: {}\n",
519             inner.outstanding_txns
520         );
521         seq_print!(m, "  nodes: {}\n", inner.nodes.iter().count());
522         drop(inner);
523 
524         {
525             let mut refs = self.node_refs.lock();
526             let (mut count, mut weak, mut strong) = (0, 0, 0);
527             for r in refs.by_handle.values_mut() {
528                 let node_ref = r.node_ref();
529                 let (nstrong, nweak) = node_ref.get_count();
530                 count += 1;
531                 weak += nweak;
532                 strong += nstrong;
533             }
534             seq_print!(m, "  refs: {count} s {strong} w {weak}\n");
535         }
536 
537         self.stats.debug_print("  ", m);
538 
539         Ok(())
540     }
541 
542     #[inline(never)]
543     pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
544         seq_print!(m, "proc {}\n", self.pid_in_current_ns());
545         seq_print!(m, "context {}\n", &*ctx.name);
546 
547         let mut all_threads = KVec::new();
548         let mut all_nodes = KVec::new();
549         loop {
550             let inner = self.inner.lock();
551             let num_threads = inner.threads.iter().count();
552             let num_nodes = inner.nodes.iter().count();
553 
554             if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
555                 drop(inner);
556                 all_threads.reserve(num_threads, GFP_KERNEL)?;
557                 all_nodes.reserve(num_nodes, GFP_KERNEL)?;
558                 continue;
559             }
560 
561             for thread in inner.threads.values() {
562                 assert!(all_threads.len() < all_threads.capacity());
563                 let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
564             }
565 
566             for node in inner.nodes.values() {
567                 assert!(all_nodes.len() < all_nodes.capacity());
568                 let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
569             }
570 
571             break;
572         }
573 
574         for thread in all_threads {
575             thread.debug_print(m, print_all)?;
576         }
577 
578         let mut inner = self.inner.lock();
579         for node in all_nodes {
580             if print_all || node.has_oneway_transaction(&mut inner) {
581                 node.full_debug_print(m, &mut inner)?;
582             }
583         }
584         drop(inner);
585 
586         if print_all {
587             let mut refs = self.node_refs.lock();
588             for r in refs.by_handle.values_mut() {
589                 let node_ref = r.node_ref();
590                 let dead = node_ref.node.owner.inner.lock().is_dead;
591                 let (strong, weak) = node_ref.get_count();
592                 let debug_id = node_ref.node.debug_id;
593 
594                 seq_print!(
595                     m,
596                     "  ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
597                     r.debug_id,
598                     r.handle,
599                     if dead { "dead " } else { "" },
600                 );
601             }
602         }
603 
604         let inner = self.inner.lock();
605         for work in &inner.work {
606             work.debug_print(m, "  ", "  pending transaction ")?;
607         }
608         for _death in &inner.delivered_deaths {
609             seq_print!(m, "  has delivered dead binder\n");
610         }
611         if let Some(mapping) = &inner.mapping {
612             mapping.alloc.debug_print(m)?;
613         }
614         drop(inner);
615 
616         Ok(())
617     }
618 
619     /// Attempts to fetch a work item from the process queue.
620     pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
621         self.inner.lock().work.pop_front()
622     }
623 
624     /// Attempts to fetch a work item from the process queue. If none is available, it registers the
625     /// given thread as ready to receive work directly.
626     ///
627     /// This must only be called when the thread is not participating in a transaction chain; when
628     /// it is, work will always be delivered directly to the thread (and not through the process
629     /// queue).
630     pub(crate) fn get_work_or_register<'a>(
631         &'a self,
632         thread: &'a Arc<Thread>,
633     ) -> GetWorkOrRegister<'a> {
634         let mut inner = self.inner.lock();
635         // Try to get work from the process queue.
636         if let Some(work) = inner.work.pop_front() {
637             return GetWorkOrRegister::Work(work);
638         }
639 
640         // Register the thread as ready.
641         GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
642     }
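    // A sketch of how a read loop is expected to consume the result (illustration only; the
    // real consumer lives in the thread code, outside this excerpt):
    //
    //     match process.get_work_or_register(&thread) {
    //         GetWorkOrRegister::Work(work) => {
    //             // Deliver `work` to userspace.
    //         }
    //         GetWorkOrRegister::Register(registration) => {
    //             // Sleep until woken; dropping `registration` is expected to take the
    //             // thread off the ready list again.
    //         }
    //     }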
643 
644     fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
645         let id = {
646             let current = kernel::current!();
647             if !core::ptr::eq(current.group_leader(), &*self.task) {
648                 pr_err!("get_current_thread was called from the wrong process.");
649                 return Err(EINVAL);
650             }
651             current.pid()
652         };
653 
654         {
655             let inner = self.inner.lock();
656             if let Some(thread) = inner.threads.get(&id) {
657                 return Ok(thread.clone());
658             }
659         }
660 
661         // Allocate a new `Thread` without holding any locks.
662         let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
663         let ta: Arc<Thread> = Thread::new(id, self.into())?;
664 
665         let mut inner = self.inner.lock();
666         match inner.threads.entry(id) {
667             rbtree::Entry::Vacant(entry) => {
668                 entry.insert(ta.clone(), reservation);
669                 Ok(ta)
670             }
671             rbtree::Entry::Occupied(_entry) => {
672                 pr_err!("Cannot create two threads with the same id.");
673                 Err(EINVAL)
674             }
675         }
676     }
677 
678     pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
679         // If push_work fails, drop the work item outside the lock.
680         let res = self.inner.lock().push_work(work);
681         match res {
682             Ok(()) => Ok(()),
683             Err((err, work)) => {
684                 drop(work);
685                 Err(err)
686             }
687         }
688     }
689 
690     fn set_as_manager(
691         self: ArcBorrow<'_, Self>,
692         info: Option<FlatBinderObject>,
693         thread: &Thread,
694     ) -> Result {
695         let (ptr, cookie, flags) = if let Some(obj) = info {
696             (
697                 // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
698                 // is safe to access the `binder` field.
699                 unsafe { obj.__bindgen_anon_1.binder },
700                 obj.cookie,
701                 obj.flags,
702             )
703         } else {
704             (0, 0, 0)
705         };
706         let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
707         let node = node_ref.node.clone();
708         self.ctx.set_manager_node(node_ref)?;
709         self.inner.lock().is_manager = true;
710 
711         // Force the state of the node to prevent the delivery of acquire/increfs.
712         let mut owner_inner = node.owner.inner.lock();
713         node.force_has_count(&mut owner_inner);
714         Ok(())
715     }
716 
717     fn get_node_inner(
718         self: ArcBorrow<'_, Self>,
719         ptr: u64,
720         cookie: u64,
721         flags: u32,
722         strong: bool,
723         thread: &Thread,
724         wrapper: Option<CritIncrWrapper>,
725     ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
726         // Try to find an existing node.
727         {
728             let mut inner = self.inner.lock();
729             if let Some(node) = inner.get_existing_node(ptr, cookie)? {
730                 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
731             }
732         }
733 
734         // Allocate the node before reacquiring the lock.
735         let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
736         let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
737         let mut inner = self.inner.lock();
738         if let Some(node) = inner.get_existing_node(ptr, cookie)? {
739             return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
740         }
741 
742         inner.nodes.insert(rbnode);
743         // This can only fail if someone has already pushed the node to a list, but we just created
744         // it and still hold the lock, so it can't fail right now.
745         let node_ref = inner
746             .new_node_ref_with_thread(node, strong, thread, wrapper)
747             .unwrap();
748 
749         Ok(Ok(node_ref))
750     }
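    // The method above is an instance of the allocate-outside-the-lock pattern used throughout
    // this file: look up under the lock, drop the lock to allocate, then re-check after
    // reacquiring the lock because another thread may have inserted the entry in the meantime.
    // `get_current_thread` and `insert_or_update_handle` follow the same shape.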
751 
752     pub(crate) fn get_node(
753         self: ArcBorrow<'_, Self>,
754         ptr: u64,
755         cookie: u64,
756         flags: u32,
757         strong: bool,
758         thread: &Thread,
759     ) -> Result<NodeRef> {
760         let mut wrapper = None;
761         for _ in 0..2 {
762             match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
763                 Err(err) => return Err(err),
764                 Ok(Ok(node_ref)) => return Ok(node_ref),
765                 Ok(Err(CouldNotDeliverCriticalIncrement)) => {
766                     wrapper = Some(CritIncrWrapper::new()?);
767                 }
768             }
769         }
770         // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
771         // loop should run at most twice.
772         unreachable!()
773     }
774 
775     pub(crate) fn insert_or_update_handle(
776         self: ArcBorrow<'_, Process>,
777         node_ref: NodeRef,
778         is_manager: bool,
779     ) -> Result<u32> {
780         {
781             let mut refs = self.node_refs.lock();
782 
783             // Do a lookup before inserting.
784             if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
785                 let handle = *handle_ref;
786                 let info = refs.by_handle.get_mut(&handle).unwrap();
787                 info.node_ref().absorb(node_ref);
788                 return Ok(handle);
789             }
790         }
791 
792         // Reserve memory for tree nodes.
793         let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
794         let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
795         let info = UniqueArc::new_uninit(GFP_KERNEL)?;
796 
797         let mut refs = self.node_refs.lock();
798 
799         // Do a lookup again as node may have been inserted before the lock was reacquired.
800         if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
801             let handle = *handle_ref;
802             let info = refs.by_handle.get_mut(&handle).unwrap();
803             info.node_ref().absorb(node_ref);
804             return Ok(handle);
805         }
806 
807         // Find id.
808         let mut target: u32 = if is_manager { 0 } else { 1 };
809         for handle in refs.by_handle.keys() {
810             if *handle > target {
811                 break;
812             }
813             if *handle == target {
814                 target = target.checked_add(1).ok_or(ENOMEM)?;
815             }
816         }
817 
818         let gid = node_ref.node.global_id();
819         let (info_proc, info_node) = {
820             let info_init = NodeRefInfo::new(node_ref, target, self.into());
821             match info.pin_init_with(info_init) {
822                 Ok(info) => ListArc::pair_from_pin_unique(info),
823                 // The error type is infallible, so this branch can never be taken.
824                 Err(err) => match err {},
825             }
826         };
827 
828         // Ensure the process is still alive while we insert a new reference.
829         //
830         // This releases the lock before inserting the nodes, but since `is_dead` is set as the
831         // first thing in `deferred_release`, process cleanup will not miss the items inserted into
832         // `refs` below.
833         if self.inner.lock().is_dead {
834             return Err(ESRCH);
835         }
836 
837         // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
838         // `info_node` into the right node's `refs` list.
839         unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
840 
841         refs.by_node.insert(reserve1.into_node(gid, target));
842         refs.by_handle.insert(reserve2.into_node(target, info_proc));
843         Ok(target)
844     }
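    // Worked example for the handle search above: with existing handles {1, 2, 4} and
    // `is_manager == false`, `target` starts at 1, is bumped to 2 and then 3 by the matching
    // keys, and the key 4 breaks the loop, so the new reference gets handle 3. Handle 0 always
    // refers to the context manager.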
845 
846     pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
847         // When handle is zero, try to get the context manager.
848         if handle == 0 {
849             Ok(self.ctx.get_manager_node(true)?)
850         } else {
851             Ok(self.get_node_from_handle(handle, true)?)
852         }
853     }
854 
855     pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
856         self.node_refs
857             .lock()
858             .by_handle
859             .get_mut(&handle)
860             .ok_or(ENOENT)?
861             .node_ref()
862             .clone(strong)
863     }
864 
865     pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
866         let mut inner = self.inner.lock();
867         // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
868         let removed = unsafe { inner.delivered_deaths.remove(death) };
869         drop(inner);
870         drop(removed);
871     }
872 
873     pub(crate) fn update_ref(
874         self: ArcBorrow<'_, Process>,
875         handle: u32,
876         inc: bool,
877         strong: bool,
878     ) -> Result {
879         if inc && handle == 0 {
880             if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
881                 if core::ptr::eq(&*self, &*node_ref.node.owner) {
882                     return Err(EINVAL);
883                 }
884                 let _ = self.insert_or_update_handle(node_ref, true);
885                 return Ok(());
886             }
887         }
888 
889         // To preserve original binder behaviour, we only fail requests where the manager tries to
890         // increment references on itself.
891         let mut refs = self.node_refs.lock();
892         if let Some(info) = refs.by_handle.get_mut(&handle) {
893             if info.node_ref().update(inc, strong) {
894                 // Clean up death if there is one attached to this node reference.
895                 if let Some(death) = info.death().take() {
896                     death.set_cleared(true);
897                     self.remove_from_delivered_deaths(&death);
898                 }
899 
900                 // Remove reference from process tables, and from the node's `refs` list.
901 
902                 // SAFETY: We are removing the `NodeRefInfo` from the right node.
903                 unsafe { info.node_ref2().node.remove_node_info(info) };
904 
905                 let id = info.node_ref().node.global_id();
906                 refs.by_handle.remove(&handle);
907                 refs.by_node.remove(&id);
908             }
909         } else {
910             // All refs are cleared in process exit, so this warning is expected in that case.
911             if !self.inner.lock().is_dead {
912                 pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
913             }
914         }
915         Ok(())
916     }
917 
918     /// Decrements the refcount of the given node, if one exists.
919     pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
920         let mut inner = self.inner.lock();
921         if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
922             inner.update_node_refcount(&node, false, strong, 1, None);
923         }
924     }
925 
926     pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
927         let ptr = reader.read::<u64>()?;
928         let cookie = reader.read::<u64>()?;
929         let mut inner = self.inner.lock();
930         if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
931             if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
932                 // This only fails if the process is dead.
933                 let _ = inner.push_work(node);
934             }
935         }
936         Ok(())
937     }
938 
939     pub(crate) fn buffer_alloc(
940         self: &Arc<Self>,
941         debug_id: usize,
942         size: usize,
943         is_oneway: bool,
944         from_pid: i32,
945     ) -> BinderResult<NewAllocation> {
946         use kernel::page::PAGE_SIZE;
947 
948         let mut reserve_new_args = ReserveNewArgs {
949             debug_id,
950             size,
951             is_oneway,
952             pid: from_pid,
953             ..ReserveNewArgs::default()
954         };
955 
956         let (new_alloc, addr) = loop {
957             let mut inner = self.inner.lock();
958             let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
959             let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
960                 ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
961                 ReserveNew::NeedAlloc(request) => request,
962             };
963             drop(inner);
964             // We need to allocate memory and then call `reserve_new` again.
965             reserve_new_args = alloc_request.make_alloc()?;
966         };
967 
968         let res = Allocation::new(
969             self.clone(),
970             debug_id,
971             new_alloc.offset,
972             size,
973             addr + new_alloc.offset,
974             new_alloc.oneway_spam_detected,
975         );
976 
977         // This allocation will be marked as in use until the `Allocation` is used to free it.
978         //
979         // This method can't be called while holding a lock, so we release the lock first. It's
980         // okay for several threads to use the method on the same index at the same time. In that
981         // case, one of the calls will allocate the given page (if missing), and the other call
982         // will wait for the other call to finish allocating the page.
983         //
984         // We will not call `stop_using_range` in parallel with this on the same page, because the
985         // allocation can only be removed via the destructor of the `Allocation` object that we
986         // currently own.
987         match self.pages.use_range(
988             new_alloc.offset / PAGE_SIZE,
989             (new_alloc.offset + size).div_ceil(PAGE_SIZE),
990         ) {
991             Ok(()) => {}
992             Err(err) => {
993                 pr_warn!("use_range failure {:?}", err);
994                 return Err(err.into());
995             }
996         }
997 
998         Ok(NewAllocation(res))
999     }
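    // The reservation loop above uses the same drop-the-lock-to-allocate idiom: `reserve_new`
    // either succeeds immediately or returns `ReserveNew::NeedAlloc`, in which case the lock is
    // released, the required memory is allocated via `make_alloc`, and the reservation is
    // retried with the pre-allocated arguments.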
1000 
1001     pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
1002         let mut inner = self.inner.lock();
1003         let mapping = inner.mapping.as_mut()?;
1004         let offset = ptr.checked_sub(mapping.address)?;
1005         let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
1006         let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
1007         if let Some(data) = odata {
1008             alloc.set_info(data);
1009         }
1010         Some(alloc)
1011     }
1012 
1013     pub(crate) fn buffer_raw_free(&self, ptr: usize) {
1014         let mut inner = self.inner.lock();
1015         if let Some(ref mut mapping) = &mut inner.mapping {
1016             let offset = match ptr.checked_sub(mapping.address) {
1017                 Some(offset) => offset,
1018                 None => return,
1019             };
1020 
1021             let freed_range = match mapping.alloc.reservation_abort(offset) {
1022                 Ok(freed_range) => freed_range,
1023                 Err(_) => {
1024                     pr_warn!(
1025                         "Pointer {:x} failed to free, base = {:x}\n",
1026                         ptr,
1027                         mapping.address
1028                     );
1029                     return;
1030                 }
1031             };
1032 
1033             // No more allocations in this range. Mark them as not in use.
1034             //
1035             // Must be done before we release the lock so that `use_range` is not used on these
1036             // indices until `stop_using_range` returns.
1037             self.pages
1038                 .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
1039         }
1040     }
1041 
1042     pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
1043         let mut inner = self.inner.lock();
1044         if let Some(ref mut mapping) = &mut inner.mapping {
1045             if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
1046                 pr_warn!("Offset {} failed to be marked freeable\n", offset);
1047             }
1048         }
1049     }
1050 
1051     fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
1052         use kernel::page::PAGE_SIZE;
1053         let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
1054         let mapping = Mapping::new(vma.start(), size);
1055         let page_count = self.pages.register_with_vma(vma)?;
1056         if page_count * PAGE_SIZE != size {
1057             return Err(EINVAL);
1058         }
1059 
1060         // Save range allocator for later.
1061         self.inner.lock().mapping = Some(mapping);
1062 
1063         Ok(())
1064     }
1065 
1066     fn version(&self, data: UserSlice) -> Result {
1067         data.writer().write(&BinderVersion::current())
1068     }
1069 
1070     pub(crate) fn register_thread(&self) -> bool {
1071         self.inner.lock().register_thread()
1072     }
1073 
1074     fn remove_thread(&self, thread: Arc<Thread>) {
1075         self.inner.lock().threads.remove(&thread.id);
1076         thread.release();
1077     }
1078 
1079     fn set_max_threads(&self, max: u32) {
1080         self.inner.lock().max_threads = max;
1081     }
1082 
1083     fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
1084         self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
1085     }
1086 
1087     pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
1088         self.inner.lock().oneway_spam_detection_enabled
1089     }
1090 
1091     fn get_node_debug_info(&self, data: UserSlice) -> Result {
1092         let (mut reader, mut writer) = data.reader_writer();
1093 
1094         // Read the starting point.
1095         let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
1096         let mut out = BinderNodeDebugInfo::default();
1097 
1098         {
1099             let inner = self.inner.lock();
1100             for (node_ptr, node) in &inner.nodes {
1101                 if *node_ptr > ptr {
1102                     node.populate_debug_info(&mut out, &inner);
1103                     break;
1104                 }
1105             }
1106         }
1107 
1108         writer.write(&out)
1109     }
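    // The ioctl reports the first node whose pointer is strictly greater than the one passed
    // in, so userspace can walk all nodes by feeding each returned `ptr` back in. A hedged
    // sketch of that enumeration (not UAPI documentation):
    //
    //     let mut info = BinderNodeDebugInfo::default();
    //     loop {
    //         // Issue BINDER_GET_NODE_DEBUG_INFO with `info` as the argument.
    //         if info.ptr == 0 {
    //             break; // No node beyond the previous pointer; the scan is done.
    //         }
    //         // Inspect `info`, then continue the scan from `info.ptr`.
    //     }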
1110 
1111     fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
1112         let (mut reader, mut writer) = data.reader_writer();
1113         let mut out = reader.read::<BinderNodeInfoForRef>()?;
1114 
1115         if out.strong_count != 0
1116             || out.weak_count != 0
1117             || out.reserved1 != 0
1118             || out.reserved2 != 0
1119             || out.reserved3 != 0
1120         {
1121             return Err(EINVAL);
1122         }
1123 
1124         // Only the context manager is allowed to use this ioctl.
1125         if !self.inner.lock().is_manager {
1126             return Err(EPERM);
1127         }
1128 
1129         {
1130             let mut node_refs = self.node_refs.lock();
1131             let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
1132             let node_ref = node_info.node_ref();
1133             let owner_inner = node_ref.node.owner.inner.lock();
1134             node_ref.node.populate_counts(&mut out, &owner_inner);
1135         }
1136 
1137         // Write the result back.
1138         writer.write(&out)
1139     }
1140 
1141     pub(crate) fn needs_thread(&self) -> bool {
1142         let mut inner = self.inner.lock();
1143         let ret = inner.requested_thread_count == 0
1144             && inner.ready_threads.is_empty()
1145             && inner.started_thread_count < inner.max_threads;
1146         if ret {
1147             inner.requested_thread_count += 1
1148         }
1149         ret
1150     }
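    // When this returns true, the read path is expected to ask userspace to spawn another
    // looper thread (`BR_SPAWN_LOOPER`); the new thread then registers itself with
    // `BC_REGISTER_LOOPER`, which lands in `register_thread` above and converts the requested
    // count into a started count.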
1151 
1152     pub(crate) fn request_death(
1153         self: &Arc<Self>,
1154         reader: &mut UserSliceReader,
1155         thread: &Thread,
1156     ) -> Result {
1157         let handle: u32 = reader.read()?;
1158         let cookie: u64 = reader.read()?;
1159 
1160         // Queue BR_ERROR if we can't allocate memory for the death notification.
1161         let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
1162             thread.push_return_work(BR_ERROR);
1163         })?;
1164         let mut refs = self.node_refs.lock();
1165         let Some(info) = refs.by_handle.get_mut(&handle) else {
1166             pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
1167             return Ok(());
1168         };
1169 
1170         // Nothing to do if there is already a death notification request for this handle.
1171         if info.death().is_some() {
1172             pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
1173             return Ok(());
1174         }
1175 
1176         let death = {
1177             let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
1178             match death.pin_init_with(death_init) {
1179                 Ok(death) => death,
1180                 // The error type is infallible, so this branch can never be taken.
1181                 Err(err) => match err {},
1182             }
1183         };
1184 
1185         // Register the death notification.
1186         {
1187             let owner = info.node_ref2().node.owner.clone();
1188             let mut owner_inner = owner.inner.lock();
1189             if owner_inner.is_dead {
1190                 let death = Arc::from(death);
1191                 *info.death() = Some(death.clone());
1192                 drop(owner_inner);
1193                 death.set_dead();
1194             } else {
1195                 let death = ListArc::from(death);
1196                 *info.death() = Some(death.clone_arc());
1197                 info.node_ref().node.add_death(death, &mut owner_inner);
1198             }
1199         }
1200         Ok(())
1201     }
1202 
1203     pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
1204         let handle: u32 = reader.read()?;
1205         let cookie: u64 = reader.read()?;
1206 
1207         let mut refs = self.node_refs.lock();
1208         let Some(info) = refs.by_handle.get_mut(&handle) else {
1209             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
1210             return Ok(());
1211         };
1212 
1213         let Some(death) = info.death().take() else {
1214             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
1215             return Ok(());
1216         };
1217         if death.cookie != cookie {
1218             *info.death() = Some(death);
1219             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
1220             return Ok(());
1221         }
1222 
1223         // Update state and determine if we need to queue a work item. We only need to do it when
1224         // the node is not dead or if the user already completed the death notification.
1225         if death.set_cleared(false) {
1226             if let Some(death) = ListArc::try_from_arc_or_drop(death) {
1227                 let _ = thread.push_work_if_looper(death);
1228             }
1229         }
1230 
1231         Ok(())
1232     }
1233 
1234     pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
1235         if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
1236             death.set_notification_done(thread);
1237         }
1238     }
1239 
1240     /// Locks the spinlock and moves the `nodes` rbtree out.
1241     ///
1242     /// This allows iterating over `nodes` while still being able to give other parts of the
1243     /// codebase exclusive access to `ProcessInner`.
1244     pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
1245         let mut inner = self.inner.lock();
1246         WithNodes {
1247             nodes: take(&mut inner.nodes),
1248             inner,
1249         }
1250     }
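    // Usage sketch (illustration only; `WithNodes` is defined elsewhere in this file and is
    // expected to move the tree back into `ProcessInner` when dropped):
    //
    //     let mut guard = process.lock_with_nodes();
    //     for node in guard.nodes.values() {
    //         node.full_debug_print(m, &mut guard.inner)?;
    //     }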
1251 
1252     fn deferred_flush(&self) {
1253         let inner = self.inner.lock();
1254         for thread in inner.threads.values() {
1255             thread.exit_looper();
1256         }
1257     }
1258 
1259     fn deferred_release(self: Arc<Self>) {
1260         let is_manager = {
1261             let mut inner = self.inner.lock();
1262             inner.is_dead = true;
1263             inner.is_frozen = false;
1264             inner.sync_recv = false;
1265             inner.async_recv = false;
1266             inner.is_manager
1267         };
1268 
1269         if is_manager {
1270             self.ctx.unset_manager_node();
1271         }
1272 
1273         self.ctx.deregister_process(&self);
1274 
1275         let binderfs_file = self.inner.lock().binderfs_file.take();
1276         drop(binderfs_file);
1277 
1278         // Release threads.
1279         let threads = {
1280             let mut inner = self.inner.lock();
1281             let threads = take(&mut inner.threads);
1282             let ready = take(&mut inner.ready_threads);
1283             drop(inner);
1284             drop(ready);
1285 
1286             for thread in threads.values() {
1287                 thread.release();
1288             }
1289             threads
1290         };
1291 
1292         // Release nodes.
1293         {
1294             while let Some(node) = {
1295                 let mut lock = self.inner.lock();
1296                 lock.nodes.cursor_front().map(|c| c.remove_current().1)
1297             } {
1298                 node.to_key_value().1.release();
1299             }
1300         }
1301 
1302         // Clean up death listeners and remove nodes from external node info lists.
1303         for info in self.node_refs.lock().by_handle.values_mut() {
1304             // SAFETY: We are removing the `NodeRefInfo` from the right node.
1305             unsafe { info.node_ref2().node.remove_node_info(info) };
1306 
1307             // Remove all death notifications from the nodes (that belong to a different process).
1308             let death = if let Some(existing) = info.death().take() {
1309                 existing
1310             } else {
1311                 continue;
1312             };
1313             death.set_cleared(false);
1314         }
1315 
1316         // Clean up freeze listeners.
1317         let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
1318         for listener in freeze_listeners.values() {
1319             listener.on_process_exit(&self);
1320         }
1321         drop(freeze_listeners);
1322 
1323         // Release refs on foreign nodes.
1324         {
1325             let mut refs = self.node_refs.lock();
1326             let by_handle = take(&mut refs.by_handle);
1327             let by_node = take(&mut refs.by_node);
1328             drop(refs);
1329             drop(by_node);
1330             drop(by_handle);
1331         }
1332 
1333         // Cancel all pending work items.
1334         while let Some(work) = self.get_work() {
1335             work.into_arc().cancel();
1336         }
1337 
1338         let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
1339         drop(delivered_deaths);
1340 
1341         // Free any resources kept alive by allocated buffers.
1342         let omapping = self.inner.lock().mapping.take();
1343         if let Some(mut mapping) = omapping {
1344             let address = mapping.address;
1345             mapping
1346                 .alloc
1347                 .take_for_each(|offset, size, debug_id, odata| {
1348                     let ptr = offset + address;
1349                     pr_warn!(
1350                         "{}: removing orphan mapping {offset}:{size}\n",
1351                         self.pid_in_current_ns()
1352                     );
1353                     let mut alloc =
1354                         Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
1355                     if let Some(data) = odata {
1356                         alloc.set_info(data);
1357                     }
1358                     drop(alloc)
1359                 });
1360         }
1361 
1362         // Calls to `synchronize_rcu()` in the `Thread` destructors happen here.
1363         drop(threads);
1364     }
1365 
1366     pub(crate) fn drop_outstanding_txn(&self) {
1367         let wake = {
1368             let mut inner = self.inner.lock();
1369             if inner.outstanding_txns == 0 {
1370                 pr_err!("outstanding_txns underflow");
1371                 return;
1372             }
1373             inner.outstanding_txns -= 1;
1374             inner.is_frozen && inner.outstanding_txns == 0
1375         };
1376 
1377         if wake {
1378             self.freeze_wait.notify_all();
1379         }
1380     }
1381 
1382     pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
1383         if info.enable == 0 {
1384             let msgs = self.prepare_freeze_messages()?;
1385             let mut inner = self.inner.lock();
1386             inner.sync_recv = false;
1387             inner.async_recv = false;
1388             inner.is_frozen = false;
1389             drop(inner);
1390             msgs.send_messages();
1391             return Ok(());
1392         }
1393 
1394         let mut inner = self.inner.lock();
1395         inner.sync_recv = false;
1396         inner.async_recv = false;
1397         inner.is_frozen = true;
1398 
1399         if info.timeout_ms > 0 {
1400             let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
1401             while jiffies > 0 {
1402                 if inner.outstanding_txns == 0 {
1403                     break;
1404                 }
1405 
1406                 match self
1407                     .freeze_wait
1408                     .wait_interruptible_timeout(&mut inner, jiffies)
1409                 {
1410                     CondVarTimeoutResult::Signal { .. } => {
1411                         inner.is_frozen = false;
1412                         return Err(ERESTARTSYS);
1413                     }
1414                     CondVarTimeoutResult::Woken { jiffies: remaining } => {
1415                         jiffies = remaining;
1416                     }
1417                     CondVarTimeoutResult::Timeout => {
1418                         jiffies = 0;
1419                     }
1420                 }
1421             }
1422         }
1423 
1424         if inner.txns_pending_locked() {
1425             inner.is_frozen = false;
1426             Err(EAGAIN)
1427         } else {
1428             drop(inner);
1429             match self.prepare_freeze_messages() {
1430                 Ok(batch) => {
1431                     batch.send_messages();
1432                     Ok(())
1433                 }
1434                 Err(kernel::alloc::AllocError) => {
1435                     self.inner.lock().is_frozen = false;
1436                     Err(ENOMEM)
1437                 }
1438             }
1439         }
1440     }
1441 }
1442 
1443 fn get_frozen_status(data: UserSlice) -> Result {
1444     let (mut reader, mut writer) = data.reader_writer();
1445 
1446     let mut info = reader.read::<BinderFrozenStatusInfo>()?;
1447     info.sync_recv = 0;
1448     info.async_recv = 0;
1449     let mut found = false;
1450 
1451     for ctx in crate::context::get_all_contexts()? {
1452         ctx.for_each_proc(|proc| {
1453             if proc.task.pid() == info.pid as _ {
1454                 found = true;
1455                 let inner = proc.inner.lock();
1456                 let txns_pending = inner.txns_pending_locked();
1457                 info.async_recv |= inner.async_recv as u32;
1458                 info.sync_recv |= inner.sync_recv as u32;
1459                 info.sync_recv |= (txns_pending as u32) << 1;
1460             }
1461         });
1462     }
1463 
1464     if found {
1465         writer.write(&info)?;
1466         Ok(())
1467     } else {
1468         Err(EINVAL)
1469     }
1470 }
1471 
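/// Handles the `BINDER_FREEZE` ioctl by freezing (or thawing) every `Process` with the given pid
/// in every binder context.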
1472 fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
1473     let info = reader.read::<BinderFreezeInfo>()?;
1474 
1475     // It is very unlikely for there to be more than 3 entries here, since a process normally
1476     // uses at most the binder and hwbinder contexts.
1477     let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
1478 
1479     let ctxs = crate::context::get_all_contexts()?;
1480     for ctx in ctxs {
1481         for proc in ctx.get_procs_with_pid(info.pid as i32)? {
1482             procs.push(proc, GFP_KERNEL)?;
1483         }
1484     }
1485 
1486     for proc in procs {
1487         proc.ioctl_freeze(&info)?;
1488     }
1489     Ok(())
1490 }
1491 
1492 /// The ioctl handler.
1493 impl Process {
1494     /// Ioctls that are write-only from the perspective of userspace.
1495     ///
1496     /// The kernel will only read from the pointer that userspace provided to us.
1497     fn ioctl_write_only(
1498         this: ArcBorrow<'_, Process>,
1499         _file: &File,
1500         cmd: u32,
1501         reader: &mut UserSliceReader,
1502     ) -> Result {
1503         let thread = this.get_current_thread()?;
1504         match cmd {
1505             uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
1506             uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
1507             uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
1508             uapi::BINDER_SET_CONTEXT_MGR_EXT => {
1509                 this.set_as_manager(Some(reader.read()?), &thread)?
1510             }
1511             uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
1512                 this.set_oneway_spam_detection_enabled(reader.read()?)
1513             }
1514             uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
1515             _ => return Err(EINVAL),
1516         }
1517         Ok(())
1518     }
1519 
1520     /// Ioctls that are read/write from the perspective of userspace.
1521     ///
1522     /// The kernel will both read from and write to the pointer that userspace provided to us.
1523     fn ioctl_write_read(
1524         this: ArcBorrow<'_, Process>,
1525         file: &File,
1526         cmd: u32,
1527         data: UserSlice,
1528     ) -> Result {
1529         let thread = this.get_current_thread()?;
1530         let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
1531         match cmd {
1532             uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
1533             uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
1534             uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
1535             uapi::BINDER_VERSION => this.version(data)?,
1536             uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
1537             uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
1538             _ => return Err(EINVAL),
1539         }
1540         Ok(())
1541     }
1542 }
1543 
1544 /// The file operations supported by `Process`.
1545 impl Process {
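    /// Handles `open` by creating a new `Process` for this binder context that holds a reference
    /// to the credentials the file was opened with.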
1546     pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
1547         Self::new(ctx.into(), ARef::from(file.cred()))
1548     }
1549 
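    /// Handles `release` by recording `PROC_DEFER_RELEASE`, dropping the binderfs proc file, and
    /// scheduling the deferred-work handler on the system workqueue unless it is already queued.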
1550     pub(crate) fn release(this: Arc<Process>, _file: &File) {
1551         let binderfs_file;
1552         let should_schedule;
1553         {
1554             let mut inner = this.inner.lock();
1555             should_schedule = inner.defer_work == 0;
1556             inner.defer_work |= PROC_DEFER_RELEASE;
1557             binderfs_file = inner.binderfs_file.take();
1558         }
1559 
1560         if should_schedule {
1561             // Ignore failures to schedule to the workqueue. Those just mean that we're already
1562             // scheduled for execution.
1563             let _ = workqueue::system().enqueue(this);
1564         }
1565 
1566         drop(binderfs_file);
1567     }
1568 
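    /// Handles `flush` by recording `PROC_DEFER_FLUSH` and scheduling the deferred-work handler
    /// on the system workqueue unless it is already queued.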
1569     pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
1570         let should_schedule;
1571         {
1572             let mut inner = this.inner.lock();
1573             should_schedule = inner.defer_work == 0;
1574             inner.defer_work |= PROC_DEFER_FLUSH;
1575         }
1576 
1577         if should_schedule {
1578             // Ignore failures to schedule to the workqueue. Those just mean that we're already
1579             // scheduled for execution.
1580             let _ = workqueue::system().enqueue(Arc::from(this));
1581         }
1582         Ok(())
1583     }
1584 
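    /// Top-level ioctl dispatcher: wraps the ioctl argument in a `UserSlice` of `_IOC_SIZE(cmd)`
    /// bytes and routes the command based on `_IOC_DIR(cmd)`.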
1585     pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
1586         use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
1587         use kernel::uapi::{_IOC_READ, _IOC_WRITE};
        // `UserPtr` is used below to wrap the raw ioctl argument; it is not part of the
        // crate-level imports, so bring it into scope here.
        use kernel::uaccess::UserPtr;
1588 
1589         crate::trace::trace_ioctl(cmd, arg);
1590 
1591         let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
1592 
1593         const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
1594 
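        // For example, BINDER_SET_MAX_THREADS is declared with _IOW and takes the write-only
        // path, while BINDER_WRITE_READ is declared with _IOWR and takes the read/write path.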
1595         match _IOC_DIR(cmd) {
1596             _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
1597             _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
1598             _ => Err(EINVAL),
1599         }
1600     }
1601 
1602     pub(crate) fn compat_ioctl(
1603         this: ArcBorrow<'_, Process>,
1604         file: &File,
1605         cmd: u32,
1606         arg: usize,
1607     ) -> Result {
1608         Self::ioctl(this, file, cmd, arg)
1609     }
1610 
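    /// Handles `mmap` of the binder fd. The mapping must be created by the task that opened the
    /// fd, may not be writable, and is never copied on fork.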
1611     pub(crate) fn mmap(
1612         this: ArcBorrow<'_, Process>,
1613         _file: &File,
1614         vma: &mm::virt::VmaNew,
1615     ) -> Result {
1616         // We don't allow mmap to be used in a different process.
1617         if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
1618             return Err(EINVAL);
1619         }
1620         if vma.start() == 0 {
1621             return Err(EINVAL);
1622         }
1623 
1624         vma.try_clear_maywrite().map_err(|_| EPERM)?;
1625         vma.set_dontcopy();
1626         vma.set_mixedmap();
1627 
1628         // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
1629         this.create_mapping(vma)
1630     }
1631 
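    /// Handles `poll` by delegating to the current thread, and additionally reports `POLLIN` when
    /// the thread services the process-wide work queue and that queue is non-empty.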
1632     pub(crate) fn poll(
1633         this: ArcBorrow<'_, Process>,
1634         file: &File,
1635         table: PollTable<'_>,
1636     ) -> Result<u32> {
1637         let thread = this.get_current_thread()?;
1638         let (from_proc, mut mask) = thread.poll(file, table);
1639         if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
1640             mask |= bindings::POLLIN;
1641         }
1642         Ok(mask)
1643     }
1644 }
1645 
1646 /// Represents that a thread has registered with the `ready_threads` list of its process.
1647 ///
1648 /// The destructor of this type will unregister the thread from the list of ready threads.
1649 pub(crate) struct Registration<'a> {
1650     thread: &'a Arc<Thread>,
1651 }
1652 
1653 impl<'a> Registration<'a> {
1654     fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
1655         assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
1656         // INVARIANT: We are pushing this thread to the right `ready_threads` list.
1657         if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
1658             guard.ready_threads.push_front(list_arc);
1659         } else {
1660             // This branch should be unreachable: reaching it means the thread is already on a
1661             // list. We recover by not pushing it again, but the thread in question will then
1662             // most likely sleep forever.
1663             pr_err!("Same thread registered with `ready_threads` twice.\n");
1664         }
1665         Self { thread }
1666     }
1667 }
1668 
1669 impl Drop for Registration<'_> {
1670     fn drop(&mut self) {
1671         let mut inner = self.thread.process.inner.lock();
1672         // SAFETY: The thread has the invariant that we never push it to any other linked list than
1673         // the `ready_threads` list of its parent process. Therefore, the thread is either in that
1674         // list, or in no list.
1675         unsafe { inner.ready_threads.remove(self.thread) };
1676     }
1677 }
1678 
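/// Guard that pairs the process's inner lock with its `nodes` tree, which is temporarily moved
/// out of `ProcessInner` and swapped back in when the guard is dropped.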
1679 pub(crate) struct WithNodes<'a> {
1680     pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
1681     pub(crate) nodes: RBTree<u64, DArc<Node>>,
1682 }
1683 
1684 impl Drop for WithNodes<'_> {
1685     fn drop(&mut self) {
1686         core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
1687         if self.nodes.iter().next().is_some() {
1688             pr_err!("nodes tree was modified while using lock_with_nodes\n");
1689         }
1690     }
1691 }
1692 
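/// The result of a thread asking its process for work: either a work item to deliver, or a
/// `Registration` that places the thread on the process's `ready_threads` list while it waits.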
1693 pub(crate) enum GetWorkOrRegister<'a> {
1694     Work(DLArc<dyn DeliverToRead>),
1695     Register(Registration<'a>),
1696 }
1697