xref: /linux/drivers/android/binder/process.rs (revision f468cf53c5240bf5063d0c6fe620b5ae2de37801)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 // Copyright (C) 2025 Google LLC.
4 
5 //! This module defines the `Process` type, which represents a process using a particular binder
6 //! context.
7 //!
8 //! The `Process` object keeps track of all of the resources that this process owns in the binder
9 //! context.
10 //!
11 //! There is one `Process` object for each binder fd that a process has opened, so processes using
12 //! several binder contexts have several `Process` objects. This ensures that the contexts are
13 //! fully separated.
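//!
//! Lifecycle sketch (assuming the usual binder file operations are wired up elsewhere in the
//! driver): opening a binder device creates the `Process` (see `Process::new`), `mmap` installs
//! the shared buffer via `create_mapping`, commands are serviced by per-task `Thread` objects
//! looked up through `get_current_thread`, and closing the fd eventually runs `deferred_release`
//! to tear everything down.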
14 
15 use core::mem::take;
16 
17 use kernel::{
18     bindings,
19     cred::Credential,
20     error::Error,
21     fs::file::{self, File},
22     id_pool::IdPool,
23     list::{List, ListArc, ListArcField, ListLinks},
24     mm,
25     prelude::*,
26     rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
27     seq_file::SeqFile,
28     seq_print,
29     sync::poll::PollTable,
30     sync::{
31         lock::{spinlock::SpinLockBackend, Guard},
32         Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
33     },
34     task::Task,
35     types::ARef,
36     uaccess::{UserSlice, UserSliceReader},
37     uapi,
38     workqueue::{self, Work},
39 };
40 
41 use crate::{
42     allocation::{Allocation, AllocationInfo, NewAllocation},
43     context::Context,
44     defs::*,
45     error::{BinderError, BinderResult},
46     node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
47     page_range::ShrinkablePageRange,
48     range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
49     stats::BinderStats,
50     thread::{PushWorkRes, Thread},
51     BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
52 };
53 
54 #[path = "freeze.rs"]
55 mod freeze;
56 use self::freeze::{FreezeCookie, FreezeListener};
57 
58 struct Mapping {
59     address: usize,
60     alloc: RangeAllocator<AllocationInfo>,
61 }
62 
63 impl Mapping {
64     fn new(address: usize, size: usize) -> Self {
65         Self {
66             address,
67             alloc: RangeAllocator::new(size),
68         }
69     }
70 }
71 
72 // bitflags for defer_work.
73 const PROC_DEFER_FLUSH: u8 = 1;
74 const PROC_DEFER_RELEASE: u8 = 2;
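// These flags accumulate in `ProcessInner::defer_work` and are consumed by the workqueue handler
// in `<Process as workqueue::WorkItem>::run()` below, which dispatches to `deferred_flush()` and
// `deferred_release()`.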
75 
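/// Freeze state of a process.
///
/// A freeze that is `InProgress` already causes incoming transactions to be rejected, but freeze
/// notifications only treat the process as frozen once the state reaches `Yes`.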
76 #[derive(Copy, Clone)]
77 pub(crate) enum IsFrozen {
78     Yes,
79     No,
80     InProgress,
81 }
82 
83 impl IsFrozen {
84     /// Whether incoming transactions should be rejected due to freeze.
85     pub(crate) fn is_frozen(self) -> bool {
86         match self {
87             IsFrozen::Yes => true,
88             IsFrozen::No => false,
89             IsFrozen::InProgress => true,
90         }
91     }
92 
93     /// Whether freeze notifications consider this process frozen.
94     pub(crate) fn is_fully_frozen(self) -> bool {
95         match self {
96             IsFrozen::Yes => true,
97             IsFrozen::No => false,
98             IsFrozen::InProgress => false,
99         }
100     }
101 }
102 
103 /// The fields of `Process` protected by the spinlock.
104 pub(crate) struct ProcessInner {
105     is_manager: bool,
106     pub(crate) is_dead: bool,
107     threads: RBTree<i32, Arc<Thread>>,
108     /// INVARIANT: Threads pushed to this list must be owned by this process.
109     ready_threads: List<Thread>,
110     nodes: RBTree<u64, DArc<Node>>,
111     mapping: Option<Mapping>,
112     work: List<DTRWrap<dyn DeliverToRead>>,
113     delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
114 
115     /// The number of requested threads that haven't registered yet.
116     requested_thread_count: u32,
117     /// The maximum number of threads used by the process thread pool.
118     max_threads: u32,
119     /// The number of threads that have started and registered with the thread pool.
120     started_thread_count: u32,
121 
122     /// Bitmap of deferred work to do.
123     defer_work: u8,
124 
125     /// Number of transactions to be transmitted before processes in freeze_wait
126     /// are woken up.
127     outstanding_txns: u32,
128     /// Process is frozen and unable to service binder transactions.
129     pub(crate) is_frozen: IsFrozen,
130     /// Process received sync transactions since last frozen.
131     pub(crate) sync_recv: bool,
132     /// Process received async transactions since last frozen.
133     pub(crate) async_recv: bool,
134     pub(crate) binderfs_file: Option<BinderfsProcFile>,
135     /// Whether oneway spam detection is enabled.
136     oneway_spam_detection_enabled: bool,
137 }
138 
139 impl ProcessInner {
140     fn new() -> Self {
141         Self {
142             is_manager: false,
143             is_dead: false,
144             threads: RBTree::new(),
145             ready_threads: List::new(),
146             mapping: None,
147             nodes: RBTree::new(),
148             work: List::new(),
149             delivered_deaths: List::new(),
150             requested_thread_count: 0,
151             max_threads: 0,
152             started_thread_count: 0,
153             defer_work: 0,
154             outstanding_txns: 0,
155             is_frozen: IsFrozen::No,
156             sync_recv: false,
157             async_recv: false,
158             binderfs_file: None,
159             oneway_spam_detection_enabled: false,
160         }
161     }
162 
163     /// Schedule the work item for execution on this process.
164     ///
165     /// If any threads are ready for work, then the work item is given directly to that thread and
166     /// it is woken up. Otherwise, it is pushed to the process work list.
167     ///
168     /// This call can fail only if the process is dead. In this case, the work item is returned to
169     /// the caller so that the caller can drop it after releasing the inner process lock. This is
170     /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
171     /// taken while holding the inner process lock.
172     pub(crate) fn push_work(
173         &mut self,
174         work: DLArc<dyn DeliverToRead>,
175     ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
176         // Try to find a ready thread to which to push the work.
177         if let Some(thread) = self.ready_threads.pop_front() {
178             // Push to thread while holding state lock. This prevents the thread from giving up
179             // (for example, because of a signal) when we're about to deliver work.
180             match thread.push_work(work) {
181                 PushWorkRes::Ok => Ok(()),
182                 PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
183             }
184         } else if self.is_dead {
185             Err((BinderError::new_dead(), work))
186         } else {
187             let sync = work.should_sync_wakeup();
188 
189             // Didn't find a thread waiting for proc work; this can happen
190             // in two scenarios:
191             // 1. All threads are busy handling transactions
192             //    In that case, one of those threads should call back into
193             //    the kernel driver soon and pick up this work.
194             // 2. Threads are using the (e)poll interface, in which case
195             //    they may be blocked on the waitqueue without having been
196             //    added to waiting_threads. For this case, we just iterate
197             //    over all threads not handling transaction work, and
198             //    wake them all up. We wake all because we don't know whether
199             //    a thread that called into (e)poll is handling non-binder
200             //    work currently.
201             self.work.push_back(work);
202 
203             // Wake up polling threads, if any.
204             for thread in self.threads.values() {
205                 thread.notify_if_poll_ready(sync);
206             }
207 
208             Ok(())
209         }
210     }
211 
212     pub(crate) fn remove_node(&mut self, ptr: u64) {
213         self.nodes.remove(&ptr);
214     }
215 
216     /// Updates the reference count on the given node.
217     pub(crate) fn update_node_refcount(
218         &mut self,
219         node: &DArc<Node>,
220         inc: bool,
221         strong: bool,
222         count: usize,
223         othread: Option<&Thread>,
224     ) {
225         let push = node.update_refcount_locked(inc, strong, count, self);
226 
227         // If we decided that we need to push work, push either to the process or to a thread if
228         // one is specified.
229         if let Some(node) = push {
230             if let Some(thread) = othread {
231                 thread.push_work_deferred(node);
232             } else {
233                 let _ = self.push_work(node);
234                 // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
235                 // that case, it doesn't care about the notification.
236             }
237         }
238     }
239 
240     pub(crate) fn new_node_ref(
241         &mut self,
242         node: DArc<Node>,
243         strong: bool,
244         thread: Option<&Thread>,
245     ) -> NodeRef {
246         self.update_node_refcount(&node, true, strong, 1, thread);
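        // The returned `NodeRef` owns exactly one reference: strong if `strong` was requested,
        // weak otherwise.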
247         let strong_count = if strong { 1 } else { 0 };
248         NodeRef::new(node, strong_count, 1 - strong_count)
249     }
250 
251     pub(crate) fn new_node_ref_with_thread(
252         &mut self,
253         node: DArc<Node>,
254         strong: bool,
255         thread: &Thread,
256         wrapper: Option<CritIncrWrapper>,
257     ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
258         let push = match wrapper {
259             None => node
260                 .incr_refcount_allow_zero2one(strong, self)?
261                 .map(|node| node as _),
262             Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
263         };
264         if let Some(node) = push {
265             thread.push_work_deferred(node);
266         }
267         let strong_count = if strong { 1 } else { 0 };
268         Ok(NodeRef::new(node, strong_count, 1 - strong_count))
269     }
270 
271     /// Returns an existing node with the given pointer and cookie, if one exists.
272     ///
273     /// Returns an error if a node with the given pointer but a different cookie exists.
274     fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
275         match self.nodes.get(&ptr) {
276             None => Ok(None),
277             Some(node) => {
278                 let (_, node_cookie) = node.get_id();
279                 if node_cookie == cookie {
280                     Ok(Some(node.clone()))
281                 } else {
282                     Err(EINVAL)
283                 }
284             }
285         }
286     }
287 
288     fn register_thread(&mut self) -> bool {
289         if self.requested_thread_count == 0 {
290             return false;
291         }
292 
293         self.requested_thread_count -= 1;
294         self.started_thread_count += 1;
295         true
296     }
297 
298     /// Finds a delivered death notification with the given cookie, removes it from the
299     /// process's delivered list, and returns it.
300     fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
301         let mut cursor = self.delivered_deaths.cursor_front();
302         while let Some(next) = cursor.peek_next() {
303             if next.cookie == cookie {
304                 return Some(next.remove().into_arc());
305             }
306             cursor.move_next();
307         }
308         None
309     }
310 
311     pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
312         if let Some(death) = ListArc::try_from_arc_or_drop(death) {
313             self.delivered_deaths.push_back(death);
314         } else {
315             pr_warn!("Notification added to `delivered_deaths` twice.");
316         }
317     }
318 
319     pub(crate) fn add_outstanding_txn(&mut self) {
320         self.outstanding_txns += 1;
321     }
322 
323     fn txns_pending_locked(&self) -> bool {
324         if self.outstanding_txns > 0 {
325             return true;
326         }
327         for thread in self.threads.values() {
328             if thread.has_current_transaction() {
329                 return true;
330             }
331         }
332         false
333     }
334 }
335 
336 /// Used to keep track of a node that this process has a handle to.
337 #[pin_data]
338 pub(crate) struct NodeRefInfo {
339     debug_id: usize,
340     /// The refcount that this process owns to the node.
341     node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
342     death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
343     /// Cookie of the active freeze listener for this node.
344     freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
345     /// Used to store this `NodeRefInfo` in the node's `refs` list.
346     #[pin]
347     links: ListLinks<{ Self::LIST_NODE }>,
348     /// The handle for this `NodeRefInfo`.
349     handle: u32,
350     /// The process that has a handle to the node.
351     pub(crate) process: Arc<Process>,
352 }
353 
354 impl NodeRefInfo {
355     /// The id used for the `Node::refs` list.
356     pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
357     /// The id used for the `ListArc` in `ProcessNodeRefs`.
358     const LIST_PROC: u64 = 0xd703a5263dcc8650;
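    // The values themselves are arbitrary; they only need to be distinct so that the two list
    // memberships of a `NodeRefInfo` can be told apart.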
359 
360     fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
361         pin_init!(Self {
362             debug_id: super::next_debug_id(),
363             node_ref: ListArcField::new(node_ref),
364             death: ListArcField::new(None),
365             freeze: ListArcField::new(None),
366             links <- ListLinks::new(),
367             handle,
368             process,
369         })
370     }
371 
372     kernel::list::define_list_arc_field_getter! {
373         pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
374         pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
375         pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
376         pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
377     }
378 }
379 
380 kernel::list::impl_list_arc_safe! {
381     impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
382     impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
383 }
384 kernel::list::impl_list_item! {
385     impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
386         using ListLinks { self.links };
387     }
388 }
389 
390 /// Keeps track of references this process has to nodes owned by other processes.
391 ///
392 /// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
393 /// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
394 /// extra costs should be eliminated.
395 struct ProcessNodeRefs {
396     /// Used to look up nodes using the 32-bit id that this process knows it by.
397     by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
398     /// Used to quickly find unused ids in `by_handle`.
399     handle_is_present: IdPool,
400     /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
401     /// the underlying `Node` struct as returned by `Node::global_id`.
402     by_node: RBTree<usize, u32>,
403     /// Used to look up a `FreezeListener` by cookie.
404     ///
405     /// There might be multiple freeze listeners for the same node, but at most one of them is
406     /// active.
407     freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
408 }
409 
410 impl ProcessNodeRefs {
411     fn new() -> Self {
412         Self {
413             by_handle: RBTree::new(),
414             handle_is_present: IdPool::new(),
415             by_node: RBTree::new(),
416             freeze_listeners: RBTree::new(),
417         }
418     }
419 }
420 
421 /// A process using binder.
422 ///
423 /// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
424 /// that a process has opened, so processes using several binder contexts have several `Process`
425 /// objects. This ensures that the contexts are fully separated.
426 #[pin_data]
427 pub(crate) struct Process {
428     pub(crate) ctx: Arc<Context>,
429 
430     // The task leader (process).
431     pub(crate) task: ARef<Task>,
432 
433     // Credential associated with file when `Process` is created.
434     pub(crate) cred: ARef<Credential>,
435 
436     #[pin]
437     pub(crate) inner: SpinLock<ProcessInner>,
438 
439     #[pin]
440     pub(crate) pages: ShrinkablePageRange,
441 
442     // Waitqueue of processes waiting for all outstanding transactions to be
443     // processed.
444     #[pin]
445     freeze_wait: CondVar,
446 
447     // Node references are in a different lock to avoid recursive acquisition when
448     // incrementing/decrementing a node in another process.
449     #[pin]
450     node_refs: Mutex<ProcessNodeRefs>,
451 
452     // Work node for deferred work item.
453     #[pin]
454     defer_work: Work<Process>,
455 
456     // Links for process list in Context.
457     #[pin]
458     links: ListLinks,
459 
460     pub(crate) stats: BinderStats,
461 }
462 
463 kernel::impl_has_work! {
464     impl HasWork<Process> for Process { self.defer_work }
465 }
466 
467 kernel::list::impl_list_arc_safe! {
468     impl ListArcSafe<0> for Process { untracked; }
469 }
470 kernel::list::impl_list_item! {
471     impl ListItem<0> for Process {
472         using ListLinks { self.links };
473     }
474 }
475 
476 impl workqueue::WorkItem for Process {
477     type Pointer = Arc<Process>;
478 
479     fn run(me: Arc<Self>) {
480         let defer;
481         {
482             let mut inner = me.inner.lock();
483             defer = inner.defer_work;
484             inner.defer_work = 0;
485         }
486 
487         if defer & PROC_DEFER_FLUSH != 0 {
488             me.deferred_flush();
489         }
490         if defer & PROC_DEFER_RELEASE != 0 {
491             me.deferred_release();
492         }
493     }
494 }
495 
496 impl Process {
497     fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
498         let current = kernel::current!();
499         let list_process = ListArc::pin_init::<Error>(
500             try_pin_init!(Process {
501                 ctx,
502                 cred,
503                 inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
504                 pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
505                 node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
506                 freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
507                 task: current.group_leader().into(),
508                 defer_work <- kernel::new_work!("Process::defer_work"),
509                 links <- ListLinks::new(),
510                 stats: BinderStats::new(),
511             }),
512             GFP_KERNEL,
513         )?;
514 
515         let process = list_process.clone_arc();
516         process.ctx.register_process(list_process);
517 
518         Ok(process)
519     }
520 
521     pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
522         self.task.tgid_nr_ns(None)
523     }
524 
525     #[inline(never)]
526     pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
527         seq_print!(m, "proc {}\n", self.pid_in_current_ns());
528         seq_print!(m, "context {}\n", &*ctx.name);
529 
530         let inner = self.inner.lock();
531         seq_print!(m, "  threads: {}\n", inner.threads.iter().count());
532         seq_print!(
533             m,
534             "  requested threads: {}+{}/{}\n",
535             inner.requested_thread_count,
536             inner.started_thread_count,
537             inner.max_threads,
538         );
539         if let Some(mapping) = &inner.mapping {
540             seq_print!(
541                 m,
542                 "  free oneway space: {}\n",
543                 mapping.alloc.free_oneway_space()
544             );
545             seq_print!(m, "  buffers: {}\n", mapping.alloc.count_buffers());
546         }
547         seq_print!(
548             m,
549             "  outstanding transactions: {}\n",
550             inner.outstanding_txns
551         );
552         seq_print!(m, "  nodes: {}\n", inner.nodes.iter().count());
553         drop(inner);
554 
555         {
556             let mut refs = self.node_refs.lock();
557             let (mut count, mut weak, mut strong) = (0, 0, 0);
558             for r in refs.by_handle.values_mut() {
559                 let node_ref = r.node_ref();
560                 let (nstrong, nweak) = node_ref.get_count();
561                 count += 1;
562                 weak += nweak;
563                 strong += nstrong;
564             }
565             seq_print!(m, "  refs: {count} s {strong} w {weak}\n");
566         }
567 
568         self.stats.debug_print("  ", m);
569 
570         Ok(())
571     }
572 
573     #[inline(never)]
574     pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
575         seq_print!(m, "proc {}\n", self.pid_in_current_ns());
576         seq_print!(m, "context {}\n", &*ctx.name);
577 
578         let mut all_threads = KVec::new();
579         let mut all_nodes = KVec::new();
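        // Snapshot the threads and nodes: reserve capacity outside the lock (the allocation may
        // sleep), then fill the vectors under the lock with `GFP_ATOMIC` pushes that cannot fail
        // because enough capacity was reserved.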
580         loop {
581             let inner = self.inner.lock();
582             let num_threads = inner.threads.iter().count();
583             let num_nodes = inner.nodes.iter().count();
584 
585             if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
586                 drop(inner);
587                 all_threads.reserve(num_threads, GFP_KERNEL)?;
588                 all_nodes.reserve(num_nodes, GFP_KERNEL)?;
589                 continue;
590             }
591 
592             for thread in inner.threads.values() {
593                 assert!(all_threads.len() < all_threads.capacity());
594                 let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
595             }
596 
597             for node in inner.nodes.values() {
598                 assert!(all_nodes.len() < all_nodes.capacity());
599                 let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
600             }
601 
602             break;
603         }
604 
605         for thread in all_threads {
606             thread.debug_print(m, print_all)?;
607         }
608 
609         let mut inner = self.inner.lock();
610         for node in all_nodes {
611             if print_all || node.has_oneway_transaction(&mut inner) {
612                 node.full_debug_print(m, &mut inner)?;
613             }
614         }
615         drop(inner);
616 
617         if print_all {
618             let mut refs = self.node_refs.lock();
619             for r in refs.by_handle.values_mut() {
620                 let node_ref = r.node_ref();
621                 let dead = node_ref.node.owner.inner.lock().is_dead;
622                 let (strong, weak) = node_ref.get_count();
623                 let debug_id = node_ref.node.debug_id;
624 
625                 seq_print!(
626                     m,
627                     "  ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
628                     r.debug_id,
629                     r.handle,
630                     if dead { "dead " } else { "" }
631                 );
632             }
633         }
634 
635         let inner = self.inner.lock();
636         for work in &inner.work {
637             work.debug_print(m, "  ", "  pending transaction ")?;
638         }
639         for _death in &inner.delivered_deaths {
640             seq_print!(m, "  has delivered dead binder\n");
641         }
642         if let Some(mapping) = &inner.mapping {
643             mapping.alloc.debug_print(m)?;
644         }
645         drop(inner);
646 
647         Ok(())
648     }
649 
650     /// Attempts to fetch a work item from the process queue.
651     pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
652         self.inner.lock().work.pop_front()
653     }
654 
655     /// Attempts to fetch a work item from the process queue. If none is available, it registers the
656     /// given thread as ready to receive work directly.
657     ///
658     /// This must only be called when the thread is not participating in a transaction chain; when
659     /// it is, work will always be delivered directly to the thread (and not through the process
660     /// queue).
661     pub(crate) fn get_work_or_register<'a>(
662         &'a self,
663         thread: &'a Arc<Thread>,
664     ) -> GetWorkOrRegister<'a> {
665         let mut inner = self.inner.lock();
666         // Try to get work from the process queue.
667         if let Some(work) = inner.work.pop_front() {
668             return GetWorkOrRegister::Work(work);
669         }
670 
671         // Register the thread as ready.
672         GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
673     }
674 
675     fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
676         let id = {
677             let current = kernel::current!();
678             if !core::ptr::eq(current.group_leader(), &*self.task) {
679                 pr_err!("get_current_thread was called from the wrong process.");
680                 return Err(EINVAL);
681             }
682             current.pid()
683         };
684 
685         {
686             let inner = self.inner.lock();
687             if let Some(thread) = inner.threads.get(&id) {
688                 return Ok(thread.clone());
689             }
690         }
691 
692         // Allocate a new `Thread` without holding any locks.
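        // The rbtree node is also reserved up front so that the insertion below cannot fail while
        // the spinlock is held.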
693         let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
694         let ta: Arc<Thread> = Thread::new(id, self.into())?;
695 
696         let mut inner = self.inner.lock();
697         match inner.threads.entry(id) {
698             rbtree::Entry::Vacant(entry) => {
699                 entry.insert(ta.clone(), reservation);
700                 Ok(ta)
701             }
702             rbtree::Entry::Occupied(_entry) => {
703                 pr_err!("Cannot create two threads with the same id.");
704                 Err(EINVAL)
705             }
706         }
707     }
708 
709     pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
710         // If push_work fails, drop the work item outside the lock.
711         let res = self.inner.lock().push_work(work);
712         match res {
713             Ok(()) => Ok(()),
714             Err((err, work)) => {
715                 drop(work);
716                 Err(err)
717             }
718         }
719     }
720 
721     fn set_as_manager(
722         self: ArcBorrow<'_, Self>,
723         info: Option<FlatBinderObject>,
724         thread: &Thread,
725     ) -> Result {
726         let (ptr, cookie, flags) = if let Some(obj) = info {
727             (
728                 // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
729                 // is safe to access the `binder` field.
730                 unsafe { obj.__bindgen_anon_1.binder },
731                 obj.cookie,
732                 obj.flags,
733             )
734         } else {
735             (0, 0, 0)
736         };
737         let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
738         let node = node_ref.node.clone();
739         self.ctx.set_manager_node(node_ref)?;
740         self.inner.lock().is_manager = true;
741 
742         // Force the state of the node to prevent the delivery of acquire/increfs.
743         let mut owner_inner = node.owner.inner.lock();
744         node.force_has_count(&mut owner_inner);
745         Ok(())
746     }
747 
748     fn get_node_inner(
749         self: ArcBorrow<'_, Self>,
750         ptr: u64,
751         cookie: u64,
752         flags: u32,
753         strong: bool,
754         thread: &Thread,
755         wrapper: Option<CritIncrWrapper>,
756     ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
757         // Try to find an existing node.
758         {
759             let mut inner = self.inner.lock();
760             if let Some(node) = inner.get_existing_node(ptr, cookie)? {
761                 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
762             }
763         }
764 
765         // Allocate the node before reacquiring the lock.
766         let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
767         let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
768         let mut inner = self.inner.lock();
769         if let Some(node) = inner.get_existing_node(ptr, cookie)? {
770             return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
771         }
772 
773         inner.nodes.insert(rbnode);
774         // This can only fail if someone has already pushed the node to a list, but we just created
775         // it and still hold the lock, so it can't fail right now.
776         let node_ref = inner
777             .new_node_ref_with_thread(node, strong, thread, wrapper)
778             .unwrap();
779 
780         Ok(Ok(node_ref))
781     }
782 
783     pub(crate) fn get_node(
784         self: ArcBorrow<'_, Self>,
785         ptr: u64,
786         cookie: u64,
787         flags: u32,
788         strong: bool,
789         thread: &Thread,
790     ) -> Result<NodeRef> {
791         let mut wrapper = None;
792         for _ in 0..2 {
793             match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
794                 Err(err) => return Err(err),
795                 Ok(Ok(node_ref)) => return Ok(node_ref),
796                 Ok(Err(CouldNotDeliverCriticalIncrement)) => {
797                     wrapper = Some(CritIncrWrapper::new()?);
798                 }
799             }
800         }
801         // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
802         // loop should run at most twice.
803         unreachable!()
804     }
805 
806     pub(crate) fn insert_or_update_handle(
807         self: ArcBorrow<'_, Process>,
808         node_ref: NodeRef,
809         is_manager: bool,
810     ) -> Result<u32> {
811         {
812             let mut refs = self.node_refs.lock();
813 
814             // Do a lookup before inserting.
815             if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
816                 let handle = *handle_ref;
817                 let info = refs.by_handle.get_mut(&handle).unwrap();
818                 info.node_ref().absorb(node_ref);
819                 return Ok(handle);
820             }
821         }
822 
823         // Reserve memory for tree nodes.
824         let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
825         let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
826         let info = UniqueArc::new_uninit(GFP_KERNEL)?;
827 
828         let mut refs_lock = self.node_refs.lock();
829         let mut refs = &mut *refs_lock;
830 
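        // Find an unused handle. If the id pool is full, the lock must be dropped to grow its
        // bitmap, so the search is retried after the lock is reacquired.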
831         let (unused_id, by_handle_slot) = loop {
832             // ID 0 may only be used by the manager.
833             let start = if is_manager { 0 } else { 1 };
834 
835             if let Some(res) = refs.handle_is_present.find_unused_id(start) {
836                 match refs.by_handle.entry(res.as_u32()) {
837                     rbtree::Entry::Vacant(entry) => break (res, entry),
838                     rbtree::Entry::Occupied(_) => {
839                         pr_err!("Detected mismatch between handle_is_present and by_handle");
840                         res.acquire();
841                         kernel::warn_on!(true);
842                         return Err(EINVAL);
843                     }
844                 }
845             }
846 
847             let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
848             drop(refs_lock);
849             let resizer = grow_request.realloc(GFP_KERNEL)?;
850             refs_lock = self.node_refs.lock();
851             refs = &mut *refs_lock;
852             refs.handle_is_present.grow(resizer);
853         };
854         let handle = unused_id.as_u32();
855 
856         // Do a lookup again as node may have been inserted before the lock was reacquired.
857         if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
858             let handle = *handle_ref;
859             let info = refs.by_handle.get_mut(&handle).unwrap();
860             info.node_ref().absorb(node_ref);
861             return Ok(handle);
862         }
863 
864         let gid = node_ref.node.global_id();
865         let (info_proc, info_node) = {
866             let info_init = NodeRefInfo::new(node_ref, handle, self.into());
867             match info.pin_init_with(info_init) {
868                 Ok(info) => ListArc::pair_from_pin_unique(info),
869                 // The error type is infallible, so the `Err` arm can never be reached.
870                 Err(err) => match err {},
871             }
872         };
873 
874         // Ensure the process is still alive while we insert a new reference.
875         //
876         // This releases the lock before inserting the nodes, but since `is_dead` is set as the
877         // first thing in `deferred_release`, process cleanup will not miss the items inserted into
878         // `refs` below.
879         if self.inner.lock().is_dead {
880             return Err(ESRCH);
881         }
882 
883         // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
884         // `info_node` into the right node's `refs` list.
885         unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
886 
887         refs.by_node.insert(reserve1.into_node(gid, handle));
888         by_handle_slot.insert(info_proc, reserve2);
889         unused_id.acquire();
890         Ok(handle)
891     }
892 
893     pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
894         // When handle is zero, try to get the context manager.
895         if handle == 0 {
896             Ok(self.ctx.get_manager_node(true)?)
897         } else {
898             Ok(self.get_node_from_handle(handle, true)?)
899         }
900     }
901 
902     pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
903         self.node_refs
904             .lock()
905             .by_handle
906             .get_mut(&handle)
907             .ok_or(ENOENT)?
908             .node_ref()
909             .clone(strong)
910     }
911 
912     pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
913         let mut inner = self.inner.lock();
914         // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
915         let removed = unsafe { inner.delivered_deaths.remove(death) };
916         drop(inner);
917         drop(removed);
918     }
919 
920     pub(crate) fn update_ref(
921         self: ArcBorrow<'_, Process>,
922         handle: u32,
923         inc: bool,
924         strong: bool,
925     ) -> Result {
926         if inc && handle == 0 {
927             if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
928                 if core::ptr::eq(&*self, &*node_ref.node.owner) {
929                     return Err(EINVAL);
930                 }
931                 let _ = self.insert_or_update_handle(node_ref, true);
932                 return Ok(());
933             }
934         }
935 
936         // To preserve original binder behaviour, we only fail requests where the manager tries to
937         // increment references on itself.
938         let mut refs = self.node_refs.lock();
939         if let Some(info) = refs.by_handle.get_mut(&handle) {
940             if info.node_ref().update(inc, strong) {
941                 // Clean up death if there is one attached to this node reference.
942                 if let Some(death) = info.death().take() {
943                     death.set_cleared(true);
944                     self.remove_from_delivered_deaths(&death);
945                 }
946 
947                 // Remove reference from process tables, and from the node's `refs` list.
948 
949                 // SAFETY: We are removing the `NodeRefInfo` from the right node.
950                 unsafe { info.node_ref2().node.remove_node_info(info) };
951 
952                 let id = info.node_ref().node.global_id();
953                 refs.by_handle.remove(&handle);
954                 refs.by_node.remove(&id);
955                 refs.handle_is_present.release_id(handle as usize);
956 
957                 if let Some(shrink) = refs.handle_is_present.shrink_request() {
958                     drop(refs);
959                     // This intentionally ignores allocation failures.
960                     if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
961                         refs = self.node_refs.lock();
962                         refs.handle_is_present.shrink(new_bitmap);
963                     }
964                 }
965             }
966         } else {
967             // All refs are cleared in process exit, so this warning is expected in that case.
968             if !self.inner.lock().is_dead {
969                 pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
970             }
971         }
972         Ok(())
973     }
974 
975     /// Decrements the refcount of the given node, if one exists.
976     pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
977         let mut inner = self.inner.lock();
978         if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
979             inner.update_node_refcount(&node, false, strong, 1, None);
980         }
981     }
982 
983     pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
984         let ptr = reader.read::<u64>()?;
985         let cookie = reader.read::<u64>()?;
986         let mut inner = self.inner.lock();
987         if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
988             if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
989                 // This only fails if the process is dead.
990                 let _ = inner.push_work(node);
991             }
992         }
993         Ok(())
994     }
995 
996     pub(crate) fn buffer_alloc(
997         self: &Arc<Self>,
998         debug_id: usize,
999         size: usize,
1000         is_oneway: bool,
1001         from_pid: i32,
1002     ) -> BinderResult<NewAllocation> {
1003         use kernel::page::PAGE_SIZE;
1004 
1005         let mut reserve_new_args = ReserveNewArgs {
1006             debug_id,
1007             size,
1008             is_oneway,
1009             pid: from_pid,
1010             ..ReserveNewArgs::default()
1011         };
1012 
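        // Reserve a range in the mapping. If the range allocator needs to allocate memory first,
        // the lock is dropped, the allocation is performed, and `reserve_new` is retried with the
        // result.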
1013         let (new_alloc, addr) = loop {
1014             let mut inner = self.inner.lock();
1015             let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
1016             let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
1017                 ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
1018                 ReserveNew::NeedAlloc(request) => request,
1019             };
1020             drop(inner);
1021             // We need to allocate memory and then call `reserve_new` again.
1022             reserve_new_args = alloc_request.make_alloc()?;
1023         };
1024 
1025         let res = Allocation::new(
1026             self.clone(),
1027             debug_id,
1028             new_alloc.offset,
1029             size,
1030             addr + new_alloc.offset,
1031             new_alloc.oneway_spam_detected,
1032         );
1033 
1034         // This allocation will be marked as in use until the `Allocation` is used to free it.
1035         //
1036         // This method can't be called while holding a lock, so we release the lock first. It's
1037         // okay for several threads to use the method on the same index at the same time. In that
1038         // case, one of the calls will allocate the given page (if missing), and the other call
1039         // will wait for the other call to finish allocating the page.
1040         //
1041         // We will not call `stop_using_range` in parallel with this on the same page, because the
1042         // allocation can only be removed via the destructor of the `Allocation` object that we
1043         // currently own.
1044         match self.pages.use_range(
1045             new_alloc.offset / PAGE_SIZE,
1046             (new_alloc.offset + size).div_ceil(PAGE_SIZE),
1047         ) {
1048             Ok(()) => {}
1049             Err(err) => {
1050                 pr_warn!("use_range failure {:?}", err);
1051                 return Err(err.into());
1052             }
1053         }
1054 
1055         Ok(NewAllocation(res))
1056     }
1057 
1058     pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
1059         let mut inner = self.inner.lock();
1060         let mapping = inner.mapping.as_mut()?;
1061         let offset = ptr.checked_sub(mapping.address)?;
1062         let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
1063         let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
1064         if let Some(data) = odata {
1065             alloc.set_info(data);
1066         }
1067         Some(alloc)
1068     }
1069 
1070     pub(crate) fn buffer_raw_free(&self, ptr: usize) {
1071         let mut inner = self.inner.lock();
1072         if let Some(ref mut mapping) = &mut inner.mapping {
1073             let offset = match ptr.checked_sub(mapping.address) {
1074                 Some(offset) => offset,
1075                 None => return,
1076             };
1077 
1078             let freed_range = match mapping.alloc.reservation_abort(offset) {
1079                 Ok(freed_range) => freed_range,
1080                 Err(_) => {
1081                     pr_warn!(
1082                         "Pointer {:x} failed to free, base = {:x}\n",
1083                         ptr,
1084                         mapping.address
1085                     );
1086                     return;
1087                 }
1088             };
1089 
1090             // No more allocations in this range. Mark them as not in use.
1091             //
1092             // Must be done before we release the lock so that `use_range` is not used on these
1093             // indices until `stop_using_range` returns.
1094             self.pages
1095                 .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
1096         }
1097     }
1098 
1099     pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
1100         let mut inner = self.inner.lock();
1101         if let Some(ref mut mapping) = &mut inner.mapping {
1102             if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
1103                 pr_warn!("Offset {} failed to be marked freeable\n", offset);
1104             }
1105         }
1106     }
1107 
1108     fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
1109         use kernel::page::PAGE_SIZE;
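        // The usable part of the mapping is capped at 4 MiB (`SZ_4M`); larger VMAs are truncated
        // to that size.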
1110         let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
1111         let mapping = Mapping::new(vma.start(), size);
1112         let page_count = self.pages.register_with_vma(vma)?;
1113         if page_count * PAGE_SIZE != size {
1114             return Err(EINVAL);
1115         }
1116 
1117         // Save range allocator for later.
1118         self.inner.lock().mapping = Some(mapping);
1119 
1120         Ok(())
1121     }
1122 
1123     fn version(&self, data: UserSlice) -> Result {
1124         data.writer().write(&BinderVersion::current())
1125     }
1126 
1127     pub(crate) fn register_thread(&self) -> bool {
1128         self.inner.lock().register_thread()
1129     }
1130 
1131     fn remove_thread(&self, thread: Arc<Thread>) {
1132         self.inner.lock().threads.remove(&thread.id);
1133         thread.release();
1134     }
1135 
1136     fn set_max_threads(&self, max: u32) {
1137         self.inner.lock().max_threads = max;
1138     }
1139 
1140     fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
1141         self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
1142     }
1143 
1144     pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
1145         self.inner.lock().oneway_spam_detection_enabled
1146     }
1147 
1148     fn get_node_debug_info(&self, data: UserSlice) -> Result {
1149         let (mut reader, mut writer) = data.reader_writer();
1150 
1151         // Read the starting point.
1152         let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
1153         let mut out = BinderNodeDebugInfo::default();
1154 
1155         {
1156             let inner = self.inner.lock();
1157             for (node_ptr, node) in &inner.nodes {
1158                 if *node_ptr > ptr {
1159                     node.populate_debug_info(&mut out, &inner);
1160                     break;
1161                 }
1162             }
1163         }
1164 
1165         writer.write(&out)
1166     }
1167 
1168     fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
1169         let (mut reader, mut writer) = data.reader_writer();
1170         let mut out = reader.read::<BinderNodeInfoForRef>()?;
1171 
1172         if out.strong_count != 0
1173             || out.weak_count != 0
1174             || out.reserved1 != 0
1175             || out.reserved2 != 0
1176             || out.reserved3 != 0
1177         {
1178             return Err(EINVAL);
1179         }
1180 
1181         // Only the context manager is allowed to use this ioctl.
1182         if !self.inner.lock().is_manager {
1183             return Err(EPERM);
1184         }
1185 
1186         {
1187             let mut node_refs = self.node_refs.lock();
1188             let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
1189             let node_ref = node_info.node_ref();
1190             let owner_inner = node_ref.node.owner.inner.lock();
1191             node_ref.node.populate_counts(&mut out, &owner_inner);
1192         }
1193 
1194         // Write the result back.
1195         writer.write(&out)
1196     }
1197 
1198     pub(crate) fn needs_thread(&self) -> bool {
1199         let mut inner = self.inner.lock();
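        // Ask for a new thread only if no spawn request is outstanding, no thread is currently
        // ready for work, and the pool has not yet reached `max_threads`.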
1200         let ret = inner.requested_thread_count == 0
1201             && inner.ready_threads.is_empty()
1202             && inner.started_thread_count < inner.max_threads;
1203         if ret {
1204             inner.requested_thread_count += 1
1205         }
1206         ret
1207     }
1208 
1209     pub(crate) fn request_death(
1210         self: &Arc<Self>,
1211         reader: &mut UserSliceReader,
1212         thread: &Thread,
1213     ) -> Result {
1214         let handle: u32 = reader.read()?;
1215         let cookie: u64 = reader.read()?;
1216 
1217         // Queue BR_ERROR if we can't allocate memory for the death notification.
1218         let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
1219             thread.push_return_work(BR_ERROR);
1220         })?;
1221         let mut refs = self.node_refs.lock();
1222         let Some(info) = refs.by_handle.get_mut(&handle) else {
1223             pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
1224             return Ok(());
1225         };
1226 
1227         // Nothing to do if there is already a death notification request for this handle.
1228         if info.death().is_some() {
1229             pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
1230             return Ok(());
1231         }
1232 
1233         let death = {
1234             let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
1235             match death.pin_init_with(death_init) {
1236                 Ok(death) => death,
1237                 // The error type is infallible, so the `Err` arm can never be reached.
1238                 Err(err) => match err {},
1239             }
1240         };
1241 
1242         // Register the death notification.
1243         {
1244             let owner = info.node_ref2().node.owner.clone();
1245             let mut owner_inner = owner.inner.lock();
1246             if owner_inner.is_dead {
1247                 let death = Arc::from(death);
1248                 *info.death() = Some(death.clone());
1249                 drop(owner_inner);
1250                 death.set_dead();
1251             } else {
1252                 let death = ListArc::from(death);
1253                 *info.death() = Some(death.clone_arc());
1254                 info.node_ref().node.add_death(death, &mut owner_inner);
1255             }
1256         }
1257         Ok(())
1258     }
1259 
1260     pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
1261         let handle: u32 = reader.read()?;
1262         let cookie: u64 = reader.read()?;
1263 
1264         let mut refs = self.node_refs.lock();
1265         let Some(info) = refs.by_handle.get_mut(&handle) else {
1266             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
1267             return Ok(());
1268         };
1269 
1270         let Some(death) = info.death().take() else {
1271             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
1272             return Ok(());
1273         };
1274         if death.cookie != cookie {
1275             *info.death() = Some(death);
1276             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
1277             return Ok(());
1278         }
1279 
1280         // Update state and determine if we need to queue a work item. We only need to do it when
1281         // the node is not dead or if the user already completed the death notification.
1282         if death.set_cleared(false) {
1283             if let Some(death) = ListArc::try_from_arc_or_drop(death) {
1284                 let _ = thread.push_work_if_looper(death);
1285             }
1286         }
1287 
1288         Ok(())
1289     }
1290 
1291     pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
1292         if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
1293             death.set_notification_done(thread);
1294         }
1295     }
1296 
1297     /// Locks the spinlock and moves the `nodes` rbtree out.
1298     ///
1299     /// This allows you to iterate through `nodes` while also allowing you to give other parts of
1300     /// the codebase exclusive access to `ProcessInner`.
1301     pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
1302         let mut inner = self.inner.lock();
1303         WithNodes {
1304             nodes: take(&mut inner.nodes),
1305             inner,
1306         }
1307     }
1308 
1309     fn deferred_flush(&self) {
1310         let inner = self.inner.lock();
1311         for thread in inner.threads.values() {
1312             thread.exit_looper();
1313         }
1314     }
1315 
1316     fn deferred_release(self: Arc<Self>) {
1317         let is_manager = {
1318             let mut inner = self.inner.lock();
1319             inner.is_dead = true;
1320             inner.is_frozen = IsFrozen::No;
1321             inner.sync_recv = false;
1322             inner.async_recv = false;
1323             inner.is_manager
1324         };
1325 
1326         if is_manager {
1327             self.ctx.unset_manager_node();
1328         }
1329 
1330         self.ctx.deregister_process(&self);
1331 
1332         let binderfs_file = self.inner.lock().binderfs_file.take();
1333         drop(binderfs_file);
1334 
1335         // Release threads.
1336         let threads = {
1337             let mut inner = self.inner.lock();
1338             let threads = take(&mut inner.threads);
1339             let ready = take(&mut inner.ready_threads);
1340             drop(inner);
1341             drop(ready);
1342 
1343             for thread in threads.values() {
1344                 thread.release();
1345             }
1346             threads
1347         };
1348 
1349         // Release nodes.
1350         {
1351             while let Some(node) = {
1352                 let mut lock = self.inner.lock();
1353                 lock.nodes.cursor_front_mut().map(|c| c.remove_current().1)
1354             } {
1355                 node.to_key_value().1.release();
1356             }
1357         }
1358 
1359         // Clean up death listeners and remove nodes from external node info lists.
1360         for info in self.node_refs.lock().by_handle.values_mut() {
1361             // SAFETY: We are removing the `NodeRefInfo` from the right node.
1362             unsafe { info.node_ref2().node.remove_node_info(info) };
1363 
1364             // Remove all death notifications from the nodes (that belong to a different process).
1365             let death = if let Some(existing) = info.death().take() {
1366                 existing
1367             } else {
1368                 continue;
1369             };
1370             death.set_cleared(false);
1371         }
1372 
1373         // Clean up freeze listeners.
1374         let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
1375         for listener in freeze_listeners.values() {
1376             listener.on_process_exit(&self);
1377         }
1378         drop(freeze_listeners);
1379 
1380         // Release refs on foreign nodes.
1381         {
1382             let mut refs = self.node_refs.lock();
1383             let by_handle = take(&mut refs.by_handle);
1384             let by_node = take(&mut refs.by_node);
1385             drop(refs);
1386             drop(by_node);
1387             drop(by_handle);
1388         }
1389 
1390         // Cancel all pending work items.
1391         while let Some(work) = self.get_work() {
1392             work.into_arc().cancel();
1393         }
1394 
1395         let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
1396         drop(delivered_deaths);
1397 
1398         // Free any resources kept alive by allocated buffers.
1399         let omapping = self.inner.lock().mapping.take();
1400         if let Some(mut mapping) = omapping {
1401             let address = mapping.address;
1402             mapping
1403                 .alloc
1404                 .take_for_each(|offset, size, debug_id, odata| {
1405                     let ptr = offset + address;
1406                     let mut alloc =
1407                         Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
1408                     if let Some(data) = odata {
1409                         alloc.set_info(data);
1410                     }
1411                     drop(alloc)
1412                 });
1413         }
1414 
1415         // The calls to synchronize_rcu() in the `Thread` destructors happen here.
1416         drop(threads);
1417     }
1418 
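    /// Decrements the count of outstanding transactions, waking any `freeze_wait` waiter once the
    /// count reaches zero while the process is frozen or freezing.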
1419     pub(crate) fn drop_outstanding_txn(&self) {
1420         let wake = {
1421             let mut inner = self.inner.lock();
1422             if inner.outstanding_txns == 0 {
1423                 pr_err!("outstanding_txns underflow");
1424                 return;
1425             }
1426             inner.outstanding_txns -= 1;
1427             inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
1428         };
1429 
1430         if wake {
1431             self.freeze_wait.notify_all();
1432         }
1433     }
1434 
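    /// Handles `BINDER_FREEZE` for this process.
    ///
    /// When `info.enable` is zero the process is thawed and the corresponding freeze notification
    /// messages are sent. Otherwise the process is marked as freezing and, if `info.timeout_ms` is
    /// non-zero, we wait up to that long for outstanding transactions to drain. If transactions
    /// are still pending the freeze is aborted with `EAGAIN`; a pending signal aborts it with
    /// `ERESTARTSYS`.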
1435     pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
1436         if info.enable == 0 {
1437             let msgs = self.prepare_freeze_messages()?;
1438             let mut inner = self.inner.lock();
1439             inner.sync_recv = false;
1440             inner.async_recv = false;
1441             inner.is_frozen = IsFrozen::No;
1442             drop(inner);
1443             msgs.send_messages();
1444             return Ok(());
1445         }
1446 
1447         let mut inner = self.inner.lock();
1448         inner.sync_recv = false;
1449         inner.async_recv = false;
1450         inner.is_frozen = IsFrozen::InProgress;
1451 
1452         if info.timeout_ms > 0 {
1453             let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
1454             while jiffies > 0 {
1455                 if inner.outstanding_txns == 0 {
1456                     break;
1457                 }
1458 
1459                 match self
1460                     .freeze_wait
1461                     .wait_interruptible_timeout(&mut inner, jiffies)
1462                 {
1463                     CondVarTimeoutResult::Signal { .. } => {
1464                         inner.is_frozen = IsFrozen::No;
1465                         return Err(ERESTARTSYS);
1466                     }
1467                     CondVarTimeoutResult::Woken { jiffies: remaining } => {
1468                         jiffies = remaining;
1469                     }
1470                     CondVarTimeoutResult::Timeout => {
1471                         jiffies = 0;
1472                     }
1473                 }
1474             }
1475         }
1476 
1477         if inner.txns_pending_locked() {
1478             inner.is_frozen = IsFrozen::No;
1479             Err(EAGAIN)
1480         } else {
1481             drop(inner);
1482             match self.prepare_freeze_messages() {
1483                 Ok(batch) => {
1484                     self.inner.lock().is_frozen = IsFrozen::Yes;
1485                     batch.send_messages();
1486                     Ok(())
1487                 }
1488                 Err(kernel::alloc::AllocError) => {
1489                     self.inner.lock().is_frozen = IsFrozen::No;
1490                     Err(ENOMEM)
1491                 }
1492             }
1493         }
1494     }
1495 }
1496 
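/// Handles `BINDER_GET_FROZEN_INFO`: reports the accumulated `sync_recv` and `async_recv` flags of
/// every binder process with the requested pid (across all contexts), plus whether transactions
/// are still pending.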
1497 fn get_frozen_status(data: UserSlice) -> Result {
1498     let (mut reader, mut writer) = data.reader_writer();
1499 
1500     let mut info = reader.read::<BinderFrozenStatusInfo>()?;
1501     info.sync_recv = 0;
1502     info.async_recv = 0;
1503     let mut found = false;
1504 
1505     for ctx in crate::context::get_all_contexts()? {
1506         ctx.for_each_proc(|proc| {
1507             if proc.task.pid() == info.pid as _ {
1508                 found = true;
1509                 let inner = proc.inner.lock();
1510                 let txns_pending = inner.txns_pending_locked();
1511                 info.async_recv |= inner.async_recv as u32;
1512                 info.sync_recv |= inner.sync_recv as u32;
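                // `sync_recv` is a small bitfield here: bit 0 is the sync-received flag itself,
                // and bit 1 reports whether transactions are still pending.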
1513                 info.sync_recv |= (txns_pending as u32) << 1;
1514             }
1515         });
1516     }
1517 
1518     if found {
1519         writer.write(&info)?;
1520         Ok(())
1521     } else {
1522         Err(EINVAL)
1523     }
1524 }
1525 
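/// Handles `BINDER_FREEZE`: applies the freeze (or thaw) request to every binder `Process` that
/// belongs to the target pid, across all binder contexts.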
1526 fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
1527     let info = reader.read::<BinderFreezeInfo>()?;
1528 
1529     // It is very unlikely for there to be more than 3 matches, since a process normally uses
1530     // at most the binder and hwbinder contexts.
1531     let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
1532 
1533     let ctxs = crate::context::get_all_contexts()?;
1534     for ctx in ctxs {
1535         for proc in ctx.get_procs_with_pid(info.pid as i32)? {
1536             procs.push(proc, GFP_KERNEL)?;
1537         }
1538     }
1539 
1540     for proc in procs {
1541         proc.ioctl_freeze(&info)?;
1542     }
1543     Ok(())
1544 }
1545 
1546 /// The ioctl handler.
1547 impl Process {
1548     /// Ioctls that are write-only from the perspective of userspace.
1549     ///
1550     /// The kernel will only read from the pointer that userspace provided to us.
1551     fn ioctl_write_only(
1552         this: ArcBorrow<'_, Process>,
1553         _file: &File,
1554         cmd: u32,
1555         reader: &mut UserSliceReader,
1556     ) -> Result {
1557         let thread = this.get_current_thread()?;
1558         match cmd {
1559             uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
1560             uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
1561             uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
1562             uapi::BINDER_SET_CONTEXT_MGR_EXT => {
1563                 this.set_as_manager(Some(reader.read()?), &thread)?
1564             }
1565             uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
1566                 this.set_oneway_spam_detection_enabled(reader.read()?)
1567             }
1568             uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
1569             _ => return Err(EINVAL),
1570         }
1571         Ok(())
1572     }
1573 
1574     /// Ioctls that are read/write from the perspective of userspace.
1575     ///
1576     /// The kernel will both read from and write to the pointer that userspace provided to us.
1577     fn ioctl_write_read(
1578         this: ArcBorrow<'_, Process>,
1579         file: &File,
1580         cmd: u32,
1581         data: UserSlice,
1582     ) -> Result {
1583         let thread = this.get_current_thread()?;
1584         let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
1585         match cmd {
1586             uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
1587             uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
1588             uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
1589             uapi::BINDER_VERSION => this.version(data)?,
1590             uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
1591             uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
1592             _ => return Err(EINVAL),
1593         }
1594         Ok(())
1595     }
1596 }
1597 
1598 /// The file operations supported by `Process`.
1599 impl Process {
1600     pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
1601         Self::new(ctx.into(), ARef::from(file.cred()))
1602     }
1603 
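    /// Called when the binder file is released. Sets `PROC_DEFER_RELEASE` and schedules the
    /// deferred work on the system workqueue; the actual teardown happens in `deferred_release`.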
1604     pub(crate) fn release(this: Arc<Process>, _file: &File) {
1605         let binderfs_file;
1606         let should_schedule;
1607         {
1608             let mut inner = this.inner.lock();
1609             should_schedule = inner.defer_work == 0;
1610             inner.defer_work |= PROC_DEFER_RELEASE;
1611             binderfs_file = inner.binderfs_file.take();
1612         }
1613 
1614         if should_schedule {
1615             // Ignore failures to schedule to the workqueue. Those just mean that we're already
1616             // scheduled for execution.
1617             let _ = workqueue::system().enqueue(this);
1618         }
1619 
1620         drop(binderfs_file);
1621     }
1622 
1623     pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
1624         let should_schedule;
1625         {
1626             let mut inner = this.inner.lock();
1627             should_schedule = inner.defer_work == 0;
1628             inner.defer_work |= PROC_DEFER_FLUSH;
1629         }
1630 
1631         if should_schedule {
1632             // Ignore failures to schedule to the workqueue. Those just mean that we're already
1633             // scheduled for execution.
1634             let _ = workqueue::system().enqueue(Arc::from(this));
1635         }
1636         Ok(())
1637     }
1638 
1639     pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
1640         use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
1641         use kernel::uapi::{_IOC_READ, _IOC_WRITE};
1642 
1643         crate::trace::trace_ioctl(cmd, arg);
1644 
1645         let user_slice = UserSlice::new(kernel::uaccess::UserPtr::from_addr(arg), _IOC_SIZE(cmd));
1646 
1647         const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
1648 
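        // Binder ioctls are either write-only (`_IOW`, e.g. `BINDER_SET_MAX_THREADS`) or
        // write-read (`_IOWR`, e.g. `BINDER_WRITE_READ`), so dispatching on the direction bits of
        // the command is sufficient.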
1649         match _IOC_DIR(cmd) {
1650             _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
1651             _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
1652             _ => Err(EINVAL),
1653         }
1654     }
1655 
1656     pub(crate) fn compat_ioctl(
1657         this: ArcBorrow<'_, Process>,
1658         file: &File,
1659         cmd: u32,
1660         arg: usize,
1661     ) -> Result {
1662         Self::ioctl(this, file, cmd, arg)
1663     }
1664 
1665     pub(crate) fn mmap(
1666         this: ArcBorrow<'_, Process>,
1667         _file: &File,
1668         vma: &mm::virt::VmaNew,
1669     ) -> Result {
1670         // We don't allow mmap to be used by a different process.
1671         if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
1672             return Err(EINVAL);
1673         }
1674         if vma.start() == 0 {
1675             return Err(EINVAL);
1676         }
1677 
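        // The mapping is read-only from userspace's point of view: the kernel copies transaction
        // data into the buffers, and clearing MAYWRITE also prevents userspace from later making
        // the mapping writable with mprotect().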
1678         vma.try_clear_maywrite().map_err(|_| EPERM)?;
1679         vma.set_dontcopy();
1680         vma.set_mixedmap();
1681 
1682         // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
1683         this.create_mapping(vma)
1684     }
1685 
1686     pub(crate) fn poll(
1687         this: ArcBorrow<'_, Process>,
1688         file: &File,
1689         table: PollTable<'_>,
1690     ) -> Result<u32> {
1691         let thread = this.get_current_thread()?;
1692         let (from_proc, mut mask) = thread.poll(file, table);
1693         if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
1694             mask |= bindings::POLLIN;
1695         }
1696         Ok(mask)
1697     }
1698 }
1699 
1700 /// Represents that a thread has registered with the `ready_threads` list of its process.
1701 ///
1702 /// The destructor of this type will unregister the thread from the list of ready threads.
1703 pub(crate) struct Registration<'a> {
1704     thread: &'a Arc<Thread>,
1705 }
1706 
1707 impl<'a> Registration<'a> {
1708     fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
1709         assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
1710         // INVARIANT: We are pushing this thread to the right `ready_threads` list.
1711         if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
1712             guard.ready_threads.push_front(list_arc);
1713         } else {
1714             // It is an error to hit this branch, and it should not be reachable. We try to do
1715             // something reasonable when the failure path happens. Most likely, the thread in
1716             // question will sleep forever.
1717             pr_err!("Same thread registered with `ready_threads` twice.");
1718         }
1719         Self { thread }
1720     }
1721 }
1722 
1723 impl Drop for Registration<'_> {
1724     fn drop(&mut self) {
1725         let mut inner = self.thread.process.inner.lock();
1726         // SAFETY: The thread has the invariant that we never push it to any other linked list than
1727         // the `ready_threads` list of its parent process. Therefore, the thread is either in that
1728         // list, or in no list.
1729         unsafe { inner.ready_threads.remove(self.thread) };
1730     }
1731 }
1732 
1733 pub(crate) struct WithNodes<'a> {
1734     pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
1735     pub(crate) nodes: RBTree<u64, DArc<Node>>,
1736 }
1737 
1738 impl Drop for WithNodes<'_> {
1739     fn drop(&mut self) {
1740         core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
1741         if self.nodes.iter().next().is_some() {
1742             pr_err!("nodes array was modified while using lock_with_nodes\n");
1743         }
1744     }
1745 }
1746 
1747 pub(crate) enum GetWorkOrRegister<'a> {
1748     Work(DLArc<dyn DeliverToRead>),
1749     Register(Registration<'a>),
1750 }
1751