xref: /linux/drivers/android/binder/process.rs (revision 4bb1f7e19c4a1d6eeb52b80acff5ac63edd1b91d)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 // Copyright (C) 2025 Google LLC.
4 
5 //! This module defines the `Process` type, which represents a process using a particular binder
6 //! context.
7 //!
8 //! The `Process` object keeps track of all of the resources that this process owns in the binder
9 //! context.
10 //!
11 //! There is one `Process` object for each binder fd that a process has opened, so processes using
12 //! several binder contexts have several `Process` objects. This ensures that the contexts are
13 //! fully separated.
14 
15 use core::mem::take;
16 
17 use kernel::{
18     bindings,
19     cred::Credential,
20     error::Error,
21     fs::file::{self, File},
22     list::{List, ListArc, ListArcField, ListLinks},
23     mm,
24     prelude::*,
25     rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
26     seq_file::SeqFile,
27     seq_print,
28     sync::poll::PollTable,
29     sync::{
30         lock::{spinlock::SpinLockBackend, Guard},
31         Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
32     },
33     task::Task,
34     types::ARef,
35     uaccess::{UserSlice, UserSliceReader},
36     uapi,
37     workqueue::{self, Work},
38 };
39 
40 use crate::{
41     allocation::{Allocation, AllocationInfo, NewAllocation},
42     context::Context,
43     defs::*,
44     error::{BinderError, BinderResult},
45     node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
46     page_range::ShrinkablePageRange,
47     range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
48     stats::BinderStats,
49     thread::{PushWorkRes, Thread},
50     BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
51 };
52 
53 #[path = "freeze.rs"]
54 mod freeze;
55 use self::freeze::{FreezeCookie, FreezeListener};
56 
57 struct Mapping {
58     address: usize,
59     alloc: RangeAllocator<AllocationInfo>,
60 }
61 
62 impl Mapping {
63     fn new(address: usize, size: usize) -> Self {
64         Self {
65             address,
66             alloc: RangeAllocator::new(size),
67         }
68     }
69 }
70 
71 // bitflags for defer_work.
72 const PROC_DEFER_FLUSH: u8 = 1;
73 const PROC_DEFER_RELEASE: u8 = 2;
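
// A minimal standalone sketch (not part of this driver; `std::sync::Mutex` stands in for the
// kernel `SpinLock`) of the pattern these bitflags support: callers OR a bit into
// `defer_work` while holding the lock, and the work item later swaps the mask to zero and
// dispatches outside the lock, as `workqueue::WorkItem::run` does further down.
const SKETCH_DEFER_FLUSH: u8 = 1;
const SKETCH_DEFER_RELEASE: u8 = 2;

fn sketch_queue_deferred(flags: &std::sync::Mutex<u8>, bit: u8) {
    // Accumulate the request; several requests may be coalesced into a single run.
    *flags.lock().unwrap() |= bit;
}

fn sketch_run_deferred(flags: &std::sync::Mutex<u8>) {
    // Snapshot and clear the mask while holding the lock, then act on the snapshot unlocked.
    let defer = core::mem::take(&mut *flags.lock().unwrap());
    if defer & SKETCH_DEFER_FLUSH != 0 { /* deferred flush elided */ }
    if defer & SKETCH_DEFER_RELEASE != 0 { /* deferred release elided */ }
}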
74 
75 #[derive(Copy, Clone)]
76 pub(crate) enum IsFrozen {
77     Yes,
78     No,
79     InProgress,
80 }
81 
82 impl IsFrozen {
83     /// Whether incoming transactions should be rejected due to freeze.
84     pub(crate) fn is_frozen(self) -> bool {
85         match self {
86             IsFrozen::Yes => true,
87             IsFrozen::No => false,
88             IsFrozen::InProgress => true,
89         }
90     }
91 
92     /// Whether freeze notifications consider this process frozen.
93     pub(crate) fn is_fully_frozen(self) -> bool {
94         match self {
95             IsFrozen::Yes => true,
96             IsFrozen::No => false,
97             IsFrozen::InProgress => false,
98         }
99     }
100 }
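
// The two predicates only differ for `InProgress`: while `ioctl_freeze` is still waiting for
// outstanding transactions, new incoming transactions are already rejected, but freeze
// listeners do not yet consider the process frozen. A standalone check of that truth table
// (illustration only, relies solely on the enum above):
fn sketch_isfrozen_truth_table() {
    assert!(IsFrozen::Yes.is_frozen() && IsFrozen::Yes.is_fully_frozen());
    assert!(!IsFrozen::No.is_frozen() && !IsFrozen::No.is_fully_frozen());
    assert!(IsFrozen::InProgress.is_frozen() && !IsFrozen::InProgress.is_fully_frozen());
}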
101 
102 /// The fields of `Process` protected by the spinlock.
103 pub(crate) struct ProcessInner {
104     is_manager: bool,
105     pub(crate) is_dead: bool,
106     threads: RBTree<i32, Arc<Thread>>,
107     /// INVARIANT: Threads pushed to this list must be owned by this process.
108     ready_threads: List<Thread>,
109     nodes: RBTree<u64, DArc<Node>>,
110     mapping: Option<Mapping>,
111     work: List<DTRWrap<dyn DeliverToRead>>,
112     delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
113 
114     /// The number of requested threads that haven't registered yet.
115     requested_thread_count: u32,
116     /// The maximum number of threads used by the process thread pool.
117     max_threads: u32,
118     /// The number of threads that have started and registered with the thread pool.
119     started_thread_count: u32,
120 
121     /// Bitmap of deferred work to do.
122     defer_work: u8,
123 
124     /// Number of outstanding transactions that must complete before processes waiting on
125     /// `freeze_wait` are woken up.
126     outstanding_txns: u32,
127     /// Process is frozen and unable to service binder transactions.
128     pub(crate) is_frozen: IsFrozen,
129     /// Process received sync transactions since last frozen.
130     pub(crate) sync_recv: bool,
131     /// Process received async transactions since last frozen.
132     pub(crate) async_recv: bool,
133     pub(crate) binderfs_file: Option<BinderfsProcFile>,
134     /// Whether oneway spam detection is enabled.
135     oneway_spam_detection_enabled: bool,
136 }
137 
138 impl ProcessInner {
139     fn new() -> Self {
140         Self {
141             is_manager: false,
142             is_dead: false,
143             threads: RBTree::new(),
144             ready_threads: List::new(),
145             mapping: None,
146             nodes: RBTree::new(),
147             work: List::new(),
148             delivered_deaths: List::new(),
149             requested_thread_count: 0,
150             max_threads: 0,
151             started_thread_count: 0,
152             defer_work: 0,
153             outstanding_txns: 0,
154             is_frozen: IsFrozen::No,
155             sync_recv: false,
156             async_recv: false,
157             binderfs_file: None,
158             oneway_spam_detection_enabled: false,
159         }
160     }
161 
162     /// Schedule the work item for execution on this process.
163     ///
164     /// If any threads are ready for work, then the work item is given directly to that thread and
165     /// it is woken up. Otherwise, it is pushed to the process work list.
166     ///
167     /// This call can fail only if the process is dead. In this case, the work item is returned to
168     /// the caller so that the caller can drop it after releasing the inner process lock. This is
169     /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
170     /// taken while holding the inner process lock.
171     pub(crate) fn push_work(
172         &mut self,
173         work: DLArc<dyn DeliverToRead>,
174     ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
175         // Try to find a ready thread to which to push the work.
176         if let Some(thread) = self.ready_threads.pop_front() {
177             // Push to thread while holding state lock. This prevents the thread from giving up
178             // (for example, because of a signal) when we're about to deliver work.
179             match thread.push_work(work) {
180                 PushWorkRes::Ok => Ok(()),
181                 PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
182             }
183         } else if self.is_dead {
184             Err((BinderError::new_dead(), work))
185         } else {
186             let sync = work.should_sync_wakeup();
187 
188             // Didn't find a thread waiting for proc work; this can happen
189             // in two scenarios:
190             // 1. All threads are busy handling transactions
191             //    In that case, one of those threads should call back into
192             //    the kernel driver soon and pick up this work.
193             // 2. Threads are using the (e)poll interface, in which case
194             //    they may be blocked on the waitqueue without having been
195             //    added to waiting_threads. For this case, we just iterate
196             //    over all threads not handling transaction work, and
197             //    wake them all up. We wake all because we don't know whether
198             //    a thread that called into (e)poll is handling non-binder
199             //    work currently.
200             self.work.push_back(work);
201 
202             // Wake up polling threads, if any.
203             for thread in self.threads.values() {
204                 thread.notify_if_poll_ready(sync);
205             }
206 
207             Ok(())
208         }
209     }
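
// The dispatch policy above, reduced to a standalone sketch (userspace `VecDeque` types as
// stand-ins, worker delivery elided; not part of this driver): prefer handing the item
// directly to an idle worker, otherwise fail if the owner is dead, otherwise queue it on the
// shared list and nudge any pollers.
fn sketch_dispatch<T>(
    ready: &mut std::collections::VecDeque<u32>, // ids of idle workers
    queue: &mut std::collections::VecDeque<T>,
    is_dead: bool,
    item: T,
) -> Result<(), T> {
    if let Some(_worker) = ready.pop_front() {
        // Deliver directly to the idle worker while still holding the state lock, so the
        // worker cannot give up (e.g. on a signal) before the item arrives.
        Ok(())
    } else if is_dead {
        // Hand the item back so the caller can drop it after releasing the lock.
        Err(item)
    } else {
        queue.push_back(item);
        // Wake up any polling workers here, since they are not on the `ready` list.
        Ok(())
    }
}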
210 
211     pub(crate) fn remove_node(&mut self, ptr: u64) {
212         self.nodes.remove(&ptr);
213     }
214 
215     /// Updates the reference count on the given node.
216     pub(crate) fn update_node_refcount(
217         &mut self,
218         node: &DArc<Node>,
219         inc: bool,
220         strong: bool,
221         count: usize,
222         othread: Option<&Thread>,
223     ) {
224         let push = node.update_refcount_locked(inc, strong, count, self);
225 
226         // If we decided that we need to push work, push either to the process or to a thread if
227         // one is specified.
228         if let Some(node) = push {
229             if let Some(thread) = othread {
230                 thread.push_work_deferred(node);
231             } else {
232                 let _ = self.push_work(node);
233                 // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
234                 // that case, it doesn't care about the notification.
235             }
236         }
237     }
238 
239     pub(crate) fn new_node_ref(
240         &mut self,
241         node: DArc<Node>,
242         strong: bool,
243         thread: Option<&Thread>,
244     ) -> NodeRef {
245         self.update_node_refcount(&node, true, strong, 1, thread);
246         let strong_count = if strong { 1 } else { 0 };
247         NodeRef::new(node, strong_count, 1 - strong_count)
248     }
249 
250     pub(crate) fn new_node_ref_with_thread(
251         &mut self,
252         node: DArc<Node>,
253         strong: bool,
254         thread: &Thread,
255         wrapper: Option<CritIncrWrapper>,
256     ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
257         let push = match wrapper {
258             None => node
259                 .incr_refcount_allow_zero2one(strong, self)?
260                 .map(|node| node as _),
261             Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
262         };
263         if let Some(node) = push {
264             thread.push_work_deferred(node);
265         }
266         let strong_count = if strong { 1 } else { 0 };
267         Ok(NodeRef::new(node, strong_count, 1 - strong_count))
268     }
269 
270     /// Returns an existing node with the given pointer and cookie, if one exists.
271     ///
272     /// Returns an error if a node with the given pointer but a different cookie exists.
273     fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
274         match self.nodes.get(&ptr) {
275             None => Ok(None),
276             Some(node) => {
277                 let (_, node_cookie) = node.get_id();
278                 if node_cookie == cookie {
279                     Ok(Some(node.clone()))
280                 } else {
281                     Err(EINVAL)
282                 }
283             }
284         }
285     }
286 
287     fn register_thread(&mut self) -> bool {
288         if self.requested_thread_count == 0 {
289             return false;
290         }
291 
292         self.requested_thread_count -= 1;
293         self.started_thread_count += 1;
294         true
295     }
296 
297     /// Finds a delivered death notification with the given cookie, removes it from the process's
298     /// delivered list, and returns it.
299     fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
300         let mut cursor = self.delivered_deaths.cursor_front();
301         while let Some(next) = cursor.peek_next() {
302             if next.cookie == cookie {
303                 return Some(next.remove().into_arc());
304             }
305             cursor.move_next();
306         }
307         None
308     }
309 
310     pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
311         if let Some(death) = ListArc::try_from_arc_or_drop(death) {
312             self.delivered_deaths.push_back(death);
313         } else {
314             pr_warn!("Notification added to `delivered_deaths` twice.");
315         }
316     }
317 
318     pub(crate) fn add_outstanding_txn(&mut self) {
319         self.outstanding_txns += 1;
320     }
321 
322     fn txns_pending_locked(&self) -> bool {
323         if self.outstanding_txns > 0 {
324             return true;
325         }
326         for thread in self.threads.values() {
327             if thread.has_current_transaction() {
328                 return true;
329             }
330         }
331         false
332     }
333 }
334 
335 /// Used to keep track of a node that this process has a handle to.
336 #[pin_data]
337 pub(crate) struct NodeRefInfo {
338     debug_id: usize,
339     /// The refcount that this process owns to the node.
340     node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
341     death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
342     /// Cookie of the active freeze listener for this node.
343     freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
344     /// Used to store this `NodeRefInfo` in the node's `refs` list.
345     #[pin]
346     links: ListLinks<{ Self::LIST_NODE }>,
347     /// The handle for this `NodeRefInfo`.
348     handle: u32,
349     /// The process that has a handle to the node.
350     pub(crate) process: Arc<Process>,
351 }
352 
353 impl NodeRefInfo {
354     /// The id used for the `Node::refs` list.
355     pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
356     /// The id used for the `ListArc` in `ProcessNodeRefs`.
357     const LIST_PROC: u64 = 0xd703a5263dcc8650;
358 
359     fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
360         pin_init!(Self {
361             debug_id: super::next_debug_id(),
362             node_ref: ListArcField::new(node_ref),
363             death: ListArcField::new(None),
364             freeze: ListArcField::new(None),
365             links <- ListLinks::new(),
366             handle,
367             process,
368         })
369     }
370 
371     kernel::list::define_list_arc_field_getter! {
372         pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
373         pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
374         pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
375         pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
376     }
377 }
378 
379 kernel::list::impl_list_arc_safe! {
380     impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
381     impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
382 }
383 kernel::list::impl_list_item! {
384     impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
385         using ListLinks { self.links };
386     }
387 }
388 
389 /// Keeps track of references this process has to nodes owned by other processes.
390 ///
391 /// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
392 /// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
393 /// extra costs should be eliminated.
394 struct ProcessNodeRefs {
395     /// Used to look up nodes using the 32-bit id that this process knows it by.
396     by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
397     /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
398     /// the underlying `Node` struct as returned by `Node::global_id`.
399     by_node: RBTree<usize, u32>,
400     /// Used to look up a `FreezeListener` by cookie.
401     ///
402     /// There might be multiple freeze listeners for the same node, but at most one of them is
403     /// active.
404     freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
405 }
406 
407 impl ProcessNodeRefs {
408     fn new() -> Self {
409         Self {
410             by_handle: RBTree::new(),
411             by_node: RBTree::new(),
412             freeze_listeners: RBTree::new(),
413         }
414     }
415 }
416 
417 /// A process using binder.
418 ///
419 /// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
420 /// that a process has opened, so processes using several binder contexts have several `Process`
421 /// objects. This ensures that the contexts are fully separated.
422 #[pin_data]
423 pub(crate) struct Process {
424     pub(crate) ctx: Arc<Context>,
425 
426     // The task leader (process).
427     pub(crate) task: ARef<Task>,
428 
429     // Credential associated with file when `Process` is created.
430     pub(crate) cred: ARef<Credential>,
431 
432     #[pin]
433     pub(crate) inner: SpinLock<ProcessInner>,
434 
435     #[pin]
436     pub(crate) pages: ShrinkablePageRange,
437 
438     // Waitqueue of processes waiting for all outstanding transactions to be
439     // processed.
440     #[pin]
441     freeze_wait: CondVar,
442 
443     // Node references are in a different lock to avoid recursive acquisition when
444     // incrementing/decrementing a node in another process.
445     #[pin]
446     node_refs: Mutex<ProcessNodeRefs>,
447 
448     // Work node for deferred work item.
449     #[pin]
450     defer_work: Work<Process>,
451 
452     // Links for process list in Context.
453     #[pin]
454     links: ListLinks,
455 
456     pub(crate) stats: BinderStats,
457 }
458 
459 kernel::impl_has_work! {
460     impl HasWork<Process> for Process { self.defer_work }
461 }
462 
463 kernel::list::impl_list_arc_safe! {
464     impl ListArcSafe<0> for Process { untracked; }
465 }
466 kernel::list::impl_list_item! {
467     impl ListItem<0> for Process {
468         using ListLinks { self.links };
469     }
470 }
471 
472 impl workqueue::WorkItem for Process {
473     type Pointer = Arc<Process>;
474 
475     fn run(me: Arc<Self>) {
476         let defer;
477         {
478             let mut inner = me.inner.lock();
479             defer = inner.defer_work;
480             inner.defer_work = 0;
481         }
482 
483         if defer & PROC_DEFER_FLUSH != 0 {
484             me.deferred_flush();
485         }
486         if defer & PROC_DEFER_RELEASE != 0 {
487             me.deferred_release();
488         }
489     }
490 }
491 
492 impl Process {
493     fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
494         let current = kernel::current!();
495         let list_process = ListArc::pin_init::<Error>(
496             try_pin_init!(Process {
497                 ctx,
498                 cred,
499                 inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
500                 pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
501                 node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
502                 freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
503                 task: current.group_leader().into(),
504                 defer_work <- kernel::new_work!("Process::defer_work"),
505                 links <- ListLinks::new(),
506                 stats: BinderStats::new(),
507             }),
508             GFP_KERNEL,
509         )?;
510 
511         let process = list_process.clone_arc();
512         process.ctx.register_process(list_process);
513 
514         Ok(process)
515     }
516 
517     pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
518         self.task.tgid_nr_ns(None)
519     }
520 
521     #[inline(never)]
522     pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
523         seq_print!(m, "proc {}\n", self.pid_in_current_ns());
524         seq_print!(m, "context {}\n", &*ctx.name);
525 
526         let inner = self.inner.lock();
527         seq_print!(m, "  threads: {}\n", inner.threads.iter().count());
528         seq_print!(
529             m,
530             "  requested threads: {}+{}/{}\n",
531             inner.requested_thread_count,
532             inner.started_thread_count,
533             inner.max_threads,
534         );
535         if let Some(mapping) = &inner.mapping {
536             seq_print!(
537                 m,
538                 "  free oneway space: {}\n",
539                 mapping.alloc.free_oneway_space()
540             );
541             seq_print!(m, "  buffers: {}\n", mapping.alloc.count_buffers());
542         }
543         seq_print!(
544             m,
545             "  outstanding transactions: {}\n",
546             inner.outstanding_txns
547         );
548         seq_print!(m, "  nodes: {}\n", inner.nodes.iter().count());
549         drop(inner);
550 
551         {
552             let mut refs = self.node_refs.lock();
553             let (mut count, mut weak, mut strong) = (0, 0, 0);
554             for r in refs.by_handle.values_mut() {
555                 let node_ref = r.node_ref();
556                 let (nstrong, nweak) = node_ref.get_count();
557                 count += 1;
558                 weak += nweak;
559                 strong += nstrong;
560             }
561             seq_print!(m, "  refs: {count} s {strong} w {weak}\n");
562         }
563 
564         self.stats.debug_print("  ", m);
565 
566         Ok(())
567     }
568 
569     #[inline(never)]
570     pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
571         seq_print!(m, "proc {}\n", self.pid_in_current_ns());
572         seq_print!(m, "context {}\n", &*ctx.name);
573 
574         let mut all_threads = KVec::new();
575         let mut all_nodes = KVec::new();
576         loop {
577             let inner = self.inner.lock();
578             let num_threads = inner.threads.iter().count();
579             let num_nodes = inner.nodes.iter().count();
580 
581             if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
582                 drop(inner);
583                 all_threads.reserve(num_threads, GFP_KERNEL)?;
584                 all_nodes.reserve(num_nodes, GFP_KERNEL)?;
585                 continue;
586             }
587 
588             for thread in inner.threads.values() {
589                 assert!(all_threads.len() < all_threads.capacity());
590                 let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
591             }
592 
593             for node in inner.nodes.values() {
594                 assert!(all_nodes.len() < all_nodes.capacity());
595                 let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
596             }
597 
598             break;
599         }
600 
601         for thread in all_threads {
602             thread.debug_print(m, print_all)?;
603         }
604 
605         let mut inner = self.inner.lock();
606         for node in all_nodes {
607             if print_all || node.has_oneway_transaction(&mut inner) {
608                 node.full_debug_print(m, &mut inner)?;
609             }
610         }
611         drop(inner);
612 
613         if print_all {
614             let mut refs = self.node_refs.lock();
615             for r in refs.by_handle.values_mut() {
616                 let node_ref = r.node_ref();
617                 let dead = node_ref.node.owner.inner.lock().is_dead;
618                 let (strong, weak) = node_ref.get_count();
619                 let debug_id = node_ref.node.debug_id;
620 
621                 seq_print!(
622                     m,
623                     "  ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
624                     r.debug_id,
625                     r.handle,
626                     if dead { "dead " } else { "" },
627                 );
628             }
629         }
630 
631         let inner = self.inner.lock();
632         for work in &inner.work {
633             work.debug_print(m, "  ", "  pending transaction ")?;
634         }
635         for _death in &inner.delivered_deaths {
636             seq_print!(m, "  has delivered dead binder\n");
637         }
638         if let Some(mapping) = &inner.mapping {
639             mapping.alloc.debug_print(m)?;
640         }
641         drop(inner);
642 
643         Ok(())
644     }
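
// The snapshot loop above follows a common shape: sizing happens under the spinlock, but
// growing the buffers may sleep, so the lock is dropped, capacity is reserved with
// GFP_KERNEL, and the counts are re-checked; the pushes under the lock then use GFP_ATOMIC
// and never have to allocate. A standalone sketch of the same retry loop (userspace
// `Mutex`/`Vec` as stand-ins for the `SpinLock`/`KVec` used above):
fn sketch_snapshot<T: Clone>(source: &std::sync::Mutex<Vec<T>>) -> Vec<T> {
    let mut out = Vec::new();
    loop {
        let guard = source.lock().unwrap();
        if out.capacity() < guard.len() {
            let needed = guard.len();
            drop(guard); // Never allocate while the lock is held.
            out.reserve(needed);
            continue; // The source may have grown in the meantime, so re-check.
        }
        out.extend(guard.iter().cloned()); // Cannot reallocate: capacity was reserved above.
        return out;
    }
}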
645 
646     /// Attempts to fetch a work item from the process queue.
647     pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
648         self.inner.lock().work.pop_front()
649     }
650 
651     /// Attempts to fetch a work item from the process queue. If none is available, it registers the
652     /// given thread as ready to receive work directly.
653     ///
654     /// This must only be called when the thread is not participating in a transaction chain; when
655     /// it is, work will always be delivered directly to the thread (and not through the process
656     /// queue).
657     pub(crate) fn get_work_or_register<'a>(
658         &'a self,
659         thread: &'a Arc<Thread>,
660     ) -> GetWorkOrRegister<'a> {
661         let mut inner = self.inner.lock();
662         // Try to get work from the process queue.
663         if let Some(work) = inner.work.pop_front() {
664             return GetWorkOrRegister::Work(work);
665         }
666 
667         // Register the thread as ready.
668         GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
669     }
670 
671     fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
672         let id = {
673             let current = kernel::current!();
674             if !core::ptr::eq(current.group_leader(), &*self.task) {
675                 pr_err!("get_current_thread was called from the wrong process.");
676                 return Err(EINVAL);
677             }
678             current.pid()
679         };
680 
681         {
682             let inner = self.inner.lock();
683             if let Some(thread) = inner.threads.get(&id) {
684                 return Ok(thread.clone());
685             }
686         }
687 
688         // Allocate a new `Thread` without holding any locks.
689         let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
690         let ta: Arc<Thread> = Thread::new(id, self.into())?;
691 
692         let mut inner = self.inner.lock();
693         match inner.threads.entry(id) {
694             rbtree::Entry::Vacant(entry) => {
695                 entry.insert(ta.clone(), reservation);
696                 Ok(ta)
697             }
698             rbtree::Entry::Occupied(_entry) => {
699                 pr_err!("Cannot create two threads with the same id.");
700                 Err(EINVAL)
701             }
702         }
703     }
704 
705     pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
706         // If push_work fails, drop the work item outside the lock.
707         let res = self.inner.lock().push_work(work);
708         match res {
709             Ok(()) => Ok(()),
710             Err((err, work)) => {
711                 drop(work);
712                 Err(err)
713             }
714         }
715     }
716 
717     fn set_as_manager(
718         self: ArcBorrow<'_, Self>,
719         info: Option<FlatBinderObject>,
720         thread: &Thread,
721     ) -> Result {
722         let (ptr, cookie, flags) = if let Some(obj) = info {
723             (
724                 // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
725                 // is safe to access the `binder` field.
726                 unsafe { obj.__bindgen_anon_1.binder },
727                 obj.cookie,
728                 obj.flags,
729             )
730         } else {
731             (0, 0, 0)
732         };
733         let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
734         let node = node_ref.node.clone();
735         self.ctx.set_manager_node(node_ref)?;
736         self.inner.lock().is_manager = true;
737 
738         // Force the state of the node to prevent the delivery of acquire/increfs.
739         let mut owner_inner = node.owner.inner.lock();
740         node.force_has_count(&mut owner_inner);
741         Ok(())
742     }
743 
744     fn get_node_inner(
745         self: ArcBorrow<'_, Self>,
746         ptr: u64,
747         cookie: u64,
748         flags: u32,
749         strong: bool,
750         thread: &Thread,
751         wrapper: Option<CritIncrWrapper>,
752     ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
753         // Try to find an existing node.
754         {
755             let mut inner = self.inner.lock();
756             if let Some(node) = inner.get_existing_node(ptr, cookie)? {
757                 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
758             }
759         }
760 
761         // Allocate the node before reacquiring the lock.
762         let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
763         let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
764         let mut inner = self.inner.lock();
765         if let Some(node) = inner.get_existing_node(ptr, cookie)? {
766             return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
767         }
768 
769         inner.nodes.insert(rbnode);
770         // This can only fail if someone has already pushed the node to a list, but we just created
771         // it and still hold the lock, so it can't fail right now.
772         let node_ref = inner
773             .new_node_ref_with_thread(node, strong, thread, wrapper)
774             .unwrap();
775 
776         Ok(Ok(node_ref))
777     }
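
// `get_node_inner` uses a "check, allocate unlocked, re-check, insert" shape so that neither
// the node nor its tree slot is allocated while the spinlock is held. The same shape as a
// standalone sketch (userspace `BTreeMap`/`Arc`/`Mutex` as stand-ins for the kernel
// `RBTree`/`DArc`/`SpinLock`; not part of this driver):
fn sketch_get_or_create(
    map: &std::sync::Mutex<std::collections::BTreeMap<u64, std::sync::Arc<String>>>,
    key: u64,
) -> std::sync::Arc<String> {
    if let Some(existing) = map.lock().unwrap().get(&key) {
        return existing.clone();
    }
    // Allocate outside the lock; this work is wasted if another thread wins the race.
    let new = std::sync::Arc::new(format!("node-{key}"));
    let mut guard = map.lock().unwrap();
    if let Some(existing) = guard.get(&key) {
        return existing.clone(); // Someone else inserted it while the lock was released.
    }
    guard.insert(key, new.clone());
    new
}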
778 
779     pub(crate) fn get_node(
780         self: ArcBorrow<'_, Self>,
781         ptr: u64,
782         cookie: u64,
783         flags: u32,
784         strong: bool,
785         thread: &Thread,
786     ) -> Result<NodeRef> {
787         let mut wrapper = None;
788         for _ in 0..2 {
789             match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
790                 Err(err) => return Err(err),
791                 Ok(Ok(node_ref)) => return Ok(node_ref),
792                 Ok(Err(CouldNotDeliverCriticalIncrement)) => {
793                     wrapper = Some(CritIncrWrapper::new()?);
794                 }
795             }
796         }
797         // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
798         // loop should run at most twice.
799         unreachable!()
800     }
801 
802     pub(crate) fn insert_or_update_handle(
803         self: ArcBorrow<'_, Process>,
804         node_ref: NodeRef,
805         is_manager: bool,
806     ) -> Result<u32> {
807         {
808             let mut refs = self.node_refs.lock();
809 
810             // Do a lookup before inserting.
811             if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
812                 let handle = *handle_ref;
813                 let info = refs.by_handle.get_mut(&handle).unwrap();
814                 info.node_ref().absorb(node_ref);
815                 return Ok(handle);
816             }
817         }
818 
819         // Reserve memory for tree nodes.
820         let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
821         let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
822         let info = UniqueArc::new_uninit(GFP_KERNEL)?;
823 
824         let mut refs = self.node_refs.lock();
825 
826         // Do a lookup again as node may have been inserted before the lock was reacquired.
827         if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
828             let handle = *handle_ref;
829             let info = refs.by_handle.get_mut(&handle).unwrap();
830             info.node_ref().absorb(node_ref);
831             return Ok(handle);
832         }
833 
834         // Find the lowest unused handle (handle 0 is reserved for the context manager).
835         let mut target: u32 = if is_manager { 0 } else { 1 };
836         for handle in refs.by_handle.keys() {
837             if *handle > target {
838                 break;
839             }
840             if *handle == target {
841                 target = target.checked_add(1).ok_or(ENOMEM)?;
842             }
843         }
844 
845         let gid = node_ref.node.global_id();
846         let (info_proc, info_node) = {
847             let info_init = NodeRefInfo::new(node_ref, target, self.into());
848             match info.pin_init_with(info_init) {
849                 Ok(info) => ListArc::pair_from_pin_unique(info),
850                 // error is infallible
851                 Err(err) => match err {},
852             }
853         };
854 
855         // Ensure the process is still alive while we insert a new reference.
856         //
857         // This releases the lock before inserting the nodes, but since `is_dead` is set as the
858         // first thing in `deferred_release`, process cleanup will not miss the items inserted into
859         // `refs` below.
860         if self.inner.lock().is_dead {
861             return Err(ESRCH);
862         }
863 
864         // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
865         // `info_node` into the right node's `refs` list.
866         unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
867 
868         refs.by_node.insert(reserve1.into_node(gid, target));
869         refs.by_handle.insert(reserve2.into_node(target, info_proc));
870         Ok(target)
871     }
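
// The id search above hands out the smallest unused 32-bit handle, starting at 1 (handle 0
// is reserved for the context manager), by walking the existing handles in sorted order.
// The same search as a standalone function over any sorted iterator of existing handles
// (illustration only; `None` mirrors the ENOMEM on overflow above):
fn sketch_lowest_free_handle<I>(sorted_handles: I, is_manager: bool) -> Option<u32>
where
    I: IntoIterator<Item = u32>,
{
    let mut target: u32 = if is_manager { 0 } else { 1 };
    for handle in sorted_handles {
        if handle > target {
            break; // Found a gap below `handle`.
        }
        if handle == target {
            target = target.checked_add(1)?;
        }
    }
    Some(target)
}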
872 
873     pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
874         // When handle is zero, try to get the context manager.
875         if handle == 0 {
876             Ok(self.ctx.get_manager_node(true)?)
877         } else {
878             Ok(self.get_node_from_handle(handle, true)?)
879         }
880     }
881 
882     pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
883         self.node_refs
884             .lock()
885             .by_handle
886             .get_mut(&handle)
887             .ok_or(ENOENT)?
888             .node_ref()
889             .clone(strong)
890     }
891 
892     pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
893         let mut inner = self.inner.lock();
894         // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
895         let removed = unsafe { inner.delivered_deaths.remove(death) };
896         drop(inner);
897         drop(removed);
898     }
899 
900     pub(crate) fn update_ref(
901         self: ArcBorrow<'_, Process>,
902         handle: u32,
903         inc: bool,
904         strong: bool,
905     ) -> Result {
906         if inc && handle == 0 {
907             if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
908                 if core::ptr::eq(&*self, &*node_ref.node.owner) {
909                     return Err(EINVAL);
910                 }
911                 let _ = self.insert_or_update_handle(node_ref, true);
912                 return Ok(());
913             }
914         }
915 
916         // To preserve original binder behaviour, we only fail requests where the manager tries to
917         // increment references on itself.
918         let mut refs = self.node_refs.lock();
919         if let Some(info) = refs.by_handle.get_mut(&handle) {
920             if info.node_ref().update(inc, strong) {
921                 // Clean up death if there is one attached to this node reference.
922                 if let Some(death) = info.death().take() {
923                     death.set_cleared(true);
924                     self.remove_from_delivered_deaths(&death);
925                 }
926 
927                 // Remove reference from process tables, and from the node's `refs` list.
928 
929                 // SAFETY: We are removing the `NodeRefInfo` from the right node.
930                 unsafe { info.node_ref2().node.remove_node_info(info) };
931 
932                 let id = info.node_ref().node.global_id();
933                 refs.by_handle.remove(&handle);
934                 refs.by_node.remove(&id);
935             }
936         } else {
937             // All refs are cleared in process exit, so this warning is expected in that case.
938             if !self.inner.lock().is_dead {
939                 pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
940             }
941         }
942         Ok(())
943     }
944 
945     /// Decrements the refcount of the given node, if one exists.
946     pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
947         let mut inner = self.inner.lock();
948         if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
949             inner.update_node_refcount(&node, false, strong, 1, None);
950         }
951     }
952 
953     pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
954         let ptr = reader.read::<u64>()?;
955         let cookie = reader.read::<u64>()?;
956         let mut inner = self.inner.lock();
957         if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
958             if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
959                 // This only fails if the process is dead.
960                 let _ = inner.push_work(node);
961             }
962         }
963         Ok(())
964     }
965 
966     pub(crate) fn buffer_alloc(
967         self: &Arc<Self>,
968         debug_id: usize,
969         size: usize,
970         is_oneway: bool,
971         from_pid: i32,
972     ) -> BinderResult<NewAllocation> {
973         use kernel::page::PAGE_SIZE;
974 
975         let mut reserve_new_args = ReserveNewArgs {
976             debug_id,
977             size,
978             is_oneway,
979             pid: from_pid,
980             ..ReserveNewArgs::default()
981         };
982 
983         let (new_alloc, addr) = loop {
984             let mut inner = self.inner.lock();
985             let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
986             let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
987                 ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
988                 ReserveNew::NeedAlloc(request) => request,
989             };
990             drop(inner);
991             // We need to allocate memory and then call `reserve_new` again.
992             reserve_new_args = alloc_request.make_alloc()?;
993         };
994 
995         let res = Allocation::new(
996             self.clone(),
997             debug_id,
998             new_alloc.offset,
999             size,
1000             addr + new_alloc.offset,
1001             new_alloc.oneway_spam_detected,
1002         );
1003 
1004         // This allocation will be marked as in use until the `Allocation` is used to free it.
1005         //
1006         // This method can't be called while holding a lock, so we release the lock first. It's
1007         // okay for several threads to use the method on the same index at the same time. In that
1008         // case, one of the calls will allocate the given page (if missing), and the other call
1009         // will wait for the other call to finish allocating the page.
1010         //
1011         // We will not call `stop_using_range` in parallel with this on the same page, because the
1012         // allocation can only be removed via the destructor of the `Allocation` object that we
1013         // currently own.
1014         match self.pages.use_range(
1015             new_alloc.offset / PAGE_SIZE,
1016             (new_alloc.offset + size).div_ceil(PAGE_SIZE),
1017         ) {
1018             Ok(()) => {}
1019             Err(err) => {
1020                 pr_warn!("use_range failure {:?}", err);
1021                 return Err(err.into());
1022             }
1023         }
1024 
1025         Ok(NewAllocation(res))
1026     }
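
// The `use_range` call above pins every page touched by the new buffer: the first page index
// is `offset / PAGE_SIZE` and the exclusive end index is `(offset + size).div_ceil(PAGE_SIZE)`.
// A standalone illustration of that arithmetic, assuming a 4 KiB page size (the real code
// above uses the kernel constant):
fn sketch_page_span(offset: usize, size: usize) -> (usize, usize) {
    const SKETCH_PAGE_SIZE: usize = 4096; // assumption for the sketch only
    (offset / SKETCH_PAGE_SIZE, (offset + size).div_ceil(SKETCH_PAGE_SIZE))
}
// For example, a 300-byte buffer at offset 4000 spans pages 0 and 1:
// sketch_page_span(4000, 300) == (0, 2).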
1027 
1028     pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
1029         let mut inner = self.inner.lock();
1030         let mapping = inner.mapping.as_mut()?;
1031         let offset = ptr.checked_sub(mapping.address)?;
1032         let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
1033         let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
1034         if let Some(data) = odata {
1035             alloc.set_info(data);
1036         }
1037         Some(alloc)
1038     }
1039 
1040     pub(crate) fn buffer_raw_free(&self, ptr: usize) {
1041         let mut inner = self.inner.lock();
1042         if let Some(ref mut mapping) = &mut inner.mapping {
1043             let offset = match ptr.checked_sub(mapping.address) {
1044                 Some(offset) => offset,
1045                 None => return,
1046             };
1047 
1048             let freed_range = match mapping.alloc.reservation_abort(offset) {
1049                 Ok(freed_range) => freed_range,
1050                 Err(_) => {
1051                     pr_warn!(
1052                         "Pointer {:x} failed to free, base = {:x}\n",
1053                         ptr,
1054                         mapping.address
1055                     );
1056                     return;
1057                 }
1058             };
1059 
1060             // No more allocations in this range. Mark them as not in use.
1061             //
1062             // Must be done before we release the lock so that `use_range` is not used on these
1063             // indices until `stop_using_range` returns.
1064             self.pages
1065                 .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
1066         }
1067     }
1068 
1069     pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
1070         let mut inner = self.inner.lock();
1071         if let Some(ref mut mapping) = &mut inner.mapping {
1072             if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
1073                 pr_warn!("Offset {} failed to be marked freeable\n", offset);
1074             }
1075         }
1076     }
1077 
1078     fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
1079         use kernel::page::PAGE_SIZE;
1080         let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
1081         let mapping = Mapping::new(vma.start(), size);
1082         let page_count = self.pages.register_with_vma(vma)?;
1083         if page_count * PAGE_SIZE != size {
1084             return Err(EINVAL);
1085         }
1086 
1087         // Save range allocator for later.
1088         self.inner.lock().mapping = Some(mapping);
1089 
1090         Ok(())
1091     }
1092 
1093     fn version(&self, data: UserSlice) -> Result {
1094         data.writer().write(&BinderVersion::current())
1095     }
1096 
1097     pub(crate) fn register_thread(&self) -> bool {
1098         self.inner.lock().register_thread()
1099     }
1100 
1101     fn remove_thread(&self, thread: Arc<Thread>) {
1102         self.inner.lock().threads.remove(&thread.id);
1103         thread.release();
1104     }
1105 
1106     fn set_max_threads(&self, max: u32) {
1107         self.inner.lock().max_threads = max;
1108     }
1109 
1110     fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
1111         self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
1112     }
1113 
1114     pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
1115         self.inner.lock().oneway_spam_detection_enabled
1116     }
1117 
1118     fn get_node_debug_info(&self, data: UserSlice) -> Result {
1119         let (mut reader, mut writer) = data.reader_writer();
1120 
1121         // Read the starting point.
1122         let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
1123         let mut out = BinderNodeDebugInfo::default();
1124 
1125         {
1126             let inner = self.inner.lock();
1127             for (node_ptr, node) in &inner.nodes {
1128                 if *node_ptr > ptr {
1129                     node.populate_debug_info(&mut out, &inner);
1130                     break;
1131                 }
1132             }
1133         }
1134 
1135         writer.write(&out)
1136     }
1137 
1138     fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
1139         let (mut reader, mut writer) = data.reader_writer();
1140         let mut out = reader.read::<BinderNodeInfoForRef>()?;
1141 
1142         if out.strong_count != 0
1143             || out.weak_count != 0
1144             || out.reserved1 != 0
1145             || out.reserved2 != 0
1146             || out.reserved3 != 0
1147         {
1148             return Err(EINVAL);
1149         }
1150 
1151         // Only the context manager is allowed to use this ioctl.
1152         if !self.inner.lock().is_manager {
1153             return Err(EPERM);
1154         }
1155 
1156         {
1157             let mut node_refs = self.node_refs.lock();
1158             let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
1159             let node_ref = node_info.node_ref();
1160             let owner_inner = node_ref.node.owner.inner.lock();
1161             node_ref.node.populate_counts(&mut out, &owner_inner);
1162         }
1163 
1164         // Write the result back.
1165         writer.write(&out)
1166     }
1167 
1168     pub(crate) fn needs_thread(&self) -> bool {
1169         let mut inner = self.inner.lock();
1170         let ret = inner.requested_thread_count == 0
1171             && inner.ready_threads.is_empty()
1172             && inner.started_thread_count < inner.max_threads;
1173         if ret {
1174             inner.requested_thread_count += 1
1175         }
1176         ret
1177     }
1178 
1179     pub(crate) fn request_death(
1180         self: &Arc<Self>,
1181         reader: &mut UserSliceReader,
1182         thread: &Thread,
1183     ) -> Result {
1184         let handle: u32 = reader.read()?;
1185         let cookie: u64 = reader.read()?;
1186 
1187         // Queue BR_ERROR if we can't allocate memory for the death notification.
1188         let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
1189             thread.push_return_work(BR_ERROR);
1190         })?;
1191         let mut refs = self.node_refs.lock();
1192         let Some(info) = refs.by_handle.get_mut(&handle) else {
1193             pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
1194             return Ok(());
1195         };
1196 
1197         // Nothing to do if there is already a death notification request for this handle.
1198         if info.death().is_some() {
1199             pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
1200             return Ok(());
1201         }
1202 
1203         let death = {
1204             let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
1205             match death.pin_init_with(death_init) {
1206                 Ok(death) => death,
1207                 // error is infallible
1208                 Err(err) => match err {},
1209             }
1210         };
1211 
1212         // Register the death notification.
1213         {
1214             let owner = info.node_ref2().node.owner.clone();
1215             let mut owner_inner = owner.inner.lock();
1216             if owner_inner.is_dead {
1217                 let death = Arc::from(death);
1218                 *info.death() = Some(death.clone());
1219                 drop(owner_inner);
1220                 death.set_dead();
1221             } else {
1222                 let death = ListArc::from(death);
1223                 *info.death() = Some(death.clone_arc());
1224                 info.node_ref().node.add_death(death, &mut owner_inner);
1225             }
1226         }
1227         Ok(())
1228     }
1229 
1230     pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
1231         let handle: u32 = reader.read()?;
1232         let cookie: u64 = reader.read()?;
1233 
1234         let mut refs = self.node_refs.lock();
1235         let Some(info) = refs.by_handle.get_mut(&handle) else {
1236             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
1237             return Ok(());
1238         };
1239 
1240         let Some(death) = info.death().take() else {
1241             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
1242             return Ok(());
1243         };
1244         if death.cookie != cookie {
1245             *info.death() = Some(death);
1246             pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
1247             return Ok(());
1248         }
1249 
1250         // Update state and determine if we need to queue a work item. We only need to do it when
1251         // the node is not dead or if the user already completed the death notification.
1252         if death.set_cleared(false) {
1253             if let Some(death) = ListArc::try_from_arc_or_drop(death) {
1254                 let _ = thread.push_work_if_looper(death);
1255             }
1256         }
1257 
1258         Ok(())
1259     }
1260 
1261     pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
1262         if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
1263             death.set_notification_done(thread);
1264         }
1265     }
1266 
1267     /// Locks the spinlock and moves the `nodes` rbtree out.
1268     ///
1269     /// This allows you to iterate through `nodes` while also allowing you to give other parts of
1270     /// the codebase exclusive access to `ProcessInner`.
1271     pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
1272         let mut inner = self.inner.lock();
1273         WithNodes {
1274             nodes: take(&mut inner.nodes),
1275             inner,
1276         }
1277     }
1278 
1279     fn deferred_flush(&self) {
1280         let inner = self.inner.lock();
1281         for thread in inner.threads.values() {
1282             thread.exit_looper();
1283         }
1284     }
1285 
1286     fn deferred_release(self: Arc<Self>) {
1287         let is_manager = {
1288             let mut inner = self.inner.lock();
1289             inner.is_dead = true;
1290             inner.is_frozen = IsFrozen::No;
1291             inner.sync_recv = false;
1292             inner.async_recv = false;
1293             inner.is_manager
1294         };
1295 
1296         if is_manager {
1297             self.ctx.unset_manager_node();
1298         }
1299 
1300         self.ctx.deregister_process(&self);
1301 
1302         let binderfs_file = self.inner.lock().binderfs_file.take();
1303         drop(binderfs_file);
1304 
1305         // Release threads.
1306         let threads = {
1307             let mut inner = self.inner.lock();
1308             let threads = take(&mut inner.threads);
1309             let ready = take(&mut inner.ready_threads);
1310             drop(inner);
1311             drop(ready);
1312 
1313             for thread in threads.values() {
1314                 thread.release();
1315             }
1316             threads
1317         };
1318 
1319         // Release nodes.
1320         {
1321             while let Some(node) = {
1322                 let mut lock = self.inner.lock();
1323                 lock.nodes.cursor_front().map(|c| c.remove_current().1)
1324             } {
1325                 node.to_key_value().1.release();
1326             }
1327         }
1328 
1329         // Clean up death listeners and remove nodes from external node info lists.
1330         for info in self.node_refs.lock().by_handle.values_mut() {
1331             // SAFETY: We are removing the `NodeRefInfo` from the right node.
1332             unsafe { info.node_ref2().node.remove_node_info(info) };
1333 
1334             // Remove all death notifications from the nodes (that belong to a different process).
1335             let death = if let Some(existing) = info.death().take() {
1336                 existing
1337             } else {
1338                 continue;
1339             };
1340             death.set_cleared(false);
1341         }
1342 
1343         // Clean up freeze listeners.
1344         let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
1345         for listener in freeze_listeners.values() {
1346             listener.on_process_exit(&self);
1347         }
1348         drop(freeze_listeners);
1349 
1350         // Release refs on foreign nodes.
1351         {
1352             let mut refs = self.node_refs.lock();
1353             let by_handle = take(&mut refs.by_handle);
1354             let by_node = take(&mut refs.by_node);
1355             drop(refs);
1356             drop(by_node);
1357             drop(by_handle);
1358         }
1359 
1360         // Cancel all pending work items.
1361         while let Some(work) = self.get_work() {
1362             work.into_arc().cancel();
1363         }
1364 
1365         let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
1366         drop(delivered_deaths);
1367 
1368         // Free any resources kept alive by allocated buffers.
1369         let omapping = self.inner.lock().mapping.take();
1370         if let Some(mut mapping) = omapping {
1371             let address = mapping.address;
1372             mapping
1373                 .alloc
1374                 .take_for_each(|offset, size, debug_id, odata| {
1375                     let ptr = offset + address;
1376                     let mut alloc =
1377                         Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
1378                     if let Some(data) = odata {
1379                         alloc.set_info(data);
1380                     }
1381                     drop(alloc)
1382                 });
1383         }
1384 
1385         // Calls to synchronize_rcu() in the destructor of each thread will happen here.
1386         drop(threads);
1387     }
1388 
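         /// Decrements the number of outstanding transactions, waking up waiters in
         /// `ioctl_freeze` once the count reaches zero while the process is frozen or freezing.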
1389     pub(crate) fn drop_outstanding_txn(&self) {
1390         let wake = {
1391             let mut inner = self.inner.lock();
1392             if inner.outstanding_txns == 0 {
1393                 pr_err!("outstanding_txns underflow");
1394                 return;
1395             }
1396             inner.outstanding_txns -= 1;
1397             inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
1398         };
1399 
1400         if wake {
1401             self.freeze_wait.notify_all();
1402         }
1403     }
1404 
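         /// Handles `BINDER_FREEZE` for this process.
         ///
         /// Unfreezing clears the frozen state and notifies the registered freeze listeners.
         /// Freezing waits up to `timeout_ms` for outstanding transactions to finish and fails
         /// with `EAGAIN` if transactions are still pending after that.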
1405     pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
1406         if info.enable == 0 {
1407             let msgs = self.prepare_freeze_messages()?;
1408             let mut inner = self.inner.lock();
1409             inner.sync_recv = false;
1410             inner.async_recv = false;
1411             inner.is_frozen = IsFrozen::No;
1412             drop(inner);
1413             msgs.send_messages();
1414             return Ok(());
1415         }
1416 
1417         let mut inner = self.inner.lock();
1418         inner.sync_recv = false;
1419         inner.async_recv = false;
1420         inner.is_frozen = IsFrozen::InProgress;
1421 
1422         if info.timeout_ms > 0 {
1423             let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
1424             while jiffies > 0 {
1425                 if inner.outstanding_txns == 0 {
1426                     break;
1427                 }
1428 
1429                 match self
1430                     .freeze_wait
1431                     .wait_interruptible_timeout(&mut inner, jiffies)
1432                 {
1433                     CondVarTimeoutResult::Signal { .. } => {
1434                         inner.is_frozen = IsFrozen::No;
1435                         return Err(ERESTARTSYS);
1436                     }
1437                     CondVarTimeoutResult::Woken { jiffies: remaining } => {
1438                         jiffies = remaining;
1439                     }
1440                     CondVarTimeoutResult::Timeout => {
1441                         jiffies = 0;
1442                     }
1443                 }
1444             }
1445         }
1446 
1447         if inner.txns_pending_locked() {
1448             inner.is_frozen = IsFrozen::No;
1449             Err(EAGAIN)
1450         } else {
1451             drop(inner);
1452             match self.prepare_freeze_messages() {
1453                 Ok(batch) => {
1454                     self.inner.lock().is_frozen = IsFrozen::Yes;
1455                     batch.send_messages();
1456                     Ok(())
1457                 }
1458                 Err(kernel::alloc::AllocError) => {
1459                     self.inner.lock().is_frozen = IsFrozen::No;
1460                     Err(ENOMEM)
1461                 }
1462             }
1463         }
1464     }
1465 }
1466 
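     /// Handles `BINDER_GET_FROZEN_INFO`: aggregates, over every binder context used by the given
     /// pid, whether sync or async transactions were received while frozen, and reports pending
     /// transactions in bit 1 of `sync_recv`.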
1467 fn get_frozen_status(data: UserSlice) -> Result {
1468     let (mut reader, mut writer) = data.reader_writer();
1469 
1470     let mut info = reader.read::<BinderFrozenStatusInfo>()?;
1471     info.sync_recv = 0;
1472     info.async_recv = 0;
1473     let mut found = false;
1474 
1475     for ctx in crate::context::get_all_contexts()? {
1476         ctx.for_each_proc(|proc| {
1477             if proc.task.pid() == info.pid as _ {
1478                 found = true;
1479                 let inner = proc.inner.lock();
1480                 let txns_pending = inner.txns_pending_locked();
1481                 info.async_recv |= inner.async_recv as u32;
1482                 info.sync_recv |= inner.sync_recv as u32;
1483                 info.sync_recv |= (txns_pending as u32) << 1;
1484             }
1485         });
1486     }
1487 
1488     if found {
1489         writer.write(&info)?;
1490         Ok(())
1491     } else {
1492         Err(EINVAL)
1493     }
1494 }
1495 
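     /// Handles `BINDER_FREEZE`: collects every binder `Process` that belongs to the target pid
     /// (across all contexts) and freezes or unfreezes each of them.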
1496 fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
1497     let info = reader.read::<BinderFreezeInfo>()?;
1498 
1499     // Very unlikely for there to be more than 3, since a process normally uses at most binder and
1500     // hwbinder.
1501     let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
1502 
1503     let ctxs = crate::context::get_all_contexts()?;
1504     for ctx in ctxs {
1505         for proc in ctx.get_procs_with_pid(info.pid as i32)? {
1506             procs.push(proc, GFP_KERNEL)?;
1507         }
1508     }
1509 
1510     for proc in procs {
1511         proc.ioctl_freeze(&info)?;
1512     }
1513     Ok(())
1514 }
1515 
1516 /// The ioctl handler.
1517 impl Process {
1518     /// Ioctls that are write-only from the perspective of userspace.
1519     ///
1520     /// The kernel will only read from the pointer that userspace provided to us.
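         ///
         /// For example, userspace typically calls `ioctl(fd, BINDER_SET_MAX_THREADS, &max)` with
         /// a `__u32` value; the kernel only reads `max` and writes nothing back.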
1521     fn ioctl_write_only(
1522         this: ArcBorrow<'_, Process>,
1523         _file: &File,
1524         cmd: u32,
1525         reader: &mut UserSliceReader,
1526     ) -> Result {
1527         let thread = this.get_current_thread()?;
1528         match cmd {
1529             uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
1530             uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
1531             uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
1532             uapi::BINDER_SET_CONTEXT_MGR_EXT => {
1533                 this.set_as_manager(Some(reader.read()?), &thread)?
1534             }
1535             uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
1536                 this.set_oneway_spam_detection_enabled(reader.read()?)
1537             }
1538             uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
1539             _ => return Err(EINVAL),
1540         }
1541         Ok(())
1542     }
1543 
1544     /// Ioctls that are read/write from the perspective of userspace.
1545     ///
1546     /// The kernel will both read from and write to the pointer that userspace provided to us.
1547     fn ioctl_write_read(
1548         this: ArcBorrow<'_, Process>,
1549         file: &File,
1550         cmd: u32,
1551         data: UserSlice,
1552     ) -> Result {
1553         let thread = this.get_current_thread()?;
1554         let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
1555         match cmd {
1556             uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
1557             uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
1558             uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
1559             uapi::BINDER_VERSION => this.version(data)?,
1560             uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
1561             uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
1562             _ => return Err(EINVAL),
1563         }
1564         Ok(())
1565     }
1566 }
1567 
1568 /// The file operations supported by `Process`.
1569 impl Process {
1570     pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
1571         Self::new(ctx.into(), ARef::from(file.cred()))
1572     }
1573 
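         /// Called when the last file reference to this binder fd is dropped; defers the actual
         /// cleanup (`deferred_release`) to the system workqueue.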
1574     pub(crate) fn release(this: Arc<Process>, _file: &File) {
1575         let binderfs_file;
1576         let should_schedule;
1577         {
1578             let mut inner = this.inner.lock();
1579             should_schedule = inner.defer_work == 0;
1580             inner.defer_work |= PROC_DEFER_RELEASE;
1581             binderfs_file = inner.binderfs_file.take();
1582         }
1583 
1584         if should_schedule {
1585             // Ignore failures to schedule to the workqueue. Those just mean that we're already
1586             // scheduled for execution.
1587             let _ = workqueue::system().enqueue(this);
1588         }
1589 
1590         drop(binderfs_file);
1591     }
1592 
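         /// Called when a file descriptor for this process is closed; defers the flush work
         /// (`deferred_flush`) to the system workqueue.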
1593     pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
1594         let should_schedule;
1595         {
1596             let mut inner = this.inner.lock();
1597             should_schedule = inner.defer_work == 0;
1598             inner.defer_work |= PROC_DEFER_FLUSH;
1599         }
1600 
1601         if should_schedule {
1602             // Ignore failures to schedule to the workqueue. Those just mean that we're already
1603             // scheduled for execution.
1604             let _ = workqueue::system().enqueue(Arc::from(this));
1605         }
1606         Ok(())
1607     }
1608 
1609     pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
1610         use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
1611         use kernel::uapi::{_IOC_READ, _IOC_WRITE};
1612 
1613         crate::trace::trace_ioctl(cmd, arg);
1614 
1615         let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
1616 
1617         const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
1618 
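             // Binder ioctls are either write-only or write/read from userspace's point of view;
             // anything else is rejected.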
1619         match _IOC_DIR(cmd) {
1620             _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
1621             _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
1622             _ => Err(EINVAL),
1623         }
1624     }
1625 
1626     pub(crate) fn compat_ioctl(
1627         this: ArcBorrow<'_, Process>,
1628         file: &File,
1629         cmd: u32,
1630         arg: usize,
1631     ) -> Result {
1632         Self::ioctl(this, file, cmd, arg)
1633     }
1634 
1635     pub(crate) fn mmap(
1636         this: ArcBorrow<'_, Process>,
1637         _file: &File,
1638         vma: &mm::virt::VmaNew,
1639     ) -> Result {
1640         // We don't allow mmap to be used in a different process.
1641         if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
1642             return Err(EINVAL);
1643         }
1644         if vma.start() == 0 {
1645             return Err(EINVAL);
1646         }
1647 
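             // The mapping is read-only for userspace and is not copied into forked children.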
1648         vma.try_clear_maywrite().map_err(|_| EPERM)?;
1649         vma.set_dontcopy();
1650         vma.set_mixedmap();
1651 
1652         // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
1653         this.create_mapping(vma)
1654     }
1655 
1656     pub(crate) fn poll(
1657         this: ArcBorrow<'_, Process>,
1658         file: &File,
1659         table: PollTable<'_>,
1660     ) -> Result<u32> {
1661         let thread = this.get_current_thread()?;
1662         let (from_proc, mut mask) = thread.poll(file, table);
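             // If the thread reported no events but serves the process work queue, report
             // readability when process-level work is pending.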
1663         if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
1664             mask |= bindings::POLLIN;
1665         }
1666         Ok(mask)
1667     }
1668 }
1669 
1670 /// Represents that a thread has registered with the `ready_threads` list of its process.
1671 ///
1672 /// The destructor of this type will unregister the thread from the list of ready threads.
1673 pub(crate) struct Registration<'a> {
1674     thread: &'a Arc<Thread>,
1675 }
1676 
1677 impl<'a> Registration<'a> {
1678     fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
1679         assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
1680         // INVARIANT: We are pushing this thread to the right `ready_threads` list.
1681         if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
1682             guard.ready_threads.push_front(list_arc);
1683         } else {
1684             // It is an error to hit this branch, and it should not be reachable. We try to do
1685             // something reasonable when the failure path happens. Most likely, the thread in
1686             // question will sleep forever.
1687             pr_err!("Same thread registered with `ready_threads` twice.");
1688         }
1689         Self { thread }
1690     }
1691 }
1692 
1693 impl Drop for Registration<'_> {
1694     fn drop(&mut self) {
1695         let mut inner = self.thread.process.inner.lock();
1696         // SAFETY: The thread has the invariant that we never push it to any other linked list than
1697         // the `ready_threads` list of its parent process. Therefore, the thread is either in that
1698         // list, or in no list.
1699         unsafe { inner.ready_threads.remove(self.thread) };
1700     }
1701 }
1702 
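     /// Provides access to a process's node tree while its spinlock is held.
     ///
     /// The tree is temporarily moved out of `ProcessInner`; the `Drop` implementation moves it
     /// back and warns if new nodes were inserted in the meantime.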
1703 pub(crate) struct WithNodes<'a> {
1704     pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
1705     pub(crate) nodes: RBTree<u64, DArc<Node>>,
1706 }
1707 
1708 impl Drop for WithNodes<'_> {
1709     fn drop(&mut self) {
1710         core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
1711         if self.nodes.iter().next().is_some() {
1712             pr_err!("nodes array was modified while using lock_with_nodes\n");
1713         }
1714     }
1715 }
1716 
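     /// The result of a thread asking its process for work: either a work item to deliver, or a
     /// `Registration` that parks the thread on the process's `ready_threads` list.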
1717 pub(crate) enum GetWorkOrRegister<'a> {
1718     Work(DLArc<dyn DeliverToRead>),
1719     Register(Registration<'a>),
1720 }
1721