1 // SPDX-License-Identifier: GPL-2.0
2
3 // Copyright (C) 2025 Google LLC.
4
5 //! This module defines the `Process` type, which represents a process using a particular binder
6 //! context.
7 //!
8 //! The `Process` object keeps track of all of the resources that this process owns in the binder
9 //! context.
10 //!
11 //! There is one `Process` object for each binder fd that a process has opened, so processes using
12 //! several binder contexts have several `Process` objects. This ensures that the contexts are
13 //! fully separated.
14
15 use core::mem::take;
16
17 use kernel::{
18 bindings,
19 cred::Credential,
20 error::Error,
21 fs::file::{self, File},
22 id_pool::IdPool,
23 list::{List, ListArc, ListArcField, ListLinks},
24 mm,
25 prelude::*,
26 rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
27 seq_file::SeqFile,
28 seq_print,
29 sync::poll::PollTable,
30 sync::{
31 aref::ARef,
32 lock::{spinlock::SpinLockBackend, Guard},
33 Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
34 },
35 task::Task,
36 uaccess::{UserPtr, UserSlice, UserSliceReader},
37 uapi,
38 workqueue::{self, Work},
39 };
40
41 use crate::{
42 allocation::{Allocation, AllocationInfo, NewAllocation},
43 context::Context,
44 defs::*,
45 error::{BinderError, BinderResult},
46 node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
47 page_range::ShrinkablePageRange,
48 range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
49 stats::BinderStats,
50 thread::{PushWorkRes, Thread},
51 transaction::TransactionInfo,
52 BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
53 };
54
55 #[path = "freeze.rs"]
56 mod freeze;
57 use self::freeze::{FreezeCookie, FreezeListener};
58
59 struct Mapping {
60 address: usize,
61 alloc: RangeAllocator<AllocationInfo>,
62 }
63
64 impl Mapping {
65 fn new(address: usize, size: usize) -> Self {
66 Self {
67 address,
68 alloc: RangeAllocator::new(size),
69 }
70 }
71 }
72
73 // bitflags for defer_work.
74 const PROC_DEFER_FLUSH: u8 = 1;
75 const PROC_DEFER_RELEASE: u8 = 2;
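//
// A minimal sketch of how these bits are meant to be used (it mirrors `release`/`flush` and
// `workqueue::WorkItem::run` further down, with `process` standing for any `Arc<Process>`):
// set a bit under the lock, enqueue the work item only on the transition from "no deferred
// work" to "some deferred work", and consume all bits in the work handler.
//
//     let should_schedule;
//     {
//         let mut inner = process.inner.lock();
//         should_schedule = inner.defer_work == 0;
//         inner.defer_work |= PROC_DEFER_FLUSH;
//     }
//     if should_schedule {
//         // An enqueue failure only means the work item is already scheduled.
//         let _ = workqueue::system().enqueue(process);
//     }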
76
77 #[derive(Copy, Clone)]
78 pub(crate) enum IsFrozen {
79 Yes,
80 No,
81 InProgress,
82 }
83
84 impl IsFrozen {
85 /// Whether incoming transactions should be rejected due to freeze.
86 pub(crate) fn is_frozen(self) -> bool {
87 match self {
88 IsFrozen::Yes => true,
89 IsFrozen::No => false,
90 IsFrozen::InProgress => true,
91 }
92 }
93
94 /// Whether freeze notifications consider this process frozen.
95 pub(crate) fn is_fully_frozen(self) -> bool {
96 match self {
97 IsFrozen::Yes => true,
98 IsFrozen::No => false,
99 IsFrozen::InProgress => false,
100 }
101 }
102 }
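
// For illustration: while a freeze is still in progress, new transactions are already
// rejected, but freeze listeners do not yet report the process as frozen.
//
//     assert!(IsFrozen::InProgress.is_frozen());
//     assert!(!IsFrozen::InProgress.is_fully_frozen());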
103
104 /// The fields of `Process` protected by the spinlock.
105 pub(crate) struct ProcessInner {
106 is_manager: bool,
107 pub(crate) is_dead: bool,
108 threads: RBTree<i32, Arc<Thread>>,
109 /// INVARIANT: Threads pushed to this list must be owned by this process.
110 ready_threads: List<Thread>,
111 nodes: RBTree<u64, DArc<Node>>,
112 mapping: Option<Mapping>,
113 work: List<DTRWrap<dyn DeliverToRead>>,
114 delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
115
116 /// The number of requested threads that haven't registered yet.
117 requested_thread_count: u32,
118 /// The maximum number of threads used by the process thread pool.
119 max_threads: u32,
120 /// The number of threads that have started and registered with the thread pool.
121 started_thread_count: u32,
122
123 /// Bitmap of deferred work to do.
124 defer_work: u8,
125
126 /// Number of transactions to be transmitted before processes in freeze_wait
127 /// are woken up.
128 outstanding_txns: u32,
129 /// Process is frozen and unable to service binder transactions.
130 pub(crate) is_frozen: IsFrozen,
131 /// Process received sync transactions since last frozen.
132 pub(crate) sync_recv: bool,
133 /// Process received async transactions since last frozen.
134 pub(crate) async_recv: bool,
135 pub(crate) binderfs_file: Option<BinderfsProcFile>,
136 /// Whether oneway spam detection is enabled.
137 oneway_spam_detection_enabled: bool,
138 }
139
140 impl ProcessInner {
141 fn new() -> Self {
142 Self {
143 is_manager: false,
144 is_dead: false,
145 threads: RBTree::new(),
146 ready_threads: List::new(),
147 mapping: None,
148 nodes: RBTree::new(),
149 work: List::new(),
150 delivered_deaths: List::new(),
151 requested_thread_count: 0,
152 max_threads: 0,
153 started_thread_count: 0,
154 defer_work: 0,
155 outstanding_txns: 0,
156 is_frozen: IsFrozen::No,
157 sync_recv: false,
158 async_recv: false,
159 binderfs_file: None,
160 oneway_spam_detection_enabled: false,
161 }
162 }
163
164 /// Schedule the work item for execution on this process.
165 ///
166 /// If any threads are ready for work, then the work item is given directly to that thread and
167 /// it is woken up. Otherwise, it is pushed to the process work list.
168 ///
169 /// This call can fail only if the process is dead. In this case, the work item is returned to
170 /// the caller so that the caller can drop it after releasing the inner process lock. This is
171 /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
172 /// taken while holding the inner process lock.
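///
/// # Example
///
/// A sketch of the caller-side pattern (compare `Process::push_work` further down);
/// `process` here stands for any `&Process`:
///
/// ```ignore
/// let res = process.inner.lock().push_work(work);
/// if let Err((err, work)) = res {
///     // Drop the rejected work item only after the lock guard has been released.
///     drop(work);
///     return Err(err);
/// }
/// ```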
173 pub(crate) fn push_work(
174 &mut self,
175 work: DLArc<dyn DeliverToRead>,
176 ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
177 // Try to find a ready thread to which to push the work.
178 if let Some(thread) = self.ready_threads.pop_front() {
179 // Push to thread while holding state lock. This prevents the thread from giving up
180 // (for example, because of a signal) when we're about to deliver work.
181 match thread.push_work(work) {
182 PushWorkRes::Ok => Ok(()),
183 PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
184 }
185 } else if self.is_dead {
186 Err((BinderError::new_dead(), work))
187 } else {
188 let sync = work.should_sync_wakeup();
189
190 // Didn't find a thread waiting for proc work; this can happen
191 // in two scenarios:
192 // 1. All threads are busy handling transactions
193 // In that case, one of those threads should call back into
194 // the kernel driver soon and pick up this work.
195 // 2. Threads are using the (e)poll interface, in which case
196 // they may be blocked on the waitqueue without having been
197 // added to waiting_threads. For this case, we just iterate
198 // over all threads not handling transaction work, and
199 // wake them all up. We wake all because we don't know whether
200 // a thread that called into (e)poll is handling non-binder
201 // work currently.
202 self.work.push_back(work);
203
204 // Wake up polling threads, if any.
205 for thread in self.threads.values() {
206 thread.notify_if_poll_ready(sync);
207 }
208
209 Ok(())
210 }
211 }
212
213 pub(crate) fn remove_node(&mut self, ptr: u64) {
214 self.nodes.remove(&ptr);
215 }
216
217 /// Updates the reference count on the given node.
218 pub(crate) fn update_node_refcount(
219 &mut self,
220 node: &DArc<Node>,
221 inc: bool,
222 strong: bool,
223 count: usize,
224 othread: Option<&Thread>,
225 ) {
226 let push = node.update_refcount_locked(inc, strong, count, self);
227
228 // If we decided that we need to push work, push either to the process or to a thread if
229 // one is specified.
230 if let Some(node) = push {
231 if let Some(thread) = othread {
232 thread.push_work_deferred(node);
233 } else {
234 let _ = self.push_work(node);
235 // Ignoring the result is fine: `push_work` only fails if the process is dead, and in
236 // that case the process no longer cares about the notification.
237 }
238 }
239 }
240
241 pub(crate) fn new_node_ref(
242 &mut self,
243 node: DArc<Node>,
244 strong: bool,
245 thread: Option<&Thread>,
246 ) -> NodeRef {
247 self.update_node_refcount(&node, true, strong, 1, thread);
248 let strong_count = if strong { 1 } else { 0 };
249 NodeRef::new(node, strong_count, 1 - strong_count)
250 }
251
252 pub(crate) fn new_node_ref_with_thread(
253 &mut self,
254 node: DArc<Node>,
255 strong: bool,
256 thread: &Thread,
257 wrapper: Option<CritIncrWrapper>,
258 ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
259 let push = match wrapper {
260 None => node
261 .incr_refcount_allow_zero2one(strong, self)?
262 .map(|node| node as _),
263 Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
264 };
265 if let Some(node) = push {
266 thread.push_work_deferred(node);
267 }
268 let strong_count = if strong { 1 } else { 0 };
269 Ok(NodeRef::new(node, strong_count, 1 - strong_count))
270 }
271
272 /// Returns an existing node with the given pointer and cookie, if one exists.
273 ///
274 /// Returns an error if a node with the given pointer but a different cookie exists.
275 fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
276 match self.nodes.get(&ptr) {
277 None => Ok(None),
278 Some(node) => {
279 let (_, node_cookie) = node.get_id();
280 if node_cookie == cookie {
281 Ok(Some(node.clone()))
282 } else {
283 Err(EINVAL)
284 }
285 }
286 }
287 }
288
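/// Consumes one outstanding thread-spawn request (see `needs_thread`) and counts the
/// caller's thread as started. Returns `false` if no spawn had been requested.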
289 fn register_thread(&mut self) -> bool {
290 if self.requested_thread_count == 0 {
291 return false;
292 }
293
294 self.requested_thread_count -= 1;
295 self.started_thread_count += 1;
296 true
297 }
298
299 /// Finds a delivered death notification with the given cookie, removes it from the process's
300 /// delivered list, and returns it.
301 fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
302 let mut cursor = self.delivered_deaths.cursor_front();
303 while let Some(next) = cursor.peek_next() {
304 if next.cookie == cookie {
305 return Some(next.remove().into_arc());
306 }
307 cursor.move_next();
308 }
309 None
310 }
311
312 pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
313 if let Some(death) = ListArc::try_from_arc_or_drop(death) {
314 self.delivered_deaths.push_back(death);
315 } else {
316 pr_warn!("Notification added to `delivered_deaths` twice.");
317 }
318 }
319
320 pub(crate) fn add_outstanding_txn(&mut self) {
321 self.outstanding_txns += 1;
322 }
323
324 fn txns_pending_locked(&self) -> bool {
325 if self.outstanding_txns > 0 {
326 return true;
327 }
328 for thread in self.threads.values() {
329 if thread.has_current_transaction() {
330 return true;
331 }
332 }
333 false
334 }
335 }
336
337 /// Used to keep track of a node that this process has a handle to.
338 #[pin_data]
339 pub(crate) struct NodeRefInfo {
340 debug_id: usize,
341 /// The refcount that this process holds on the node.
342 node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
343 death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
344 /// Cookie of the active freeze listener for this node.
345 freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
346 /// Used to store this `NodeRefInfo` in the node's `refs` list.
347 #[pin]
348 links: ListLinks<{ Self::LIST_NODE }>,
349 /// The handle for this `NodeRefInfo`.
350 handle: u32,
351 /// The process that has a handle to the node.
352 pub(crate) process: Arc<Process>,
353 }
354
355 impl NodeRefInfo {
356 /// The id used for the `Node::refs` list.
357 pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
358 /// The id used for the `ListArc` in `ProcessNodeRefs`.
359 const LIST_PROC: u64 = 0xd703a5263dcc8650;
360
361 fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
362 pin_init!(Self {
363 debug_id: super::next_debug_id(),
364 node_ref: ListArcField::new(node_ref),
365 death: ListArcField::new(None),
366 freeze: ListArcField::new(None),
367 links <- ListLinks::new(),
368 handle,
369 process,
370 })
371 }
372
373 kernel::list::define_list_arc_field_getter! {
374 pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
375 pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
376 pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
377 pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
378 }
379 }
380
381 kernel::list::impl_list_arc_safe! {
382 impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
383 impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
384 }
385 kernel::list::impl_list_item! {
386 impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
387 using ListLinks { self.links };
388 }
389 }
390
391 /// Keeps track of references this process has to nodes owned by other processes.
392 ///
393 /// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
394 /// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
395 /// extra costs should be eliminated.
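///
/// Both lookup directions go through these maps. A minimal sketch (error handling elided,
/// `node_ref` standing for any `NodeRef` owned by this process):
///
/// ```ignore
/// let handle: u32 = *refs.by_node.get(&node_ref.node.global_id()).ok_or(ENOENT)?;
/// let info = refs.by_handle.get_mut(&handle).ok_or(ENOENT)?;
/// ```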
396 struct ProcessNodeRefs {
397 /// Used to look up nodes using the 32-bit id that this process knows it by.
398 by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
399 /// Used to quickly find unused ids in `by_handle`.
400 handle_is_present: IdPool,
401 /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
402 /// the underlying `Node` struct as returned by `Node::global_id`.
403 by_node: RBTree<usize, u32>,
404 /// Used to look up a `FreezeListener` by cookie.
405 ///
406 /// There might be multiple freeze listeners for the same node, but at most one of them is
407 /// active.
408 freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
409 }
410
411 impl ProcessNodeRefs {
412 fn new() -> Self {
413 Self {
414 by_handle: RBTree::new(),
415 handle_is_present: IdPool::new(),
416 by_node: RBTree::new(),
417 freeze_listeners: RBTree::new(),
418 }
419 }
420 }
421
422 use core::mem::offset_of;
423 use kernel::bindings::rb_process_layout;
424 pub(crate) const PROCESS_LAYOUT: rb_process_layout = rb_process_layout {
425 arc_offset: Arc::<Process>::DATA_OFFSET,
426 task: offset_of!(Process, task),
427 };
428
429 /// A process using binder.
430 ///
431 /// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
432 /// that a process has opened, so processes using several binder contexts have several `Process`
433 /// objects. This ensures that the contexts are fully separated.
434 #[pin_data]
435 pub(crate) struct Process {
436 pub(crate) ctx: Arc<Context>,
437
438 // The task leader (process).
439 pub(crate) task: ARef<Task>,
440
441 // Credential associated with file when `Process` is created.
442 pub(crate) cred: ARef<Credential>,
443
444 #[pin]
445 pub(crate) inner: SpinLock<ProcessInner>,
446
447 #[pin]
448 pub(crate) pages: ShrinkablePageRange,
449
450 // Waitqueue of processes waiting for all outstanding transactions to be
451 // processed.
452 #[pin]
453 freeze_wait: CondVar,
454
455 // Node references are protected by a separate lock to avoid recursive acquisition when
456 // incrementing/decrementing a node in another process.
457 #[pin]
458 node_refs: Mutex<ProcessNodeRefs>,
459
460 // Work node for deferred work item.
461 #[pin]
462 defer_work: Work<Process>,
463
464 // Links for process list in Context.
465 #[pin]
466 links: ListLinks,
467
468 pub(crate) stats: BinderStats,
469 }
470
471 kernel::impl_has_work! {
472 impl HasWork<Process> for Process { self.defer_work }
473 }
474
475 kernel::list::impl_list_arc_safe! {
476 impl ListArcSafe<0> for Process { untracked; }
477 }
478 kernel::list::impl_list_item! {
479 impl ListItem<0> for Process {
480 using ListLinks { self.links };
481 }
482 }
483
484 impl workqueue::WorkItem for Process {
485 type Pointer = Arc<Process>;
486
487 fn run(me: Arc<Self>) {
488 let defer;
489 {
490 let mut inner = me.inner.lock();
491 defer = inner.defer_work;
492 inner.defer_work = 0;
493 }
494
495 if defer & PROC_DEFER_FLUSH != 0 {
496 me.deferred_flush();
497 }
498 if defer & PROC_DEFER_RELEASE != 0 {
499 me.deferred_release();
500 }
501 }
502 }
503
504 impl Process {
505 fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
506 let current = kernel::current!();
507 let process = Arc::pin_init::<Error>(
508 try_pin_init!(Process {
509 ctx,
510 cred,
511 inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
512 pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
513 node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
514 freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
515 task: current.group_leader().into(),
516 defer_work <- kernel::new_work!("Process::defer_work"),
517 links <- ListLinks::new(),
518 stats: BinderStats::new(),
519 }),
520 GFP_KERNEL,
521 )?;
522
523 process.ctx.register_process(process.clone())?;
524
525 Ok(process)
526 }
527
528 pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
529 self.task.tgid_nr_ns(None)
530 }
531
532 #[inline(never)]
533 pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
534 seq_print!(m, "proc {}\n", self.pid_in_current_ns());
535 seq_print!(m, "context {}\n", &*ctx.name);
536
537 let inner = self.inner.lock();
538 seq_print!(m, " threads: {}\n", inner.threads.iter().count());
539 seq_print!(
540 m,
541 " requested threads: {}+{}/{}\n",
542 inner.requested_thread_count,
543 inner.started_thread_count,
544 inner.max_threads,
545 );
546 if let Some(mapping) = &inner.mapping {
547 seq_print!(
548 m,
549 " free oneway space: {}\n",
550 mapping.alloc.free_oneway_space()
551 );
552 seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
553 }
554 seq_print!(
555 m,
556 " outstanding transactions: {}\n",
557 inner.outstanding_txns
558 );
559 seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
560 drop(inner);
561
562 {
563 let mut refs = self.node_refs.lock();
564 let (mut count, mut weak, mut strong) = (0, 0, 0);
565 for r in refs.by_handle.values_mut() {
566 let node_ref = r.node_ref();
567 let (nstrong, nweak) = node_ref.get_count();
568 count += 1;
569 weak += nweak;
570 strong += nstrong;
571 }
572 seq_print!(m, " refs: {count} s {strong} w {weak}\n");
573 }
574
575 self.stats.debug_print(" ", m);
576
577 Ok(())
578 }
579
580 #[inline(never)]
581 pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
582 seq_print!(m, "proc {}\n", self.pid_in_current_ns());
583 seq_print!(m, "context {}\n", &*ctx.name);
584
585 let mut all_threads = KVec::new();
586 let mut all_nodes = KVec::new();
587 loop {
588 let inner = self.inner.lock();
589 let num_threads = inner.threads.iter().count();
590 let num_nodes = inner.nodes.iter().count();
591
592 if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
593 drop(inner);
594 all_threads.reserve(num_threads, GFP_KERNEL)?;
595 all_nodes.reserve(num_nodes, GFP_KERNEL)?;
596 continue;
597 }
598
599 for thread in inner.threads.values() {
600 assert!(all_threads.len() < all_threads.capacity());
601 let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
602 }
603
604 for node in inner.nodes.values() {
605 assert!(all_nodes.len() < all_nodes.capacity());
606 let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
607 }
608
609 break;
610 }
611
612 for thread in all_threads {
613 thread.debug_print(m, print_all)?;
614 }
615
616 let mut inner = self.inner.lock();
617 for node in all_nodes {
618 if print_all || node.has_oneway_transaction(&mut inner) {
619 node.full_debug_print(m, &mut inner)?;
620 }
621 }
622 drop(inner);
623
624 if print_all {
625 let mut refs = self.node_refs.lock();
626 for r in refs.by_handle.values_mut() {
627 let node_ref = r.node_ref();
628 let dead = node_ref.node.owner.inner.lock().is_dead;
629 let (strong, weak) = node_ref.get_count();
630 let debug_id = node_ref.node.debug_id;
631
632 seq_print!(
633 m,
634 " ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
635 r.debug_id,
636 r.handle,
637 if dead { "dead " } else { "" }
638 );
639 }
640 }
641
642 let inner = self.inner.lock();
643 for work in &inner.work {
644 work.debug_print(m, " ", " pending transaction ")?;
645 }
646 for _death in &inner.delivered_deaths {
647 seq_print!(m, " has delivered dead binder\n");
648 }
649 if let Some(mapping) = &inner.mapping {
650 mapping.alloc.debug_print(m)?;
651 }
652 drop(inner);
653
654 Ok(())
655 }
656
657 /// Attempts to fetch a work item from the process queue.
658 pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
659 self.inner.lock().work.pop_front()
660 }
661
662 /// Attempts to fetch a work item from the process queue. If none is available, it registers the
663 /// given thread as ready to receive work directly.
664 ///
665 /// This must only be called when the thread is not participating in a transaction chain; when
666 /// it is, work will always be delivered directly to the thread (and not through the process
667 /// queue).
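///
/// # Example
///
/// A sketch of how the two outcomes are typically handled (`deliver` is a hypothetical
/// helper, not part of this module):
///
/// ```ignore
/// match process.get_work_or_register(&thread) {
///     GetWorkOrRegister::Work(work) => deliver(work),
///     GetWorkOrRegister::Register(registration) => {
///         // Sleep until work arrives; dropping `registration` removes the thread
///         // from the `ready_threads` list again.
///     }
/// }
/// ```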
668 pub(crate) fn get_work_or_register<'a>(
669 &'a self,
670 thread: &'a Arc<Thread>,
671 ) -> GetWorkOrRegister<'a> {
672 let mut inner = self.inner.lock();
673 // Try to get work from the process queue.
674 if let Some(work) = inner.work.pop_front() {
675 return GetWorkOrRegister::Work(work);
676 }
677
678 // Register the thread as ready.
679 GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
680 }
681
682 fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
683 let id = {
684 let current = kernel::current!();
685 if self.task != current.group_leader() {
686 pr_err!("get_current_thread was called from the wrong process.");
687 return Err(EINVAL);
688 }
689 current.pid()
690 };
691
692 {
693 let inner = self.inner.lock();
694 if let Some(thread) = inner.threads.get(&id) {
695 return Ok(thread.clone());
696 }
697 }
698
699 // Allocate a new `Thread` without holding any locks.
700 let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
701 let ta: Arc<Thread> = Thread::new(id, self.into())?;
702
703 let mut inner = self.inner.lock();
704 match inner.threads.entry(id) {
705 rbtree::Entry::Vacant(entry) => {
706 entry.insert(ta.clone(), reservation);
707 Ok(ta)
708 }
709 rbtree::Entry::Occupied(_entry) => {
710 pr_err!("Cannot create two threads with the same id.");
711 Err(EINVAL)
712 }
713 }
714 }
715
716 pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
717 // If push_work fails, drop the work item outside the lock.
718 let res = self.inner.lock().push_work(work);
719 match res {
720 Ok(()) => Ok(()),
721 Err((err, work)) => {
722 drop(work);
723 Err(err)
724 }
725 }
726 }
727
728 fn set_as_manager(
729 self: ArcBorrow<'_, Self>,
730 info: Option<FlatBinderObject>,
731 thread: &Thread,
732 ) -> Result {
733 let (ptr, cookie, flags) = if let Some(obj) = info {
734 (
735 // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
736 // is safe to access the `binder` field.
737 unsafe { obj.__bindgen_anon_1.binder },
738 obj.cookie,
739 obj.flags,
740 )
741 } else {
742 (0, 0, 0)
743 };
744 let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
745 let node = node_ref.node.clone();
746 self.ctx.set_manager_node(node_ref)?;
747 self.inner.lock().is_manager = true;
748
749 // Force the state of the node to prevent the delivery of acquire/increfs.
750 let mut owner_inner = node.owner.inner.lock();
751 node.force_has_count(&mut owner_inner);
752 Ok(())
753 }
754
755 fn get_node_inner(
756 self: ArcBorrow<'_, Self>,
757 ptr: u64,
758 cookie: u64,
759 flags: u32,
760 strong: bool,
761 thread: &Thread,
762 wrapper: Option<CritIncrWrapper>,
763 ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
764 // Try to find an existing node.
765 {
766 let mut inner = self.inner.lock();
767 if let Some(node) = inner.get_existing_node(ptr, cookie)? {
768 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
769 }
770 }
771
772 // Allocate the node before reacquiring the lock.
773 let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
774 let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
775 let mut inner = self.inner.lock();
776 if let Some(node) = inner.get_existing_node(ptr, cookie)? {
777 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
778 }
779
780 inner.nodes.insert(rbnode);
781 // This can only fail if someone has already pushed the node to a list, but we just created
782 // it and still hold the lock, so it can't fail right now.
783 let node_ref = inner
784 .new_node_ref_with_thread(node, strong, thread, wrapper)
785 .unwrap();
786
787 Ok(Ok(node_ref))
788 }
789
790 pub(crate) fn get_node(
791 self: ArcBorrow<'_, Self>,
792 ptr: u64,
793 cookie: u64,
794 flags: u32,
795 strong: bool,
796 thread: &Thread,
797 ) -> Result<NodeRef> {
798 let mut wrapper = None;
799 for _ in 0..2 {
800 match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
801 Err(err) => return Err(err),
802 Ok(Ok(node_ref)) => return Ok(node_ref),
803 Ok(Err(CouldNotDeliverCriticalIncrement)) => {
804 wrapper = Some(CritIncrWrapper::new()?);
805 }
806 }
807 }
808 // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
809 // loop should run at most twice.
810 unreachable!()
811 }
812
813 pub(crate) fn insert_or_update_handle(
814 self: ArcBorrow<'_, Process>,
815 node_ref: NodeRef,
816 is_manager: bool,
817 ) -> Result<u32> {
818 {
819 let mut refs = self.node_refs.lock();
820
821 // Do a lookup before inserting.
822 if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
823 let handle = *handle_ref;
824 let info = refs.by_handle.get_mut(&handle).unwrap();
825 info.node_ref().absorb(node_ref);
826 return Ok(handle);
827 }
828 }
829
830 // Reserve memory for tree nodes.
831 let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
832 let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
833 let info = UniqueArc::new_uninit(GFP_KERNEL)?;
834
835 let mut refs_lock = self.node_refs.lock();
836 let mut refs = &mut *refs_lock;
837
838 let (unused_id, by_handle_slot) = loop {
839 // ID 0 may only be used by the manager.
840 let start = if is_manager { 0 } else { 1 };
841
842 if let Some(res) = refs.handle_is_present.find_unused_id(start) {
843 match refs.by_handle.entry(res.as_u32()) {
844 rbtree::Entry::Vacant(entry) => break (res, entry),
845 rbtree::Entry::Occupied(_) => {
846 pr_err!("Detected mismatch between handle_is_present and by_handle");
847 res.acquire();
848 kernel::warn_on!(true);
849 return Err(EINVAL);
850 }
851 }
852 }
853
854 let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
855 drop(refs_lock);
856 let resizer = grow_request.realloc(GFP_KERNEL)?;
857 refs_lock = self.node_refs.lock();
858 refs = &mut *refs_lock;
859 refs.handle_is_present.grow(resizer);
860 };
861 let handle = unused_id.as_u32();
862
863 // Do the lookup again, as the node may have been inserted while the lock was released.
864 if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
865 let handle = *handle_ref;
866 let info = refs.by_handle.get_mut(&handle).unwrap();
867 info.node_ref().absorb(node_ref);
868 return Ok(handle);
869 }
870
871 let gid = node_ref.node.global_id();
872 let (info_proc, info_node) = {
873 let info_init = NodeRefInfo::new(node_ref, handle, self.into());
874 match info.pin_init_with(info_init) {
875 Ok(info) => ListArc::pair_from_pin_unique(info),
876 // error is infallible
877 Err(err) => match err {},
878 }
879 };
880
881 // Ensure the process is still alive while we insert a new reference.
882 //
883 // This releases the lock before inserting the nodes, but since `is_dead` is set as the
884 // first thing in `deferred_release`, process cleanup will not miss the items inserted into
885 // `refs` below.
886 if self.inner.lock().is_dead {
887 return Err(ESRCH);
888 }
889
890 // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
891 // `info_node` into the right node's `refs` list.
892 unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
893
894 refs.by_node.insert(reserve1.into_node(gid, handle));
895 by_handle_slot.insert(info_proc, reserve2);
896 unused_id.acquire();
897 Ok(handle)
898 }
899
900 pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
901 // When handle is zero, try to get the context manager.
902 if handle == 0 {
903 Ok(self.ctx.get_manager_node(true)?)
904 } else {
905 Ok(self.get_node_from_handle(handle, true)?)
906 }
907 }
908
909 pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
910 self.node_refs
911 .lock()
912 .by_handle
913 .get_mut(&handle)
914 .ok_or(ENOENT)?
915 .node_ref()
916 .clone(strong)
917 }
918
919 pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
920 let mut inner = self.inner.lock();
921 // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
922 let removed = unsafe { inner.delivered_deaths.remove(death) };
923 drop(inner);
924 drop(removed);
925 }
926
927 pub(crate) fn update_ref(
928 self: ArcBorrow<'_, Process>,
929 handle: u32,
930 inc: bool,
931 strong: bool,
932 ) -> Result {
933 if inc && handle == 0 {
934 if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
935 if core::ptr::eq(&*self, &*node_ref.node.owner) {
936 return Err(EINVAL);
937 }
938 let _ = self.insert_or_update_handle(node_ref, true);
939 return Ok(());
940 }
941 }
942
943 // To preserve original binder behaviour, we only fail requests where the manager tries to
944 // increment references on itself.
945 let mut refs = self.node_refs.lock();
946 if let Some(info) = refs.by_handle.get_mut(&handle) {
947 if info.node_ref().update(inc, strong) {
948 // Clean up death if there is one attached to this node reference.
949 if let Some(death) = info.death().take() {
950 death.set_cleared(true);
951 self.remove_from_delivered_deaths(&death);
952 }
953
954 // Remove reference from process tables, and from the node's `refs` list.
955
956 // SAFETY: We are removing the `NodeRefInfo` from the right node.
957 unsafe { info.node_ref2().node.remove_node_info(info) };
958
959 let id = info.node_ref().node.global_id();
960 refs.by_handle.remove(&handle);
961 refs.by_node.remove(&id);
962 refs.handle_is_present.release_id(handle as usize);
963
964 if let Some(shrink) = refs.handle_is_present.shrink_request() {
965 drop(refs);
966 // This intentionally ignores allocation failures.
967 if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
968 refs = self.node_refs.lock();
969 refs.handle_is_present.shrink(new_bitmap);
970 }
971 }
972 }
973 } else {
974 // All refs are cleared in process exit, so this warning is expected in that case.
975 if !self.inner.lock().is_dead {
976 pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
977 }
978 }
979 Ok(())
980 }
981
982 /// Decrements the refcount of the given node, if one exists.
983 pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
984 let mut inner = self.inner.lock();
985 if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
986 inner.update_node_refcount(&node, false, strong, 1, None);
987 }
988 }
989
990 pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
991 let ptr = reader.read::<u64>()?;
992 let cookie = reader.read::<u64>()?;
993 let mut inner = self.inner.lock();
994 if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
995 if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
996 // This only fails if the process is dead.
997 let _ = inner.push_work(node);
998 }
999 }
1000 Ok(())
1001 }
1002
1003 pub(crate) fn buffer_alloc(
1004 self: &Arc<Self>,
1005 debug_id: usize,
1006 size: usize,
1007 info: &mut TransactionInfo,
1008 ) -> BinderResult<NewAllocation> {
1009 use kernel::page::PAGE_SIZE;
1010
1011 let mut reserve_new_args = ReserveNewArgs {
1012 debug_id,
1013 size,
1014 is_oneway: info.is_oneway(),
1015 pid: info.from_pid,
1016 ..ReserveNewArgs::default()
1017 };
1018
1019 let (new_alloc, addr) = loop {
1020 let mut inner = self.inner.lock();
1021 let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
1022 let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
1023 ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
1024 ReserveNew::NeedAlloc(request) => request,
1025 };
1026 drop(inner);
1027 // We need to allocate memory and then call `reserve_new` again.
1028 reserve_new_args = alloc_request.make_alloc()?;
1029 };
1030
1031 info.oneway_spam_suspect = new_alloc.oneway_spam_detected;
1032 let res = Allocation::new(
1033 self.clone(),
1034 debug_id,
1035 new_alloc.offset,
1036 size,
1037 addr + new_alloc.offset,
1038 );
1039
1040 // This allocation will be marked as in use until the `Allocation` is used to free it.
1041 //
1042 // This method can't be called while holding a lock, so we release the lock first. It's
1043 // okay for several threads to use the method on the same index at the same time. In that
1044 // case, one of the calls will allocate the given page (if missing), and the other call
1045 // will wait for the other call to finish allocating the page.
1046 //
1047 // We will not call `stop_using_range` in parallel with this on the same page, because the
1048 // allocation can only be removed via the destructor of the `Allocation` object that we
1049 // currently own.
1050 match self.pages.use_range(
1051 new_alloc.offset / PAGE_SIZE,
1052 (new_alloc.offset + size).div_ceil(PAGE_SIZE),
1053 ) {
1054 Ok(()) => {}
1055 Err(err) => {
1056 pr_warn!("use_range failure {:?}", err);
1057 return Err(err.into());
1058 }
1059 }
1060
1061 Ok(NewAllocation(res))
1062 }
1063
1064 pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
1065 let mut inner = self.inner.lock();
1066 let mapping = inner.mapping.as_mut()?;
1067 let offset = ptr.checked_sub(mapping.address)?;
1068 let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
1069 let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr);
1070 if let Some(data) = odata {
1071 alloc.set_info(data);
1072 }
1073 Some(alloc)
1074 }
1075
1076 pub(crate) fn buffer_raw_free(&self, ptr: usize) {
1077 let mut inner = self.inner.lock();
1078 if let Some(ref mut mapping) = &mut inner.mapping {
1079 let offset = match ptr.checked_sub(mapping.address) {
1080 Some(offset) => offset,
1081 None => return,
1082 };
1083
1084 let freed_range = match mapping.alloc.reservation_abort(offset) {
1085 Ok(freed_range) => freed_range,
1086 Err(_) => {
1087 pr_warn!(
1088 "Pointer {:x} failed to free, base = {:x}\n",
1089 ptr,
1090 mapping.address
1091 );
1092 return;
1093 }
1094 };
1095
1096 // No more allocations in this range. Mark them as not in use.
1097 //
1098 // Must be done before we release the lock so that `use_range` is not used on these
1099 // indices until `stop_using_range` returns.
1100 self.pages
1101 .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
1102 }
1103 }
1104
1105 pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
1106 let mut inner = self.inner.lock();
1107 if let Some(ref mut mapping) = &mut inner.mapping {
1108 if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
1109 pr_warn!("Offset {} failed to be marked freeable\n", offset);
1110 }
1111 }
1112 }
1113
1114 fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
1115 use kernel::page::PAGE_SIZE;
1116 let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
1117 let mapping = Mapping::new(vma.start(), size);
1118 let page_count = self.pages.register_with_vma(vma)?;
1119 if page_count * PAGE_SIZE != size {
1120 return Err(EINVAL);
1121 }
1122
1123 // Save range allocator for later.
1124 self.inner.lock().mapping = Some(mapping);
1125
1126 Ok(())
1127 }
1128
1129 fn version(&self, data: UserSlice) -> Result {
1130 data.writer().write(&BinderVersion::current())
1131 }
1132
1133 pub(crate) fn register_thread(&self) -> bool {
1134 self.inner.lock().register_thread()
1135 }
1136
1137 fn remove_thread(&self, thread: Arc<Thread>) {
1138 self.inner.lock().threads.remove(&thread.id);
1139 thread.release();
1140 }
1141
1142 fn set_max_threads(&self, max: u32) {
1143 self.inner.lock().max_threads = max;
1144 }
1145
1146 fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
1147 self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
1148 }
1149
1150 pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
1151 self.inner.lock().oneway_spam_detection_enabled
1152 }
1153
1154 fn get_node_debug_info(&self, data: UserSlice) -> Result {
1155 let (mut reader, mut writer) = data.reader_writer();
1156
1157 // Read the starting point.
1158 let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
1159 let mut out = BinderNodeDebugInfo::default();
1160
1161 {
1162 let inner = self.inner.lock();
1163 for (node_ptr, node) in &inner.nodes {
1164 if *node_ptr > ptr {
1165 node.populate_debug_info(&mut out, &inner);
1166 break;
1167 }
1168 }
1169 }
1170
1171 writer.write(&out)
1172 }
1173
1174 fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
1175 let (mut reader, mut writer) = data.reader_writer();
1176 let mut out = reader.read::<BinderNodeInfoForRef>()?;
1177
1178 if out.strong_count != 0
1179 || out.weak_count != 0
1180 || out.reserved1 != 0
1181 || out.reserved2 != 0
1182 || out.reserved3 != 0
1183 {
1184 return Err(EINVAL);
1185 }
1186
1187 // Only the context manager is allowed to use this ioctl.
1188 if !self.inner.lock().is_manager {
1189 return Err(EPERM);
1190 }
1191
1192 {
1193 let mut node_refs = self.node_refs.lock();
1194 let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
1195 let node_ref = node_info.node_ref();
1196 let owner_inner = node_ref.node.owner.inner.lock();
1197 node_ref.node.populate_counts(&mut out, &owner_inner);
1198 }
1199
1200 // Write the result back.
1201 writer.write(&out)
1202 }
1203
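/// Returns whether the driver should ask userspace to spawn another looper thread, and if
/// so, records the request in `requested_thread_count`.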
1204 pub(crate) fn needs_thread(&self) -> bool {
1205 let mut inner = self.inner.lock();
1206 let ret = inner.requested_thread_count == 0
1207 && inner.ready_threads.is_empty()
1208 && inner.started_thread_count < inner.max_threads;
1209 if ret {
1210 inner.requested_thread_count += 1
1211 }
1212 ret
1213 }
1214
1215 pub(crate) fn request_death(
1216 self: &Arc<Self>,
1217 reader: &mut UserSliceReader,
1218 thread: &Thread,
1219 ) -> Result {
1220 let handle: u32 = reader.read()?;
1221 let cookie: u64 = reader.read()?;
1222
1223 // Queue BR_ERROR if we can't allocate memory for the death notification.
1224 let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
1225 thread.push_return_work(BR_ERROR);
1226 })?;
1227 let mut refs = self.node_refs.lock();
1228 let Some(info) = refs.by_handle.get_mut(&handle) else {
1229 pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
1230 return Ok(());
1231 };
1232
1233 // Nothing to do if there is already a death notification request for this handle.
1234 if info.death().is_some() {
1235 pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
1236 return Ok(());
1237 }
1238
1239 let death = {
1240 let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
1241 match death.pin_init_with(death_init) {
1242 Ok(death) => death,
1243 // error is infallible
1244 Err(err) => match err {},
1245 }
1246 };
1247
1248 // Register the death notification.
1249 {
1250 let owner = info.node_ref2().node.owner.clone();
1251 let mut owner_inner = owner.inner.lock();
1252 if owner_inner.is_dead {
1253 let death = Arc::from(death);
1254 *info.death() = Some(death.clone());
1255 drop(owner_inner);
1256 death.set_dead();
1257 } else {
1258 let death = ListArc::from(death);
1259 *info.death() = Some(death.clone_arc());
1260 info.node_ref().node.add_death(death, &mut owner_inner);
1261 }
1262 }
1263 Ok(())
1264 }
1265
1266 pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
1267 let handle: u32 = reader.read()?;
1268 let cookie: u64 = reader.read()?;
1269
1270 let mut refs = self.node_refs.lock();
1271 let Some(info) = refs.by_handle.get_mut(&handle) else {
1272 pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
1273 return Ok(());
1274 };
1275
1276 let Some(death) = info.death().take() else {
1277 pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
1278 return Ok(());
1279 };
1280 if death.cookie != cookie {
1281 *info.death() = Some(death);
1282 pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
1283 return Ok(());
1284 }
1285
1286 // Update state and determine if we need to queue a work item. We only need to do it when
1287 // the node is not dead or if the user already completed the death notification.
1288 if death.set_cleared(false) {
1289 if let Some(death) = ListArc::try_from_arc_or_drop(death) {
1290 let _ = thread.push_work_if_looper(death);
1291 }
1292 }
1293
1294 Ok(())
1295 }
1296
1297 pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
1298 let death = self.inner.lock().pull_delivered_death(cookie);
1299 if let Some(death) = death {
1300 death.set_notification_done(thread);
1301 }
1302 }
1303
1304 /// Locks the spinlock and moves the `nodes` rbtree out.
1305 ///
1306 /// This allows you to iterate through `nodes` while also allowing you to give other parts of
1307 /// the codebase exclusive access to `ProcessInner`.
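///
/// # Example
///
/// A sketch of the intended usage (the tree is moved back into `ProcessInner` when the
/// returned guard is dropped):
///
/// ```ignore
/// let mut guard = process.lock_with_nodes();
/// for node in guard.nodes.values() {
///     node.full_debug_print(m, &mut guard.inner)?;
/// }
/// ```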
1308 pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
1309 let mut inner = self.inner.lock();
1310 WithNodes {
1311 nodes: take(&mut inner.nodes),
1312 inner,
1313 }
1314 }
1315
1316 fn deferred_flush(&self) {
1317 let inner = self.inner.lock();
1318 for thread in inner.threads.values() {
1319 thread.exit_looper();
1320 }
1321 }
1322
1323 fn deferred_release(self: Arc<Self>) {
1324 let is_manager = {
1325 let mut inner = self.inner.lock();
1326 inner.is_dead = true;
1327 inner.is_frozen = IsFrozen::No;
1328 inner.sync_recv = false;
1329 inner.async_recv = false;
1330 inner.is_manager
1331 };
1332
1333 if is_manager {
1334 self.ctx.unset_manager_node();
1335 }
1336
1337 self.ctx.deregister_process(&self);
1338
1339 let binderfs_file = self.inner.lock().binderfs_file.take();
1340 drop(binderfs_file);
1341
1342 // Release threads.
1343 let threads = {
1344 let mut inner = self.inner.lock();
1345 let threads = take(&mut inner.threads);
1346 let ready = take(&mut inner.ready_threads);
1347 drop(inner);
1348 drop(ready);
1349
1350 for thread in threads.values() {
1351 thread.release();
1352 }
1353 threads
1354 };
1355
1356 // Release nodes.
1357 {
1358 while let Some(node) = {
1359 let mut lock = self.inner.lock();
1360 lock.nodes.cursor_front_mut().map(|c| c.remove_current().1)
1361 } {
1362 node.to_key_value().1.release();
1363 }
1364 }
1365
1366 // Clean up death listeners and remove nodes from external node info lists.
1367 for info in self.node_refs.lock().by_handle.values_mut() {
1368 // SAFETY: We are removing the `NodeRefInfo` from the right node.
1369 unsafe { info.node_ref2().node.remove_node_info(info) };
1370
1371 // Remove all death notifications from the nodes (that belong to a different process).
1372 let death = if let Some(existing) = info.death().take() {
1373 existing
1374 } else {
1375 continue;
1376 };
1377 death.set_cleared(false);
1378 }
1379
1380 // Clean up freeze listeners.
1381 let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
1382 for listener in freeze_listeners.values() {
1383 listener.on_process_exit(&self);
1384 }
1385 drop(freeze_listeners);
1386
1387 // Release refs on foreign nodes.
1388 {
1389 let mut refs = self.node_refs.lock();
1390 let by_handle = take(&mut refs.by_handle);
1391 let by_node = take(&mut refs.by_node);
1392 drop(refs);
1393 drop(by_node);
1394 drop(by_handle);
1395 }
1396
1397 // Cancel all pending work items.
1398 while let Some(work) = self.get_work() {
1399 work.into_arc().cancel();
1400 }
1401
1402 // Clear delivered_deaths list.
1403 //
1404 // The scope ensures that the lock guard is dropped while executing the body.
1405 while let Some(delivered_death) = { self.inner.lock().delivered_deaths.pop_front() } {
1406 drop(delivered_death);
1407 }
1408
1409 // Free any resources kept alive by allocated buffers.
1410 let omapping = self.inner.lock().mapping.take();
1411 if let Some(mut mapping) = omapping {
1412 let address = mapping.address;
1413 mapping
1414 .alloc
1415 .take_for_each(|offset, size, debug_id, odata| {
1416 let ptr = offset + address;
1417 let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr);
1418 if let Some(data) = odata {
1419 alloc.set_info(data);
1420 }
1421 drop(alloc)
1422 });
1423 }
1424
1425 // calls to synchronize_rcu() in thread drop will happen here
1426 drop(threads);
1427 }
1428
1429 pub(crate) fn drop_outstanding_txn(&self) {
1430 let wake = {
1431 let mut inner = self.inner.lock();
1432 if inner.outstanding_txns == 0 {
1433 pr_err!("outstanding_txns underflow");
1434 return;
1435 }
1436 inner.outstanding_txns -= 1;
1437 inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
1438 };
1439
1440 if wake {
1441 self.freeze_wait.notify_all();
1442 }
1443 }
1444
1445 // #[export_name] is a temporary workaround so that ps output does not become unreadable from
1446 // mangled symbol names.
1447 #[export_name = "rust_binder_freeze"]
1448 pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
1449 if info.enable == 0 {
1450 let msgs = self.prepare_freeze_messages()?;
1451 let mut inner = self.inner.lock();
1452 inner.sync_recv = false;
1453 inner.async_recv = false;
1454 inner.is_frozen = IsFrozen::No;
1455 drop(inner);
1456 msgs.send_messages();
1457 return Ok(());
1458 }
1459
1460 let mut inner = self.inner.lock();
1461 inner.sync_recv = false;
1462 inner.async_recv = false;
1463 inner.is_frozen = IsFrozen::InProgress;
1464
1465 if info.timeout_ms > 0 {
1466 let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
1467 while jiffies > 0 {
1468 if inner.outstanding_txns == 0 {
1469 break;
1470 }
1471
1472 match self
1473 .freeze_wait
1474 .wait_interruptible_timeout(&mut inner, jiffies)
1475 {
1476 CondVarTimeoutResult::Signal { .. } => {
1477 inner.is_frozen = IsFrozen::No;
1478 return Err(ERESTARTSYS);
1479 }
1480 CondVarTimeoutResult::Woken { jiffies: remaining } => {
1481 jiffies = remaining;
1482 }
1483 CondVarTimeoutResult::Timeout => {
1484 jiffies = 0;
1485 }
1486 }
1487 }
1488 }
1489
1490 if inner.txns_pending_locked() {
1491 inner.is_frozen = IsFrozen::No;
1492 Err(EAGAIN)
1493 } else {
1494 drop(inner);
1495 match self.prepare_freeze_messages() {
1496 Ok(batch) => {
1497 self.inner.lock().is_frozen = IsFrozen::Yes;
1498 batch.send_messages();
1499 Ok(())
1500 }
1501 Err(kernel::alloc::AllocError) => {
1502 self.inner.lock().is_frozen = IsFrozen::No;
1503 Err(ENOMEM)
1504 }
1505 }
1506 }
1507 }
1508 }
1509
1510 fn get_frozen_status(data: UserSlice) -> Result {
1511 let (mut reader, mut writer) = data.reader_writer();
1512
1513 let mut info = reader.read::<BinderFrozenStatusInfo>()?;
1514 info.sync_recv = 0;
1515 info.async_recv = 0;
1516 let mut found = false;
1517
1518 for ctx in crate::context::get_all_contexts()? {
1519 ctx.for_each_proc(|proc| {
1520 if proc.task.pid() == info.pid as _ {
1521 found = true;
1522 let inner = proc.inner.lock();
1523 let txns_pending = inner.txns_pending_locked();
1524 info.async_recv |= inner.async_recv as u32;
1525 info.sync_recv |= inner.sync_recv as u32;
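// Bit 1 of `sync_recv` additionally reports whether transactions are still pending
// (assumption: this mirrors the flag layout used by the C binder driver).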
1526 info.sync_recv |= (txns_pending as u32) << 1;
1527 }
1528 });
1529 }
1530
1531 if found {
1532 writer.write(&info)?;
1533 Ok(())
1534 } else {
1535 Err(EINVAL)
1536 }
1537 }
1538
1539 fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
1540 let info = reader.read::<BinderFreezeInfo>()?;
1541
1542 // It's very unlikely for there to be more than 3, since a process normally uses at most binder and
1543 // hwbinder.
1544 let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
1545
1546 let ctxs = crate::context::get_all_contexts()?;
1547 for ctx in ctxs {
1548 for proc in ctx.get_procs_with_pid(info.pid as i32)? {
1549 procs.push(proc, GFP_KERNEL)?;
1550 }
1551 }
1552
1553 for proc in procs {
1554 proc.ioctl_freeze(&info)?;
1555 }
1556 Ok(())
1557 }
1558
1559 /// The ioctl handler.
1560 impl Process {
1561 /// Ioctls that are write-only from the perspective of userspace.
1562 ///
1563 /// The kernel will only read from the pointer that userspace provided to us.
1564 fn ioctl_write_only(
1565 this: ArcBorrow<'_, Process>,
1566 _file: &File,
1567 cmd: u32,
1568 reader: &mut UserSliceReader,
1569 ) -> Result {
1570 let thread = this.get_current_thread()?;
1571 match cmd {
1572 uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
1573 uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
1574 uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
1575 uapi::BINDER_SET_CONTEXT_MGR_EXT => {
1576 this.set_as_manager(Some(reader.read()?), &thread)?
1577 }
1578 uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
1579 this.set_oneway_spam_detection_enabled(reader.read()?)
1580 }
1581 uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
1582 _ => return Err(EINVAL),
1583 }
1584 Ok(())
1585 }
1586
1587 /// Ioctls that are read/write from the perspective of userspace.
1588 ///
1589 /// The kernel will both read from and write to the pointer that userspace provided to us.
1590 fn ioctl_write_read(
1591 this: ArcBorrow<'_, Process>,
1592 file: &File,
1593 cmd: u32,
1594 data: UserSlice,
1595 ) -> Result {
1596 let thread = this.get_current_thread()?;
1597 let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
1598 match cmd {
1599 uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
1600 uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
1601 uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
1602 uapi::BINDER_VERSION => this.version(data)?,
1603 uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
1604 uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
1605 _ => return Err(EINVAL),
1606 }
1607 Ok(())
1608 }
1609 }
1610
1611 /// The file operations supported by `Process`.
1612 impl Process {
1613 pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
1614 Self::new(ctx.into(), ARef::from(file.cred()))
1615 }
1616
1617 pub(crate) fn release(this: Arc<Process>, _file: &File) {
1618 let binderfs_file;
1619 let should_schedule;
1620 {
1621 let mut inner = this.inner.lock();
1622 should_schedule = inner.defer_work == 0;
1623 inner.defer_work |= PROC_DEFER_RELEASE;
1624 binderfs_file = inner.binderfs_file.take();
1625 }
1626
1627 if should_schedule {
1628 // Ignore failures to schedule to the workqueue. Those just mean that we're already
1629 // scheduled for execution.
1630 let _ = workqueue::system().enqueue(this);
1631 }
1632
1633 drop(binderfs_file);
1634 }
1635
1636 pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
1637 let should_schedule;
1638 {
1639 let mut inner = this.inner.lock();
1640 should_schedule = inner.defer_work == 0;
1641 inner.defer_work |= PROC_DEFER_FLUSH;
1642 }
1643
1644 if should_schedule {
1645 // Ignore failures to schedule to the workqueue. Those just mean that we're already
1646 // scheduled for execution.
1647 let _ = workqueue::system().enqueue(Arc::from(this));
1648 }
1649 Ok(())
1650 }
1651
1652 pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
1653 use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
1654 use kernel::uapi::{_IOC_READ, _IOC_WRITE};
1655
1656 crate::trace::trace_ioctl(cmd, arg);
1657
1658 let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
1659
1660 const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
1661
1662 let res = match _IOC_DIR(cmd) {
1663 _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
1664 _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
1665 _ => Err(EINVAL),
1666 };
1667
1668 crate::trace::trace_ioctl_done(res);
1669 res
1670 }
1671
1672 pub(crate) fn mmap(
1673 this: ArcBorrow<'_, Process>,
1674 _file: &File,
1675 vma: &mm::virt::VmaNew,
1676 ) -> Result {
1677 // We don't allow mmap to be used in a different process.
1678 if this.task != kernel::current!().group_leader() {
1679 return Err(EINVAL);
1680 }
1681 if vma.start() == 0 {
1682 return Err(EINVAL);
1683 }
1684
1685 vma.try_clear_maywrite().map_err(|_| EPERM)?;
1686 vma.set_dontcopy();
1687 vma.set_mixedmap();
1688
1689 // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
1690 this.create_mapping(vma)
1691 }
1692
1693 pub(crate) fn poll(
1694 this: ArcBorrow<'_, Process>,
1695 file: &File,
1696 table: PollTable<'_>,
1697 ) -> Result<u32> {
1698 let thread = this.get_current_thread()?;
1699 let (from_proc, mut mask) = thread.poll(file, table);
1700 if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
1701 mask |= bindings::POLLIN;
1702 }
1703 Ok(mask)
1704 }
1705 }
1706
1707 /// Represents that a thread has registered with the `ready_threads` list of its process.
1708 ///
1709 /// The destructor of this type will unregister the thread from the list of ready threads.
1710 pub(crate) struct Registration<'a> {
1711 thread: &'a Arc<Thread>,
1712 }
1713
1714 impl<'a> Registration<'a> {
1715 fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
1716 assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
1717 // INVARIANT: We are pushing this thread to the right `ready_threads` list.
1718 if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
1719 guard.ready_threads.push_front(list_arc);
1720 } else {
1721 // It is an error to hit this branch, and it should not be reachable. We try to do
1722 // something reasonable when the failure path happens. Most likely, the thread in
1723 // question will sleep forever.
1724 pr_err!("Same thread registered with `ready_threads` twice.");
1725 }
1726 Self { thread }
1727 }
1728 }
1729
1730 impl Drop for Registration<'_> {
1731 fn drop(&mut self) {
1732 let mut inner = self.thread.process.inner.lock();
1733 // SAFETY: The thread has the invariant that we never push it to any other linked list than
1734 // the `ready_threads` list of its parent process. Therefore, the thread is either in that
1735 // list, or in no list.
1736 unsafe { inner.ready_threads.remove(self.thread) };
1737 }
1738 }
1739
1740 pub(crate) struct WithNodes<'a> {
1741 pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
1742 pub(crate) nodes: RBTree<u64, DArc<Node>>,
1743 }
1744
1745 impl Drop for WithNodes<'_> {
1746 fn drop(&mut self) {
1747 core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
1748 if self.nodes.iter().next().is_some() {
1749 pr_err!("nodes array was modified while using lock_with_nodes\n");
1750 }
1751 }
1752 }
1753
1754 pub(crate) enum GetWorkOrRegister<'a> {
1755 Work(DLArc<dyn DeliverToRead>),
1756 Register(Registration<'a>),
1757 }
1758