// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! This module defines the `Process` type, which represents a process using a particular binder
//! context.
//!
//! The `Process` object keeps track of all of the resources that this process owns in the binder
//! context.
//!
//! There is one `Process` object for each binder fd that a process has opened, so processes using
//! several binder contexts have several `Process` objects. This ensures that the contexts are
//! fully separated.

use core::mem::take;

use kernel::{
    bindings,
    cred::Credential,
    error::Error,
    fs::file::{self, File},
    id_pool::IdPool,
    list::{List, ListArc, ListArcField, ListLinks},
    mm,
    prelude::*,
    rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
    seq_file::SeqFile,
    seq_print,
    sync::poll::PollTable,
    sync::{
        aref::ARef,
        lock::{spinlock::SpinLockBackend, Guard},
        Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
    },
    task::Task,
    uaccess::{UserPtr, UserSlice, UserSliceReader},
    uapi,
    workqueue::{self, Work},
};

use crate::{
    allocation::{Allocation, AllocationInfo, NewAllocation},
    context::Context,
    defs::*,
    error::{BinderError, BinderResult},
    node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
    page_range::ShrinkablePageRange,
    range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
    stats::BinderStats,
    thread::{PushWorkRes, Thread},
    BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
};

#[path = "freeze.rs"]
mod freeze;
use self::freeze::{FreezeCookie, FreezeListener};

struct Mapping {
    address: usize,
    alloc: RangeAllocator<AllocationInfo>,
}

impl Mapping {
    fn new(address: usize, size: usize) -> Self {
        Self {
            address,
            alloc: RangeAllocator::new(size),
        }
    }
}

// bitflags for defer_work.
const PROC_DEFER_FLUSH: u8 = 1;
const PROC_DEFER_RELEASE: u8 = 2;

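/// Whether this process is frozen, from the driver's point of view.
///
/// `InProgress` covers the window in `ioctl_freeze` where new incoming transactions are already
/// rejected, but freeze notifications do not yet report the process as frozen.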
#[derive(Copy, Clone)]
pub(crate) enum IsFrozen {
    Yes,
    No,
    InProgress,
}

impl IsFrozen {
    /// Whether incoming transactions should be rejected due to freeze.
    pub(crate) fn is_frozen(self) -> bool {
        match self {
            IsFrozen::Yes => true,
            IsFrozen::No => false,
            IsFrozen::InProgress => true,
        }
    }

    /// Whether freeze notifications consider this process frozen.
    pub(crate) fn is_fully_frozen(self) -> bool {
        match self {
            IsFrozen::Yes => true,
            IsFrozen::No => false,
            IsFrozen::InProgress => false,
        }
    }
}

/// The fields of `Process` protected by the spinlock.
pub(crate) struct ProcessInner {
    is_manager: bool,
    pub(crate) is_dead: bool,
    threads: RBTree<i32, Arc<Thread>>,
    /// INVARIANT: Threads pushed to this list must be owned by this process.
    ready_threads: List<Thread>,
    nodes: RBTree<u64, DArc<Node>>,
    mapping: Option<Mapping>,
    work: List<DTRWrap<dyn DeliverToRead>>,
    delivered_deaths: List<DTRWrap<NodeDeath>, 2>,

    /// The number of requested threads that haven't registered yet.
    requested_thread_count: u32,
    /// The maximum number of threads used by the process thread pool.
    max_threads: u32,
    /// The number of threads that have started and registered with the thread pool.
    started_thread_count: u32,

    /// Bitmap of deferred work to do.
    defer_work: u8,

    /// Number of transactions to be transmitted before processes in freeze_wait
    /// are woken up.
    outstanding_txns: u32,
    /// Process is frozen and unable to service binder transactions.
    pub(crate) is_frozen: IsFrozen,
    /// Process received sync transactions since last frozen.
    pub(crate) sync_recv: bool,
    /// Process received async transactions since last frozen.
    pub(crate) async_recv: bool,
    pub(crate) binderfs_file: Option<BinderfsProcFile>,
    /// Check for oneway spam
    oneway_spam_detection_enabled: bool,
}

impl ProcessInner {
    fn new() -> Self {
        Self {
            is_manager: false,
            is_dead: false,
            threads: RBTree::new(),
            ready_threads: List::new(),
            mapping: None,
            nodes: RBTree::new(),
            work: List::new(),
            delivered_deaths: List::new(),
            requested_thread_count: 0,
            max_threads: 0,
            started_thread_count: 0,
            defer_work: 0,
            outstanding_txns: 0,
            is_frozen: IsFrozen::No,
            sync_recv: false,
            async_recv: false,
            binderfs_file: None,
            oneway_spam_detection_enabled: false,
        }
    }

    /// Schedule the work item for execution on this process.
    ///
    /// If any threads are ready for work, then the work item is given directly to that thread and
    /// it is woken up. Otherwise, it is pushed to the process work list.
    ///
    /// This call can fail only if the process is dead. In this case, the work item is returned to
    /// the caller so that the caller can drop it after releasing the inner process lock. This is
    /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
    /// taken while holding the inner process lock.
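    ///
    /// An illustrative caller (a sketch, not a doctest) releases the lock before dropping a
    /// rejected work item, mirroring [`Process::push_work`] below:
    ///
    /// ```text
    /// let res = process.inner.lock().push_work(work);
    /// // The lock guard is dropped here, before the work item.
    /// if let Err((err, work)) = res {
    ///     drop(work); // Safe: no locks are held at this point.
    ///     return Err(err);
    /// }
    /// ```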
    pub(crate) fn push_work(
        &mut self,
        work: DLArc<dyn DeliverToRead>,
    ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
        // Try to find a ready thread to which to push the work.
        if let Some(thread) = self.ready_threads.pop_front() {
            // Push to thread while holding state lock. This prevents the thread from giving up
            // (for example, because of a signal) when we're about to deliver work.
            match thread.push_work(work) {
                PushWorkRes::Ok => Ok(()),
                PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
            }
        } else if self.is_dead {
            Err((BinderError::new_dead(), work))
        } else {
            let sync = work.should_sync_wakeup();

            // Didn't find a thread waiting for proc work; this can happen
            // in two scenarios:
            // 1. All threads are busy handling transactions
            //    In that case, one of those threads should call back into
            //    the kernel driver soon and pick up this work.
            // 2. Threads are using the (e)poll interface, in which case
            //    they may be blocked on the waitqueue without having been
            //    added to waiting_threads. For this case, we just iterate
            //    over all threads not handling transaction work, and
            //    wake them all up. We wake all because we don't know whether
            //    a thread that called into (e)poll is handling non-binder
            //    work currently.
            self.work.push_back(work);

            // Wake up polling threads, if any.
            for thread in self.threads.values() {
                thread.notify_if_poll_ready(sync);
            }

            Ok(())
        }
    }

    pub(crate) fn remove_node(&mut self, ptr: u64) {
        self.nodes.remove(&ptr);
    }

    /// Updates the reference count on the given node.
    pub(crate) fn update_node_refcount(
        &mut self,
        node: &DArc<Node>,
        inc: bool,
        strong: bool,
        count: usize,
        othread: Option<&Thread>,
    ) {
        let push = node.update_refcount_locked(inc, strong, count, self);

        // If we decided that we need to push work, push either to the process or to a thread if
        // one is specified.
        if let Some(node) = push {
            if let Some(thread) = othread {
                thread.push_work_deferred(node);
            } else {
                let _ = self.push_work(node);
                // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
                // that case, it doesn't care about the notification.
            }
        }
    }

    pub(crate) fn new_node_ref(
        &mut self,
        node: DArc<Node>,
        strong: bool,
        thread: Option<&Thread>,
    ) -> NodeRef {
        self.update_node_refcount(&node, true, strong, 1, thread);
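        // A freshly created `NodeRef` holds exactly one reference: strong if requested, weak
        // otherwise.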
        let strong_count = if strong { 1 } else { 0 };
        NodeRef::new(node, strong_count, 1 - strong_count)
    }

    pub(crate) fn new_node_ref_with_thread(
        &mut self,
        node: DArc<Node>,
        strong: bool,
        thread: &Thread,
        wrapper: Option<CritIncrWrapper>,
    ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
        let push = match wrapper {
            None => node
                .incr_refcount_allow_zero2one(strong, self)?
                .map(|node| node as _),
            Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
        };
        if let Some(node) = push {
            thread.push_work_deferred(node);
        }
        let strong_count = if strong { 1 } else { 0 };
        Ok(NodeRef::new(node, strong_count, 1 - strong_count))
    }

    /// Returns an existing node with the given pointer and cookie, if one exists.
    ///
    /// Returns an error if a node with the given pointer but a different cookie exists.
    fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
        match self.nodes.get(&ptr) {
            None => Ok(None),
            Some(node) => {
                let (_, node_cookie) = node.get_id();
                if node_cookie == cookie {
                    Ok(Some(node.clone()))
                } else {
                    Err(EINVAL)
                }
            }
        }
    }

    fn register_thread(&mut self) -> bool {
        if self.requested_thread_count == 0 {
            return false;
        }

        self.requested_thread_count -= 1;
        self.started_thread_count += 1;
        true
    }

    /// Finds a delivered death notification with the given cookie, removes it from the process's
    /// delivered list, and returns it.
    fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
        let mut cursor = self.delivered_deaths.cursor_front();
        while let Some(next) = cursor.peek_next() {
            if next.cookie == cookie {
                return Some(next.remove().into_arc());
            }
            cursor.move_next();
        }
        None
    }

    pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
        if let Some(death) = ListArc::try_from_arc_or_drop(death) {
            self.delivered_deaths.push_back(death);
        } else {
            pr_warn!("Notification added to `delivered_deaths` twice.");
        }
    }

    pub(crate) fn add_outstanding_txn(&mut self) {
        self.outstanding_txns += 1;
    }

    fn txns_pending_locked(&self) -> bool {
        if self.outstanding_txns > 0 {
            return true;
        }
        for thread in self.threads.values() {
            if thread.has_current_transaction() {
                return true;
            }
        }
        false
    }
}

/// Used to keep track of a node that this process has a handle to.
#[pin_data]
pub(crate) struct NodeRefInfo {
    debug_id: usize,
    /// The refcount that this process owns to the node.
    node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
    death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
    /// Cookie of the active freeze listener for this node.
    freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
    /// Used to store this `NodeRefInfo` in the node's `refs` list.
    #[pin]
    links: ListLinks<{ Self::LIST_NODE }>,
    /// The handle for this `NodeRefInfo`.
    handle: u32,
    /// The process that has a handle to the node.
    pub(crate) process: Arc<Process>,
}

impl NodeRefInfo {
    /// The id used for the `Node::refs` list.
    pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
    /// The id used for the `ListArc` in `ProcessNodeRefs`.
    const LIST_PROC: u64 = 0xd703a5263dcc8650;

    fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
        pin_init!(Self {
            debug_id: super::next_debug_id(),
            node_ref: ListArcField::new(node_ref),
            death: ListArcField::new(None),
            freeze: ListArcField::new(None),
            links <- ListLinks::new(),
            handle,
            process,
        })
    }

    kernel::list::define_list_arc_field_getter! {
        pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
        pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
        pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
        pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
    }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
    impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
}
kernel::list::impl_list_item! {
    impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
        using ListLinks { self.links };
    }
}

/// Keeps track of references this process has to nodes owned by other processes.
///
/// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
/// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
/// extra costs should be eliminated.
struct ProcessNodeRefs {
    /// Used to look up nodes using the 32-bit id that this process knows it by.
    by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
    /// Used to quickly find unused ids in `by_handle`.
    handle_is_present: IdPool,
    /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
    /// the underlying `Node` struct as returned by `Node::global_id`.
    by_node: RBTree<usize, u32>,
    /// Used to look up a `FreezeListener` by cookie.
    ///
    /// There might be multiple freeze listeners for the same node, but at most one of them is
    /// active.
    freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
}

impl ProcessNodeRefs {
    fn new() -> Self {
        Self {
            by_handle: RBTree::new(),
            handle_is_present: IdPool::new(),
            by_node: RBTree::new(),
            freeze_listeners: RBTree::new(),
        }
    }
}

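// Field offsets describing the in-memory layout of `Process`. The `rb_process_layout` struct
// comes from the C bindings, so these constants are shared with the C side (hedged: presumably
// so that C code can locate the `Process` and its task from a raw pointer).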
use core::mem::offset_of;
use kernel::bindings::rb_process_layout;
pub(crate) const PROCESS_LAYOUT: rb_process_layout = rb_process_layout {
    arc_offset: Arc::<Process>::DATA_OFFSET,
    task: offset_of!(Process, task),
};

/// A process using binder.
///
/// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
/// that a process has opened, so processes using several binder contexts have several `Process`
/// objects. This ensures that the contexts are fully separated.
#[pin_data]
pub(crate) struct Process {
    pub(crate) ctx: Arc<Context>,

    // The task leader (process).
    pub(crate) task: ARef<Task>,

    // Credential associated with file when `Process` is created.
    pub(crate) cred: ARef<Credential>,

    #[pin]
    pub(crate) inner: SpinLock<ProcessInner>,

    #[pin]
    pub(crate) pages: ShrinkablePageRange,

    // Waitqueue of processes waiting for all outstanding transactions to be
    // processed.
    #[pin]
    freeze_wait: CondVar,

    // Node references are in a different lock to avoid recursive acquisition when
    // incrementing/decrementing a node in another process.
    #[pin]
    node_refs: Mutex<ProcessNodeRefs>,

    // Work node for deferred work item.
    #[pin]
    defer_work: Work<Process>,

    // Links for process list in Context.
    #[pin]
    links: ListLinks,

    pub(crate) stats: BinderStats,
}

kernel::impl_has_work! {
    impl HasWork<Process> for Process { self.defer_work }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Process { untracked; }
}
kernel::list::impl_list_item! {
    impl ListItem<0> for Process {
        using ListLinks { self.links };
    }
}

impl workqueue::WorkItem for Process {
    type Pointer = Arc<Process>;

    fn run(me: Arc<Self>) {
        let defer;
        {
            let mut inner = me.inner.lock();
            defer = inner.defer_work;
            inner.defer_work = 0;
        }

        if defer & PROC_DEFER_FLUSH != 0 {
            me.deferred_flush();
        }
        if defer & PROC_DEFER_RELEASE != 0 {
            me.deferred_release();
        }
    }
}

impl Process {
    fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
        let current = kernel::current!();
        let process = Arc::pin_init::<Error>(
            try_pin_init!(Process {
                ctx,
                cred,
                inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
                pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
                node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
                freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
                task: current.group_leader().into(),
                defer_work <- kernel::new_work!("Process::defer_work"),
                links <- ListLinks::new(),
                stats: BinderStats::new(),
            }),
            GFP_KERNEL,
        )?;

        process.ctx.register_process(process.clone())?;

        Ok(process)
    }

    pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
        self.task.tgid_nr_ns(None)
    }

    #[inline(never)]
    pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
        seq_print!(m, "proc {}\n", self.pid_in_current_ns());
        seq_print!(m, "context {}\n", &*ctx.name);

        let inner = self.inner.lock();
        seq_print!(m, "  threads: {}\n", inner.threads.iter().count());
        seq_print!(
            m,
            "  requested threads: {}+{}/{}\n",
            inner.requested_thread_count,
            inner.started_thread_count,
            inner.max_threads,
        );
        if let Some(mapping) = &inner.mapping {
            seq_print!(
                m,
                "  free oneway space: {}\n",
                mapping.alloc.free_oneway_space()
            );
            seq_print!(m, "  buffers: {}\n", mapping.alloc.count_buffers());
        }
        seq_print!(
            m,
            "  outstanding transactions: {}\n",
            inner.outstanding_txns
        );
        seq_print!(m, "  nodes: {}\n", inner.nodes.iter().count());
        drop(inner);

        {
            let mut refs = self.node_refs.lock();
            let (mut count, mut weak, mut strong) = (0, 0, 0);
            for r in refs.by_handle.values_mut() {
                let node_ref = r.node_ref();
                let (nstrong, nweak) = node_ref.get_count();
                count += 1;
                weak += nweak;
                strong += nstrong;
            }
            seq_print!(m, "  refs: {count} s {strong} w {weak}\n");
        }

        self.stats.debug_print("  ", m);

        Ok(())
    }

    #[inline(never)]
    pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
        seq_print!(m, "proc {}\n", self.pid_in_current_ns());
        seq_print!(m, "context {}\n", &*ctx.name);

        let mut all_threads = KVec::new();
        let mut all_nodes = KVec::new();
        loop {
            let inner = self.inner.lock();
            let num_threads = inner.threads.iter().count();
            let num_nodes = inner.nodes.iter().count();

            if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
                drop(inner);
                all_threads.reserve(num_threads, GFP_KERNEL)?;
                all_nodes.reserve(num_nodes, GFP_KERNEL)?;
                continue;
            }

            for thread in inner.threads.values() {
                assert!(all_threads.len() < all_threads.capacity());
                let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
            }

            for node in inner.nodes.values() {
                assert!(all_nodes.len() < all_nodes.capacity());
                let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
            }

            break;
        }

        for thread in all_threads {
            thread.debug_print(m, print_all)?;
        }

        let mut inner = self.inner.lock();
        for node in all_nodes {
            if print_all || node.has_oneway_transaction(&mut inner) {
                node.full_debug_print(m, &mut inner)?;
            }
        }
        drop(inner);

        if print_all {
            let mut refs = self.node_refs.lock();
            for r in refs.by_handle.values_mut() {
                let node_ref = r.node_ref();
                let dead = node_ref.node.owner.inner.lock().is_dead;
                let (strong, weak) = node_ref.get_count();
                let debug_id = node_ref.node.debug_id;

                seq_print!(
                    m,
                    "  ref {}: desc {} {}node {debug_id} s {strong} w {weak}\n",
                    r.debug_id,
                    r.handle,
                    if dead { "dead " } else { "" }
                );
            }
        }

        let inner = self.inner.lock();
        for work in &inner.work {
            work.debug_print(m, "  ", "  pending transaction ")?;
        }
        for _death in &inner.delivered_deaths {
            seq_print!(m, "  has delivered dead binder\n");
        }
        if let Some(mapping) = &inner.mapping {
            mapping.alloc.debug_print(m)?;
        }
        drop(inner);

        Ok(())
    }

    /// Attempts to fetch a work item from the process queue.
    pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
        self.inner.lock().work.pop_front()
    }

    /// Attempts to fetch a work item from the process queue. If none is available, it registers
    /// the given thread as ready to receive work directly.
    ///
    /// This must only be called when the thread is not participating in a transaction chain; when
    /// it is, work will always be delivered directly to the thread (and not through the process
    /// queue).
    pub(crate) fn get_work_or_register<'a>(
        &'a self,
        thread: &'a Arc<Thread>,
    ) -> GetWorkOrRegister<'a> {
        let mut inner = self.inner.lock();
        // Try to get work from the process queue.
        if let Some(work) = inner.work.pop_front() {
            return GetWorkOrRegister::Work(work);
        }

        // Register the thread as ready.
        GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
    }

    fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
        let id = {
            let current = kernel::current!();
            if !core::ptr::eq(current.group_leader(), &*self.task) {
                pr_err!("get_current_thread was called from the wrong process.");
                return Err(EINVAL);
            }
            current.pid()
        };

        {
            let inner = self.inner.lock();
            if let Some(thread) = inner.threads.get(&id) {
                return Ok(thread.clone());
            }
        }

        // Allocate a new `Thread` without holding any locks.
        let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
        let ta: Arc<Thread> = Thread::new(id, self.into())?;

        let mut inner = self.inner.lock();
        match inner.threads.entry(id) {
            rbtree::Entry::Vacant(entry) => {
                entry.insert(ta.clone(), reservation);
                Ok(ta)
            }
            rbtree::Entry::Occupied(_entry) => {
                pr_err!("Cannot create two threads with the same id.");
                Err(EINVAL)
            }
        }
    }

    pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
        // If push_work fails, drop the work item outside the lock.
        let res = self.inner.lock().push_work(work);
        match res {
            Ok(()) => Ok(()),
            Err((err, work)) => {
                drop(work);
                Err(err)
            }
        }
    }

    fn set_as_manager(
        self: ArcBorrow<'_, Self>,
        info: Option<FlatBinderObject>,
        thread: &Thread,
    ) -> Result {
        let (ptr, cookie, flags) = if let Some(obj) = info {
            (
                // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
                // is safe to access the `binder` field.
                unsafe { obj.__bindgen_anon_1.binder },
                obj.cookie,
                obj.flags,
            )
        } else {
            (0, 0, 0)
        };
        let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
        let node = node_ref.node.clone();
        self.ctx.set_manager_node(node_ref)?;
        self.inner.lock().is_manager = true;

        // Force the state of the node to prevent the delivery of acquire/increfs.
        let mut owner_inner = node.owner.inner.lock();
        node.force_has_count(&mut owner_inner);
        Ok(())
    }

    fn get_node_inner(
        self: ArcBorrow<'_, Self>,
        ptr: u64,
        cookie: u64,
        flags: u32,
        strong: bool,
        thread: &Thread,
        wrapper: Option<CritIncrWrapper>,
    ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
        // Try to find an existing node.
        {
            let mut inner = self.inner.lock();
            if let Some(node) = inner.get_existing_node(ptr, cookie)? {
                return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
            }
        }

        // Allocate the node before reacquiring the lock.
        let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
        let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
        let mut inner = self.inner.lock();
        if let Some(node) = inner.get_existing_node(ptr, cookie)? {
            return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
        }

        inner.nodes.insert(rbnode);
        // This can only fail if someone has already pushed the node to a list, but we just created
        // it and still hold the lock, so it can't fail right now.
        let node_ref = inner
            .new_node_ref_with_thread(node, strong, thread, wrapper)
            .unwrap();

        Ok(Ok(node_ref))
    }

    pub(crate) fn get_node(
        self: ArcBorrow<'_, Self>,
        ptr: u64,
        cookie: u64,
        flags: u32,
        strong: bool,
        thread: &Thread,
    ) -> Result<NodeRef> {
        let mut wrapper = None;
        for _ in 0..2 {
            match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
                Err(err) => return Err(err),
                Ok(Ok(node_ref)) => return Ok(node_ref),
                Ok(Err(CouldNotDeliverCriticalIncrement)) => {
                    wrapper = Some(CritIncrWrapper::new()?);
                }
            }
        }
        // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
        // loop should run at most twice.
        unreachable!()
    }

    pub(crate) fn insert_or_update_handle(
        self: ArcBorrow<'_, Process>,
        node_ref: NodeRef,
        is_manager: bool,
    ) -> Result<u32> {
        {
            let mut refs = self.node_refs.lock();

            // Do a lookup before inserting.
            if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
                let handle = *handle_ref;
                let info = refs.by_handle.get_mut(&handle).unwrap();
                info.node_ref().absorb(node_ref);
                return Ok(handle);
            }
        }

        // Reserve memory for tree nodes.
        let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
        let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
        let info = UniqueArc::new_uninit(GFP_KERNEL)?;

        let mut refs_lock = self.node_refs.lock();
        let mut refs = &mut *refs_lock;

        let (unused_id, by_handle_slot) = loop {
            // ID 0 may only be used by the manager.
            let start = if is_manager { 0 } else { 1 };

            if let Some(res) = refs.handle_is_present.find_unused_id(start) {
                match refs.by_handle.entry(res.as_u32()) {
                    rbtree::Entry::Vacant(entry) => break (res, entry),
                    rbtree::Entry::Occupied(_) => {
                        pr_err!("Detected mismatch between handle_is_present and by_handle");
                        res.acquire();
                        kernel::warn_on!(true);
                        return Err(EINVAL);
                    }
                }
            }

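            // The pool has no free ids: grow it with the mutex dropped (the realloc may sleep),
            // then retake the lock and retry. Another caller may race and grow the pool first;
            // the retry handles that case.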
            let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
            drop(refs_lock);
            let resizer = grow_request.realloc(GFP_KERNEL)?;
            refs_lock = self.node_refs.lock();
            refs = &mut *refs_lock;
            refs.handle_is_present.grow(resizer);
        };
        let handle = unused_id.as_u32();

        // Do a lookup again as node may have been inserted before the lock was reacquired.
        if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
            let handle = *handle_ref;
            let info = refs.by_handle.get_mut(&handle).unwrap();
            info.node_ref().absorb(node_ref);
            return Ok(handle);
        }

        let gid = node_ref.node.global_id();
        let (info_proc, info_node) = {
            let info_init = NodeRefInfo::new(node_ref, handle, self.into());
            match info.pin_init_with(info_init) {
                Ok(info) => ListArc::pair_from_pin_unique(info),
                // error is infallible
                Err(err) => match err {},
            }
        };

        // Ensure the process is still alive while we insert a new reference.
        //
        // This releases the lock before inserting the nodes, but since `is_dead` is set as the
        // first thing in `deferred_release`, process cleanup will not miss the items inserted into
        // `refs` below.
        if self.inner.lock().is_dead {
            return Err(ESRCH);
        }

        // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
        // `info_node` into the right node's `refs` list.
        unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };

        refs.by_node.insert(reserve1.into_node(gid, handle));
        by_handle_slot.insert(info_proc, reserve2);
        unused_id.acquire();
        Ok(handle)
    }

    pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
        // When handle is zero, try to get the context manager.
        if handle == 0 {
            Ok(self.ctx.get_manager_node(true)?)
        } else {
            Ok(self.get_node_from_handle(handle, true)?)
        }
    }

    pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
        self.node_refs
            .lock()
            .by_handle
            .get_mut(&handle)
            .ok_or(ENOENT)?
            .node_ref()
            .clone(strong)
    }

    pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
        let mut inner = self.inner.lock();
        // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
        let removed = unsafe { inner.delivered_deaths.remove(death) };
        drop(inner);
        drop(removed);
    }

    pub(crate) fn update_ref(
        self: ArcBorrow<'_, Process>,
        handle: u32,
        inc: bool,
        strong: bool,
    ) -> Result {
        if inc && handle == 0 {
            if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
                if core::ptr::eq(&*self, &*node_ref.node.owner) {
                    return Err(EINVAL);
                }
                let _ = self.insert_or_update_handle(node_ref, true);
                return Ok(());
            }
        }

        // To preserve original binder behaviour, we only fail requests where the manager tries to
        // increment references on itself.
        let mut refs = self.node_refs.lock();
        if let Some(info) = refs.by_handle.get_mut(&handle) {
            if info.node_ref().update(inc, strong) {
                // Clean up death if there is one attached to this node reference.
                if let Some(death) = info.death().take() {
                    death.set_cleared(true);
                    self.remove_from_delivered_deaths(&death);
                }

                // Remove reference from process tables, and from the node's `refs` list.

                // SAFETY: We are removing the `NodeRefInfo` from the right node.
                unsafe { info.node_ref2().node.remove_node_info(info) };

                let id = info.node_ref().node.global_id();
                refs.by_handle.remove(&handle);
                refs.by_node.remove(&id);
                refs.handle_is_present.release_id(handle as usize);

                if let Some(shrink) = refs.handle_is_present.shrink_request() {
                    drop(refs);
                    // This intentionally ignores allocation failures.
                    if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
                        refs = self.node_refs.lock();
                        refs.handle_is_present.shrink(new_bitmap);
                    }
                }
            }
        } else {
            // All refs are cleared in process exit, so this warning is expected in that case.
            if !self.inner.lock().is_dead {
                pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
            }
        }
        Ok(())
    }

    /// Decrements the refcount of the given node, if one exists.
    pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
        let mut inner = self.inner.lock();
        if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
            inner.update_node_refcount(&node, false, strong, 1, None);
        }
    }

    pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
        let ptr = reader.read::<u64>()?;
        let cookie = reader.read::<u64>()?;
        let mut inner = self.inner.lock();
        if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
            if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
                // This only fails if the process is dead.
                let _ = inner.push_work(node);
            }
        }
        Ok(())
    }

    pub(crate) fn buffer_alloc(
        self: &Arc<Self>,
        debug_id: usize,
        size: usize,
        is_oneway: bool,
        from_pid: i32,
    ) -> BinderResult<NewAllocation> {
        use kernel::page::PAGE_SIZE;

        let mut reserve_new_args = ReserveNewArgs {
            debug_id,
            size,
            is_oneway,
            pid: from_pid,
            ..ReserveNewArgs::default()
        };

        let (new_alloc, addr) = loop {
            let mut inner = self.inner.lock();
            let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
            let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
                ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
                ReserveNew::NeedAlloc(request) => request,
            };
            drop(inner);
            // We need to allocate memory and then call `reserve_new` again.
            reserve_new_args = alloc_request.make_alloc()?;
        };

        let res = Allocation::new(
            self.clone(),
            debug_id,
            new_alloc.offset,
            size,
            addr + new_alloc.offset,
            new_alloc.oneway_spam_detected,
        );

        // This allocation will be marked as in use until the `Allocation` is used to free it.
        //
        // This method can't be called while holding a lock, so we release the lock first. It's
        // okay for several threads to use the method on the same index at the same time. In that
        // case, one of the calls will allocate the given page (if missing), and the other call
        // will wait for the other call to finish allocating the page.
        //
        // We will not call `stop_using_range` in parallel with this on the same page, because the
        // allocation can only be removed via the destructor of the `Allocation` object that we
        // currently own.
        match self.pages.use_range(
            new_alloc.offset / PAGE_SIZE,
            (new_alloc.offset + size).div_ceil(PAGE_SIZE),
        ) {
            Ok(()) => {}
            Err(err) => {
                pr_warn!("use_range failure {:?}", err);
                return Err(err.into());
            }
        }

        Ok(NewAllocation(res))
    }

    pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
        let mut inner = self.inner.lock();
        let mapping = inner.mapping.as_mut()?;
        let offset = ptr.checked_sub(mapping.address)?;
        let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
        let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
        if let Some(data) = odata {
            alloc.set_info(data);
        }
        Some(alloc)
    }

    pub(crate) fn buffer_raw_free(&self, ptr: usize) {
        let mut inner = self.inner.lock();
        if let Some(ref mut mapping) = &mut inner.mapping {
            let offset = match ptr.checked_sub(mapping.address) {
                Some(offset) => offset,
                None => return,
            };

            let freed_range = match mapping.alloc.reservation_abort(offset) {
                Ok(freed_range) => freed_range,
                Err(_) => {
                    pr_warn!(
                        "Pointer {:x} failed to free, base = {:x}\n",
                        ptr,
                        mapping.address
                    );
                    return;
                }
            };

            // No more allocations in this range. Mark them as not in use.
            //
            // Must be done before we release the lock so that `use_range` is not used on these
            // indices until `stop_using_range` returns.
            self.pages
                .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
        }
    }

    pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
        let mut inner = self.inner.lock();
        if let Some(ref mut mapping) = &mut inner.mapping {
            if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
                pr_warn!("Offset {} failed to be marked freeable\n", offset);
            }
        }
    }

    fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
        use kernel::page::PAGE_SIZE;
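        // Cap the mapping at 4 MiB, matching the C binder driver's limit on the vma size.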
        let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
        let mapping = Mapping::new(vma.start(), size);
        let page_count = self.pages.register_with_vma(vma)?;
        if page_count * PAGE_SIZE != size {
            return Err(EINVAL);
        }

        // Save range allocator for later.
        self.inner.lock().mapping = Some(mapping);

        Ok(())
    }

    fn version(&self, data: UserSlice) -> Result {
        data.writer().write(&BinderVersion::current())
    }

    pub(crate) fn register_thread(&self) -> bool {
        self.inner.lock().register_thread()
    }

    fn remove_thread(&self, thread: Arc<Thread>) {
        self.inner.lock().threads.remove(&thread.id);
        thread.release();
    }

    fn set_max_threads(&self, max: u32) {
        self.inner.lock().max_threads = max;
    }

    fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
        self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
    }

    pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
        self.inner.lock().oneway_spam_detection_enabled
    }

    fn get_node_debug_info(&self, data: UserSlice) -> Result {
        let (mut reader, mut writer) = data.reader_writer();

        // Read the starting point.
        let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
        let mut out = BinderNodeDebugInfo::default();

        {
            let inner = self.inner.lock();
            for (node_ptr, node) in &inner.nodes {
                if *node_ptr > ptr {
                    node.populate_debug_info(&mut out, &inner);
                    break;
                }
            }
        }

        writer.write(&out)
    }

    fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
        let (mut reader, mut writer) = data.reader_writer();
        let mut out = reader.read::<BinderNodeInfoForRef>()?;

        if out.strong_count != 0
            || out.weak_count != 0
            || out.reserved1 != 0
            || out.reserved2 != 0
            || out.reserved3 != 0
        {
            return Err(EINVAL);
        }

        // Only the context manager is allowed to use this ioctl.
        if !self.inner.lock().is_manager {
            return Err(EPERM);
        }

        {
            let mut node_refs = self.node_refs.lock();
            let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
            let node_ref = node_info.node_ref();
            let owner_inner = node_ref.node.owner.inner.lock();
            node_ref.node.populate_counts(&mut out, &owner_inner);
        }

        // Write the result back.
        writer.write(&out)
    }

    pub(crate) fn needs_thread(&self) -> bool {
        let mut inner = self.inner.lock();
        let ret = inner.requested_thread_count == 0
            && inner.ready_threads.is_empty()
            && inner.started_thread_count < inner.max_threads;
        if ret {
            inner.requested_thread_count += 1
        }
        ret
    }

    pub(crate) fn request_death(
        self: &Arc<Self>,
        reader: &mut UserSliceReader,
        thread: &Thread,
    ) -> Result {
        let handle: u32 = reader.read()?;
        let cookie: u64 = reader.read()?;

        // Queue BR_ERROR if we can't allocate memory for the death notification.
        let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
            thread.push_return_work(BR_ERROR);
        })?;
        let mut refs = self.node_refs.lock();
        let Some(info) = refs.by_handle.get_mut(&handle) else {
            pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
            return Ok(());
        };

        // Nothing to do if there is already a death notification request for this handle.
        if info.death().is_some() {
            pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
            return Ok(());
        }

        let death = {
            let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
            match death.pin_init_with(death_init) {
                Ok(death) => death,
                // error is infallible
                Err(err) => match err {},
            }
        };

        // Register the death notification.
        {
            let owner = info.node_ref2().node.owner.clone();
            let mut owner_inner = owner.inner.lock();
            if owner_inner.is_dead {
                let death = Arc::from(death);
                *info.death() = Some(death.clone());
                drop(owner_inner);
                death.set_dead();
            } else {
                let death = ListArc::from(death);
                *info.death() = Some(death.clone_arc());
                info.node_ref().node.add_death(death, &mut owner_inner);
            }
        }
        Ok(())
    }

    pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
        let handle: u32 = reader.read()?;
        let cookie: u64 = reader.read()?;

        let mut refs = self.node_refs.lock();
        let Some(info) = refs.by_handle.get_mut(&handle) else {
            pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
            return Ok(());
        };

        let Some(death) = info.death().take() else {
            pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
            return Ok(());
        };
        if death.cookie != cookie {
            *info.death() = Some(death);
            pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
            return Ok(());
        }

        // Update state and determine if we need to queue a work item. We only need to do it when
        // the node is not dead or if the user already completed the death notification.
        if death.set_cleared(false) {
            if let Some(death) = ListArc::try_from_arc_or_drop(death) {
                let _ = thread.push_work_if_looper(death);
            }
        }

        Ok(())
    }

    pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
        let death = self.inner.lock().pull_delivered_death(cookie);
        if let Some(death) = death {
            death.set_notification_done(thread);
        }
    }

    /// Locks the spinlock and moves the `nodes` rbtree out.
    ///
    /// This allows you to iterate through `nodes` while also allowing you to give other parts of
    /// the codebase exclusive access to `ProcessInner`.
    pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
        let mut inner = self.inner.lock();
        WithNodes {
            nodes: take(&mut inner.nodes),
            inner,
        }
    }

    fn deferred_flush(&self) {
        let inner = self.inner.lock();
        for thread in inner.threads.values() {
            thread.exit_looper();
        }
    }

    fn deferred_release(self: Arc<Self>) {
        let is_manager = {
            let mut inner = self.inner.lock();
            inner.is_dead = true;
            inner.is_frozen = IsFrozen::No;
            inner.sync_recv = false;
            inner.async_recv = false;
            inner.is_manager
        };

        if is_manager {
            self.ctx.unset_manager_node();
        }

        self.ctx.deregister_process(&self);

        let binderfs_file = self.inner.lock().binderfs_file.take();
        drop(binderfs_file);

        // Release threads.
        let threads = {
            let mut inner = self.inner.lock();
            let threads = take(&mut inner.threads);
            let ready = take(&mut inner.ready_threads);
            drop(inner);
            drop(ready);

            for thread in threads.values() {
                thread.release();
            }
            threads
        };

        // Release nodes.
        {
            while let Some(node) = {
                let mut lock = self.inner.lock();
                lock.nodes.cursor_front_mut().map(|c| c.remove_current().1)
            } {
                node.to_key_value().1.release();
            }
        }

        // Clean up death listeners and remove nodes from external node info lists.
        for info in self.node_refs.lock().by_handle.values_mut() {
            // SAFETY: We are removing the `NodeRefInfo` from the right node.
            unsafe { info.node_ref2().node.remove_node_info(info) };

            // Remove all death notifications from the nodes (that belong to a different process).
            let death = if let Some(existing) = info.death().take() {
                existing
            } else {
                continue;
            };
            death.set_cleared(false);
        }

        // Clean up freeze listeners.
        let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
        for listener in freeze_listeners.values() {
            listener.on_process_exit(&self);
        }
        drop(freeze_listeners);

        // Release refs on foreign nodes.
        {
            let mut refs = self.node_refs.lock();
            let by_handle = take(&mut refs.by_handle);
            let by_node = take(&mut refs.by_node);
            drop(refs);
            drop(by_node);
            drop(by_handle);
        }

        // Cancel all pending work items.
        while let Some(work) = self.get_work() {
            work.into_arc().cancel();
        }

        // Clear delivered_deaths list.
        //
        // Scope ensures that MutexGuard is dropped while executing the body.
        while let Some(delivered_death) = { self.inner.lock().delivered_deaths.pop_front() } {
            drop(delivered_death);
        }

        // Free any resources kept alive by allocated buffers.
        let omapping = self.inner.lock().mapping.take();
        if let Some(mut mapping) = omapping {
            let address = mapping.address;
            mapping
                .alloc
                .take_for_each(|offset, size, debug_id, odata| {
                    let ptr = offset + address;
                    let mut alloc =
                        Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
                    if let Some(data) = odata {
                        alloc.set_info(data);
                    }
                    drop(alloc)
                });
        }

        // calls to synchronize_rcu() in thread drop will happen here
        drop(threads);
    }

    pub(crate) fn drop_outstanding_txn(&self) {
        let wake = {
            let mut inner = self.inner.lock();
            if inner.outstanding_txns == 0 {
                pr_err!("outstanding_txns underflow");
                return;
            }
            inner.outstanding_txns -= 1;
            inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
        };

        if wake {
            self.freeze_wait.notify_all();
        }
    }

    pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
        if info.enable == 0 {
            let msgs = self.prepare_freeze_messages()?;
            let mut inner = self.inner.lock();
            inner.sync_recv = false;
            inner.async_recv = false;
            inner.is_frozen = IsFrozen::No;
            drop(inner);
            msgs.send_messages();
            return Ok(());
        }

        let mut inner = self.inner.lock();
        inner.sync_recv = false;
        inner.async_recv = false;
        inner.is_frozen = IsFrozen::InProgress;

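        // Wait (the condvar drops the spinlock while sleeping) until all outstanding transactions
        // have completed, a signal arrives, or the requested timeout expires.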
        if info.timeout_ms > 0 {
            let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
            while jiffies > 0 {
                if inner.outstanding_txns == 0 {
                    break;
                }

                match self
                    .freeze_wait
                    .wait_interruptible_timeout(&mut inner, jiffies)
                {
                    CondVarTimeoutResult::Signal { .. } => {
                        inner.is_frozen = IsFrozen::No;
                        return Err(ERESTARTSYS);
                    }
                    CondVarTimeoutResult::Woken { jiffies: remaining } => {
                        jiffies = remaining;
                    }
                    CondVarTimeoutResult::Timeout => {
                        jiffies = 0;
                    }
                }
            }
        }

        if inner.txns_pending_locked() {
            inner.is_frozen = IsFrozen::No;
            Err(EAGAIN)
        } else {
            drop(inner);
            match self.prepare_freeze_messages() {
                Ok(batch) => {
                    self.inner.lock().is_frozen = IsFrozen::Yes;
                    batch.send_messages();
                    Ok(())
                }
                Err(kernel::alloc::AllocError) => {
                    self.inner.lock().is_frozen = IsFrozen::No;
                    Err(ENOMEM)
                }
            }
        }
    }
}

fn get_frozen_status(data: UserSlice) -> Result {
    let (mut reader, mut writer) = data.reader_writer();

    let mut info = reader.read::<BinderFrozenStatusInfo>()?;
    info.sync_recv = 0;
    info.async_recv = 0;
    let mut found = false;

    for ctx in crate::context::get_all_contexts()? {
        ctx.for_each_proc(|proc| {
            if proc.task.pid() == info.pid as _ {
                found = true;
                let inner = proc.inner.lock();
                let txns_pending = inner.txns_pending_locked();
                info.async_recv |= inner.async_recv as u32;
                info.sync_recv |= inner.sync_recv as u32;
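                // Bit 0 of sync_recv reports whether a sync transaction was received since the
                // process was frozen; bit 1 reports whether transactions are still pending.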
                info.sync_recv |= (txns_pending as u32) << 1;
            }
        });
    }

    if found {
        writer.write(&info)?;
        Ok(())
    } else {
        Err(EINVAL)
    }
}

fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
    let info = reader.read::<BinderFreezeInfo>()?;

    // Very unlikely for there to be more than 3, since a process normally uses at most binder and
    // hwbinder.
    let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;

    let ctxs = crate::context::get_all_contexts()?;
    for ctx in ctxs {
        for proc in ctx.get_procs_with_pid(info.pid as i32)? {
            procs.push(proc, GFP_KERNEL)?;
        }
    }

    for proc in procs {
        proc.ioctl_freeze(&info)?;
    }
    Ok(())
}

/// The ioctl handler.
impl Process {
    /// Ioctls that are write-only from the perspective of userspace.
    ///
    /// The kernel will only read from the pointer that userspace provided to us.
    fn ioctl_write_only(
        this: ArcBorrow<'_, Process>,
        _file: &File,
        cmd: u32,
        reader: &mut UserSliceReader,
    ) -> Result {
        let thread = this.get_current_thread()?;
        match cmd {
            uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
            uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
            uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
            uapi::BINDER_SET_CONTEXT_MGR_EXT => {
                this.set_as_manager(Some(reader.read()?), &thread)?
            }
            uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
                this.set_oneway_spam_detection_enabled(reader.read()?)
            }
            uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
            _ => return Err(EINVAL),
        }
        Ok(())
    }

    /// Ioctls that are read/write from the perspective of userspace.
    ///
    /// The kernel will both read from and write to the pointer that userspace provided to us.
    fn ioctl_write_read(
        this: ArcBorrow<'_, Process>,
        file: &File,
        cmd: u32,
        data: UserSlice,
    ) -> Result {
        let thread = this.get_current_thread()?;
        let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
        match cmd {
            uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
            uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
            uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
            uapi::BINDER_VERSION => this.version(data)?,
            uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
            uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
            _ => return Err(EINVAL),
        }
        Ok(())
    }
}

/// The file operations supported by `Process`.
impl Process {
    pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
        Self::new(ctx.into(), ARef::from(file.cred()))
    }

    pub(crate) fn release(this: Arc<Process>, _file: &File) {
        let binderfs_file;
        let should_schedule;
        {
            let mut inner = this.inner.lock();
            should_schedule = inner.defer_work == 0;
            inner.defer_work |= PROC_DEFER_RELEASE;
            binderfs_file = inner.binderfs_file.take();
        }

        if should_schedule {
            // Ignore failures to schedule to the workqueue. Those just mean that we're already
            // scheduled for execution.
            let _ = workqueue::system().enqueue(this);
        }

        drop(binderfs_file);
    }

    pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
        let should_schedule;
        {
            let mut inner = this.inner.lock();
            should_schedule = inner.defer_work == 0;
            inner.defer_work |= PROC_DEFER_FLUSH;
        }

        if should_schedule {
            // Ignore failures to schedule to the workqueue. Those just mean that we're already
            // scheduled for execution.
            let _ = workqueue::system().enqueue(Arc::from(this));
        }
        Ok(())
    }

    pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
        use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
        use kernel::uapi::{_IOC_READ, _IOC_WRITE};

        crate::trace::trace_ioctl(cmd, arg);

        let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));

        const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;

        match _IOC_DIR(cmd) {
            _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
            _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
            _ => Err(EINVAL),
        }
    }

    pub(crate) fn mmap(
        this: ArcBorrow<'_, Process>,
        _file: &File,
        vma: &mm::virt::VmaNew,
    ) -> Result {
        // We don't allow mmap to be used in a different process.
        if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
            return Err(EINVAL);
        }
        if vma.start() == 0 {
            return Err(EINVAL);
        }

        vma.try_clear_maywrite().map_err(|_| EPERM)?;
        vma.set_dontcopy();
        vma.set_mixedmap();

        // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
        this.create_mapping(vma)
    }

    pub(crate) fn poll(
        this: ArcBorrow<'_, Process>,
        file: &File,
        table: PollTable<'_>,
    ) -> Result<u32> {
        let thread = this.get_current_thread()?;
        let (from_proc, mut mask) = thread.poll(file, table);
        if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
            mask |= bindings::POLLIN;
        }
        Ok(mask)
    }
}

/// Represents that a thread has registered with the `ready_threads` list of its process.
///
/// The destructor of this type will unregister the thread from the list of ready threads.
pub(crate) struct Registration<'a> {
    thread: &'a Arc<Thread>,
}

impl<'a> Registration<'a> {
    fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
        assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
        // INVARIANT: We are pushing this thread to the right `ready_threads` list.
        if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
            guard.ready_threads.push_front(list_arc);
        } else {
            // It is an error to hit this branch, and it should not be reachable. We try to do
            // something reasonable when the failure path happens. Most likely, the thread in
            // question will sleep forever.
            pr_err!("Same thread registered with `ready_threads` twice.");
        }
        Self { thread }
    }
}

impl Drop for Registration<'_> {
    fn drop(&mut self) {
        let mut inner = self.thread.process.inner.lock();
        // SAFETY: The thread has the invariant that we never push it to any other linked list than
        // the `ready_threads` list of its parent process. Therefore, the thread is either in that
        // list, or in no list.
        unsafe { inner.ready_threads.remove(self.thread) };
    }
}

pub(crate) struct WithNodes<'a> {
    pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
    pub(crate) nodes: RBTree<u64, DArc<Node>>,
}

impl Drop for WithNodes<'_> {
    fn drop(&mut self) {
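        // Move the nodes back into `ProcessInner`. The spinlock guard was held for the entire
        // lifetime of `WithNodes`, so nothing should have repopulated `inner.nodes` meanwhile.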
        core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
        if self.nodes.iter().next().is_some() {
            pr_err!("nodes array was modified while using lock_with_nodes\n");
        }
    }
}

pub(crate) enum GetWorkOrRegister<'a> {
    Work(DLArc<dyn DeliverToRead>),
    Register(Registration<'a>),
}