xref: /linux/drivers/android/binder/node.rs (revision 94c37d42cb7ca362aee9633bec2dbeed787edf3e)
// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

use kernel::{
    list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
    prelude::*,
    seq_file::SeqFile,
    seq_print,
    sync::lock::{spinlock::SpinLockBackend, Guard},
    sync::{Arc, LockedBy, SpinLock},
};

use crate::{
    defs::*,
    error::BinderError,
    process::{NodeRefInfo, Process, ProcessInner},
    thread::Thread,
    transaction::Transaction,
    BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
};

use core::mem;

mod wrapper;
pub(crate) use self::wrapper::CritIncrWrapper;

#[derive(Debug)]
pub(crate) struct CouldNotDeliverCriticalIncrement;

/// Keeps track of how this node is scheduled.
///
/// There are two ways to schedule a node to a work list: either schedule the node itself, or
/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
/// make it possible to "move" a node from one list to another - when `do_work` is called directly
/// on the `Node`, then it's a no-op if there's also a pending wrapper.
///
/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
/// of this: weak increments and strong increments. We call such increments "critical" because it
/// is critical that they are delivered to the thread doing the increment. Some examples:
///
/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
///   increment. Delivering the node to the thread doing the weak increment is wrong, since the
///   thread doing the strong increment may have exited a long time ago by the time the command is
///   actually processed by userspace.
///
/// * We have a weak reference and are about to drop it on one thread. But then another thread does
///   a zero-to-one strong increment. If the strong increment gets sent to the thread that was
///   about to drop the weak reference, then the strong increment could be processed after the
///   other thread has already exited, which would be too late.
///
/// Note that trying to create a `ListArc` to the node can succeed even if `has_pushed_node` is
/// set. This is because another thread might just have popped the node from a todo list, but not
/// yet called `do_work`. However, if `has_pushed_node` is false, then creating a `ListArc` should
/// always succeed.
///
/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
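///
/// # Scenario sketch
///
/// An illustrative interleaving (not lifted from any call site, just restating the examples above
/// in terms of these fields): a `Node` is already on a todo list for a weak zero-to-one increment,
/// so `has_pushed_node` is set. Another thread now performs a strong zero-to-one increment. The
/// node itself cannot be scheduled a second time, so the increment is delivered through a wrapper
/// (`has_pushed_wrapper` and `has_strong_zero2one` become set), ensuring the `BR_ACQUIRE` reaches
/// the thread that performed the strong increment.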
struct DeliveryState {
    /// Is the `Node` currently scheduled?
    has_pushed_node: bool,

    /// Is a wrapper currently scheduled?
    ///
    /// The wrapper is used only for strong zero2one increments.
    has_pushed_wrapper: bool,

    /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
    ///
    /// Weak zero2one operations are always scheduled using the `Node`.
    has_weak_zero2one: bool,

    /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
    ///
    /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
    /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
    has_strong_zero2one: bool,
}

impl DeliveryState {
    fn should_normal_push(&self) -> bool {
        !self.has_pushed_node && !self.has_pushed_wrapper
    }

    fn did_normal_push(&mut self) {
        assert!(self.should_normal_push());
        self.has_pushed_node = true;
    }

    fn should_push_weak_zero2one(&self) -> bool {
        !self.has_weak_zero2one && !self.has_strong_zero2one
    }

    fn can_push_weak_zero2one_normally(&self) -> bool {
        !self.has_pushed_node
    }

    fn did_push_weak_zero2one(&mut self) {
        assert!(self.should_push_weak_zero2one());
        assert!(self.can_push_weak_zero2one_normally());
        self.has_pushed_node = true;
        self.has_weak_zero2one = true;
    }

    fn should_push_strong_zero2one(&self) -> bool {
        !self.has_strong_zero2one
    }

    fn can_push_strong_zero2one_normally(&self) -> bool {
        !self.has_pushed_node
    }

    fn did_push_strong_zero2one(&mut self) {
        assert!(self.should_push_strong_zero2one());
        assert!(self.can_push_strong_zero2one_normally());
        self.has_pushed_node = true;
        self.has_strong_zero2one = true;
    }

    fn did_push_strong_zero2one_wrapper(&mut self) {
        assert!(self.should_push_strong_zero2one());
        assert!(!self.can_push_strong_zero2one_normally());
        self.has_pushed_wrapper = true;
        self.has_strong_zero2one = true;
    }
}
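
// A minimal sketch of how the `should_*`/`can_*`/`did_*` methods above are used by the methods of
// `Node` further down (illustrative, mirroring `update_refcount_locked`): the check and the state
// update happen under the same process lock, with the `ListArc` creation in between.
//
//     if need_push && inner.delivery_state.should_normal_push() {
//         let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
//         inner.delivery_state.did_normal_push();
//         // ...the caller then queues `list_arc` on a todo list...
//     }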

struct CountState {
    /// The reference count.
    count: usize,
    /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
    /// even if count is greater than one, we only increment it once in the owning process.)
    has_count: bool,
}

impl CountState {
    fn new() -> Self {
        Self {
            count: 0,
            has_count: false,
        }
    }
}

struct NodeInner {
    /// Strong refcounts held on this node by `NodeRef` objects.
    strong: CountState,
    /// Weak refcounts held on this node by `NodeRef` objects.
    weak: CountState,
    delivery_state: DeliveryState,
    /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
    /// that is, userspace will not be given the next one until it has finished processing the
    /// previous oneway transaction. This is done to avoid the case where two oneway transactions
    /// arrive in opposite order from the order in which they were sent. (E.g., they could be
    /// delivered to two different threads, which could appear as if they were sent in opposite
    /// order.)
    ///
    /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
    /// deliver the next oneway transaction until userspace signals that it has finished processing
    /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl.
    oneway_todo: List<DTRWrap<Transaction>>,
    /// Keeps track of whether this node has a pending oneway transaction.
    ///
    /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
    /// being delivered directly to the process.
    has_oneway_transaction: bool,
    /// List of processes to deliver a notification to when this node is destroyed (usually due to
    /// the process dying).
    death_list: List<DTRWrap<NodeDeath>, 1>,
    /// List of processes to deliver freeze notifications to.
    freeze_list: KVVec<Arc<Process>>,
    /// The number of active BR_INCREFS or BR_ACQUIRE operations (at most two).
    ///
    /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
    /// active operations have ended. This avoids the situation where an increment and a decrement
    /// get reordered from userspace's perspective.
    active_inc_refs: u8,
    /// List of `NodeRefInfo` objects that reference this node.
    refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
}

use kernel::bindings::rb_node_layout;
use mem::offset_of;
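
// A note on the offsets below (a description of the expressions, hedged since the consumer of
// `rb_node_layout` lives on the C side of the bindings and is outside this file): `arc_offset`
// appears to be the offset from the start of the `Arc` allocation to the `Node` wrapped inside
// the `DTRWrap`, while `debug_id` and `ptr` are offsets of those fields within `Node` itself.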
pub(crate) const NODE_LAYOUT: rb_node_layout = rb_node_layout {
    arc_offset: Arc::<Node>::DATA_OFFSET + offset_of!(DTRWrap<Node>, wrapped),
    debug_id: offset_of!(Node, debug_id),
    ptr: offset_of!(Node, ptr),
};

#[pin_data]
pub(crate) struct Node {
    pub(crate) debug_id: usize,
    ptr: u64,
    pub(crate) cookie: u64,
    pub(crate) flags: u32,
    pub(crate) owner: Arc<Process>,
    inner: LockedBy<NodeInner, ProcessInner>,
    #[pin]
    links_track: AtomicTracker,
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Node {
        tracked_by links_track: AtomicTracker;
    }
}

// Make `oneway_todo` work.
kernel::list::impl_list_item! {
    impl ListItem<0> for DTRWrap<Transaction> {
        using ListLinks { self.links.inner };
    }
}

impl Node {
    pub(crate) fn new(
        ptr: u64,
        cookie: u64,
        flags: u32,
        owner: Arc<Process>,
    ) -> impl PinInit<Self> {
        pin_init!(Self {
            inner: LockedBy::new(
                &owner.inner,
                NodeInner {
                    strong: CountState::new(),
                    weak: CountState::new(),
                    delivery_state: DeliveryState {
                        has_pushed_node: false,
                        has_pushed_wrapper: false,
                        has_weak_zero2one: false,
                        has_strong_zero2one: false,
                    },
                    death_list: List::new(),
                    oneway_todo: List::new(),
                    freeze_list: KVVec::new(),
                    has_oneway_transaction: false,
                    active_inc_refs: 0,
                    refs: List::new(),
                },
            ),
            debug_id: super::next_debug_id(),
            ptr,
            cookie,
            flags,
            owner,
            links_track <- AtomicTracker::new(),
        })
    }

    pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
        let inner = self.inner.access_mut(owner_inner);
        inner.has_oneway_transaction
    }

    #[inline(never)]
    pub(crate) fn full_debug_print(
        &self,
        m: &SeqFile,
        owner_inner: &mut ProcessInner,
    ) -> Result<()> {
        let inner = self.inner.access_mut(owner_inner);
        seq_print!(
            m,
            "  node {}: u{:016x} c{:016x} hs {} hw {} cs {} cw {}",
            self.debug_id,
            self.ptr,
            self.cookie,
            inner.strong.has_count,
            inner.weak.has_count,
            inner.strong.count,
            inner.weak.count,
        );
        if !inner.refs.is_empty() {
            seq_print!(m, " proc");
            for node_ref in &inner.refs {
                seq_print!(m, " {}", node_ref.process.task.pid());
            }
        }
        seq_print!(m, "\n");
        for t in &inner.oneway_todo {
            t.debug_print_inner(m, "    pending async transaction ");
        }
        Ok(())
    }

    /// Insert the `NodeRefInfo` into this node's `refs` list.
    ///
    /// # Safety
    ///
    /// It must be the case that `info.node_ref.node` is this node.
    pub(crate) unsafe fn insert_node_info(
        &self,
        info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
    ) {
        self.inner
            .access_mut(&mut self.owner.inner.lock())
            .refs
            .push_front(info);
    }

    /// Remove the `NodeRefInfo` from this node's `refs` list.
    ///
    /// # Safety
    ///
    /// It must be the case that `info.node_ref.node` is this node.
    pub(crate) unsafe fn remove_node_info(
        &self,
        info: &NodeRefInfo,
    ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
        // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
        // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
        // the `refs` list of another node.
        unsafe {
            self.inner
                .access_mut(&mut self.owner.inner.lock())
                .refs
                .remove(info)
        }
    }

    /// An id that is unique across all binder nodes on the system. Used as the key in the
    /// `by_node` map.
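    ///
    /// Since the id is the node's address, uniqueness only holds while the node is alive; the
    /// address (and hence the id) may be reused once the node has been freed.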
    pub(crate) fn global_id(&self) -> usize {
        self as *const Node as usize
    }

    pub(crate) fn get_id(&self) -> (u64, u64) {
        (self.ptr, self.cookie)
    }

    pub(crate) fn add_death(
        &self,
        death: ListArc<DTRWrap<NodeDeath>, 1>,
        guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
    ) {
        self.inner.access_mut(guard).death_list.push_back(death);
    }

    pub(crate) fn inc_ref_done_locked(
        self: &DArc<Node>,
        _strong: bool,
        owner_inner: &mut ProcessInner,
    ) -> Option<DLArc<Node>> {
        let inner = self.inner.access_mut(owner_inner);
        if inner.active_inc_refs == 0 {
            pr_err!("inc_ref_done called when no active inc_refs");
            return None;
        }

        inner.active_inc_refs -= 1;
        if inner.active_inc_refs == 0 {
            // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
            // would send a refcount decrement, and if so, tell the caller to schedule us.
            let strong = inner.strong.count > 0;
            let has_strong = inner.strong.has_count;
            let weak = strong || inner.weak.count > 0;
            let has_weak = inner.weak.has_count;

            let should_drop_weak = !weak && has_weak;
            let should_drop_strong = !strong && has_strong;

            // If we want to drop the ref-count again, tell the caller to schedule this node on a
            // work list so that the decrement is delivered.
            let need_push = should_drop_weak || should_drop_strong;

            if need_push && inner.delivery_state.should_normal_push() {
                let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
                inner.delivery_state.did_normal_push();
                Some(list_arc)
            } else {
                None
            }
        } else {
            None
        }
    }

    pub(crate) fn update_refcount_locked(
        self: &DArc<Node>,
        inc: bool,
        strong: bool,
        count: usize,
        owner_inner: &mut ProcessInner,
    ) -> Option<DLArc<Node>> {
        let is_dead = owner_inner.is_dead;
        let inner = self.inner.access_mut(owner_inner);

        // Get a reference to the state we'll update.
        let state = if strong {
            &mut inner.strong
        } else {
            &mut inner.weak
        };

        // Update the count and determine whether we need to push work.
        let need_push = if inc {
            state.count += count;
            // TODO: This method shouldn't be used for zero-to-one increments.
            !is_dead && !state.has_count
        } else {
            if state.count < count {
                pr_err!("Failure: refcount underflow!");
                return None;
            }
            state.count -= count;
            !is_dead && state.count == 0 && state.has_count
        };

        if need_push && inner.delivery_state.should_normal_push() {
            let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
            inner.delivery_state.did_normal_push();
            Some(list_arc)
        } else {
            None
        }
    }

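    /// Increments the refcount while allowing a zero-to-one transition, but only if the resulting
    /// notification can be delivered to the calling thread; otherwise it fails with
    /// `CouldNotDeliverCriticalIncrement`.
    ///
    /// A hedged sketch of the intended call pattern (the real call sites live outside this file):
    /// on `CouldNotDeliverCriticalIncrement`, allocate a `CritIncrWrapper` with the lock dropped
    /// and retry via `incr_refcount_allow_zero2one_with_wrapper` below, which cannot fail.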
    pub(crate) fn incr_refcount_allow_zero2one(
        self: &DArc<Self>,
        strong: bool,
        owner_inner: &mut ProcessInner,
    ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
        let is_dead = owner_inner.is_dead;
        let inner = self.inner.access_mut(owner_inner);

        // Get a reference to the state we'll update.
        let state = if strong {
            &mut inner.strong
        } else {
            &mut inner.weak
        };

        // Update the count and determine whether we need to push work.
        state.count += 1;
        if is_dead || state.has_count {
            return Ok(None);
        }

        // Userspace needs to be notified of this.
        if !strong && inner.delivery_state.should_push_weak_zero2one() {
            assert!(inner.delivery_state.can_push_weak_zero2one_normally());
            let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
            inner.delivery_state.did_push_weak_zero2one();
            Ok(Some(list_arc))
        } else if strong && inner.delivery_state.should_push_strong_zero2one() {
            if inner.delivery_state.can_push_strong_zero2one_normally() {
                let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
                inner.delivery_state.did_push_strong_zero2one();
                Ok(Some(list_arc))
            } else {
                state.count -= 1;
                Err(CouldNotDeliverCriticalIncrement)
            }
        } else {
            // Work is already pushed, and we don't need to push again.
            Ok(None)
        }
    }

    pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
        self: &DArc<Self>,
        strong: bool,
        wrapper: CritIncrWrapper,
        owner_inner: &mut ProcessInner,
    ) -> Option<DLArc<dyn DeliverToRead>> {
        match self.incr_refcount_allow_zero2one(strong, owner_inner) {
            Ok(Some(node)) => Some(node as _),
            Ok(None) => None,
            Err(CouldNotDeliverCriticalIncrement) => {
                assert!(strong);
                let inner = self.inner.access_mut(owner_inner);
                inner.strong.count += 1;
                inner.delivery_state.did_push_strong_zero2one_wrapper();
                Some(wrapper.init(self.clone()))
            }
        }
    }

    pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
        self.owner
            .inner
            .lock()
            .update_node_refcount(self, inc, strong, count, None);
    }

    pub(crate) fn populate_counts(
        &self,
        out: &mut BinderNodeInfoForRef,
        guard: &Guard<'_, ProcessInner, SpinLockBackend>,
    ) {
        let inner = self.inner.access(guard);
        out.strong_count = inner.strong.count as _;
        out.weak_count = inner.weak.count as _;
    }

    pub(crate) fn populate_debug_info(
        &self,
        out: &mut BinderNodeDebugInfo,
        guard: &Guard<'_, ProcessInner, SpinLockBackend>,
    ) {
        out.ptr = self.ptr as _;
        out.cookie = self.cookie as _;
        let inner = self.inner.access(guard);
        if inner.strong.has_count {
            out.has_strong_ref = 1;
        }
        if inner.weak.has_count {
            out.has_weak_ref = 1;
        }
    }

    pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
        let inner = self.inner.access_mut(guard);
        inner.strong.has_count = true;
        inner.weak.has_count = true;
    }

    fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
        writer.write_code(code)?;
        writer.write_payload(&self.ptr)?;
        writer.write_payload(&self.cookie)?;
        Ok(())
    }

    pub(crate) fn submit_oneway(
        &self,
        transaction: DLArc<Transaction>,
        guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
    ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
        if guard.is_dead {
            return Err((BinderError::new_dead(), transaction));
        }

        let inner = self.inner.access_mut(guard);
        if inner.has_oneway_transaction {
            inner.oneway_todo.push_back(transaction);
        } else {
            inner.has_oneway_transaction = true;
            guard.push_work(transaction)?;
        }
        Ok(())
    }
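
    // A sketch of the oneway serialization that `submit_oneway` (above) and
    // `pending_oneway_finished` (below) implement together, assuming two oneway transactions T1
    // and T2 sent to the same node:
    //
    //   1. submit_oneway(T1): delivered immediately; `has_oneway_transaction` becomes true.
    //   2. submit_oneway(T2): queued on `oneway_todo` because T1 is still pending.
    //   3. Userspace frees T1's buffer (`BC_FREE_BUFFER`): `pending_oneway_finished` pops T2
    //      and delivers it.
    //   4. Userspace frees T2's buffer: the list is empty, so `has_oneway_transaction` becomes
    //      false again.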

    pub(crate) fn release(&self) {
        let mut guard = self.owner.inner.lock();
        while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
            drop(guard);
            work.into_arc().cancel();
            guard = self.owner.inner.lock();
        }

        while let Some(death) = self.inner.access_mut(&mut guard).death_list.pop_front() {
            drop(guard);
            death.into_arc().set_dead();
            guard = self.owner.inner.lock();
        }
    }

    pub(crate) fn pending_oneway_finished(&self) {
        let mut guard = self.owner.inner.lock();
        if guard.is_dead {
            // Cleanup will happen in `Process::deferred_release`.
            return;
        }

        let inner = self.inner.access_mut(&mut guard);

        let transaction = inner.oneway_todo.pop_front();
        inner.has_oneway_transaction = transaction.is_some();
        if let Some(transaction) = transaction {
            match guard.push_work(transaction) {
                Ok(()) => {}
                Err((_err, work)) => {
                    // Process is dead.
                    // This shouldn't happen due to the `is_dead` check, but if it does, just drop
                    // the transaction and return.
                    drop(guard);
                    drop(work);
                }
            }
        }
    }

    /// Finds an outdated transaction that the given transaction can replace.
    ///
    /// If one is found, it is removed from the list and returned.
    pub(crate) fn take_outdated_transaction(
        &self,
        new: &Transaction,
        guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
    ) -> Option<DLArc<Transaction>> {
        let inner = self.inner.access_mut(guard);
        let mut cursor = inner.oneway_todo.cursor_front();
        while let Some(next) = cursor.peek_next() {
            if new.can_replace(&next) {
                return Some(next.remove());
            }
            cursor.move_next();
        }
        None
    }

    /// This is split into a separate function since it's called by both `Node::do_work` and
    /// `NodeWrapper::do_work`.
    fn do_work_locked(
        &self,
        writer: &mut BinderReturnWriter<'_>,
        mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
    ) -> Result<bool> {
        let inner = self.inner.access_mut(&mut guard);
        let strong = inner.strong.count > 0;
        let has_strong = inner.strong.has_count;
        let weak = strong || inner.weak.count > 0;
        let has_weak = inner.weak.has_count;

        if weak && !has_weak {
            inner.weak.has_count = true;
            inner.active_inc_refs += 1;
        }

        if strong && !has_strong {
            inner.strong.has_count = true;
            inner.active_inc_refs += 1;
        }

        let no_active_inc_refs = inner.active_inc_refs == 0;
        let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
        let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
        if should_drop_weak {
            inner.weak.has_count = false;
        }
        if should_drop_strong {
            inner.strong.has_count = false;
        }
        if no_active_inc_refs && !weak {
            // Remove the node if there are no references to it.
            guard.remove_node(self.ptr);
        }
        drop(guard);

        if weak && !has_weak {
            self.write(writer, BR_INCREFS)?;
        }
        if strong && !has_strong {
            self.write(writer, BR_ACQUIRE)?;
        }
        if should_drop_strong {
            self.write(writer, BR_RELEASE)?;
        }
        if should_drop_weak {
            self.write(writer, BR_DECREFS)?;
        }

        Ok(true)
    }
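
    // A note on the write order above (an observation about the code, not a requirement stated
    // in this file): increments are written before decrements, with the weak increment first and
    // the weak decrement last, so from userspace's perspective the weak refcount always brackets
    // the strong refcount (BR_INCREFS .. BR_ACQUIRE .. BR_RELEASE .. BR_DECREFS).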

    pub(crate) fn add_freeze_listener(
        &self,
        process: &Arc<Process>,
        flags: kernel::alloc::Flags,
    ) -> Result {
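        // Growing `freeze_list` may require an allocation, which must not happen while holding
        // the process spinlock. So the loop below allocates `vec_alloc` with the lock dropped,
        // re-takes the lock, and either installs the larger buffer (moving the old elements
        // over) or, if the capacity is still insufficient because of a race, drops the lock and
        // allocates again.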
        let mut vec_alloc = KVVec::<Arc<Process>>::new();
        loop {
            let mut guard = self.owner.inner.lock();
            // Do not check for `guard.dead`. The `dead` flag that matters here is that of the
            // listener's owner, not the target.
            let inner = self.inner.access_mut(&mut guard);
            let len = inner.freeze_list.len();
            if len >= inner.freeze_list.capacity() {
                if len >= vec_alloc.capacity() {
                    drop(guard);
                    vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
                    continue;
                }
                mem::swap(&mut inner.freeze_list, &mut vec_alloc);
                for elem in vec_alloc.drain_all() {
                    inner.freeze_list.push_within_capacity(elem)?;
                }
            }
            inner.freeze_list.push_within_capacity(process.clone())?;
            return Ok(());
        }
    }

    pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
        let _unused_capacity;
        let mut guard = self.owner.inner.lock();
        let inner = self.inner.access_mut(&mut guard);
        let len = inner.freeze_list.len();
        inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
        if len == inner.freeze_list.len() {
            pr_warn!(
                "Could not remove freeze listener for {}\n",
                p.pid_in_current_ns()
            );
        }
        if inner.freeze_list.is_empty() {
            _unused_capacity = mem::take(&mut inner.freeze_list);
        }
    }

    pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
        &self.inner.access(guard).freeze_list
    }
}

impl DeliverToRead for Node {
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let mut owner_inner = self.owner.inner.lock();
        let inner = self.inner.access_mut(&mut owner_inner);

        assert!(inner.delivery_state.has_pushed_node);
        if inner.delivery_state.has_pushed_wrapper {
            // If the wrapper is scheduled, then we are either a normal push or weak zero2one
            // increment, and the wrapper is a strong zero2one increment, so the wrapper always
            // takes precedence over us.
            assert!(inner.delivery_state.has_strong_zero2one);
            inner.delivery_state.has_pushed_node = false;
            inner.delivery_state.has_weak_zero2one = false;
            return Ok(true);
        }

        inner.delivery_state.has_pushed_node = false;
        inner.delivery_state.has_weak_zero2one = false;
        inner.delivery_state.has_strong_zero2one = false;

        self.do_work_locked(writer, owner_inner)
    }

    fn cancel(self: DArc<Self>) {}

    fn should_sync_wakeup(&self) -> bool {
        false
    }

    #[inline(never)]
    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        seq_print!(
            m,
            "{}node work {}: u{:016x} c{:016x}\n",
            prefix,
            self.debug_id,
            self.ptr,
            self.cookie,
        );
        Ok(())
    }
}

/// Represents something that holds one or more ref-counts to a `Node`.
///
/// Whenever process A holds a refcount to a node owned by a different process B, process A will
/// store a `NodeRef` that refers to the `Node` in process B. When process A releases the
/// refcount, we destroy the `NodeRef`, which decrements the ref-count that process A holds on
/// the node.
///
/// This type is also used for some other cases. For example, a transaction allocation holds a
/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
/// so that the destructor of the allocation will drop a refcount on the `Node`.
pub(crate) struct NodeRef {
    pub(crate) node: DArc<Node>,
    /// How many times does this NodeRef hold a refcount on the Node?
    strong_node_count: usize,
    weak_node_count: usize,
    /// How many times does userspace hold a refcount on this NodeRef?
    strong_count: usize,
    weak_count: usize,
}

impl NodeRef {
    pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
        Self {
            node,
            strong_node_count: strong_count,
            weak_node_count: weak_count,
            strong_count,
            weak_count,
        }
    }

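    /// Merges `other` into `self`, transferring all of its counts to `self` and consolidating
    /// the node-side counts down to at most one each.
    ///
    /// A small worked example with illustrative numbers: if both `self` and `other` hold
    /// `strong_node_count == 1`, the merged count is 2, so one surplus strong refcount is given
    /// back to the node (`inner.strong.count -= 1`) and `self` keeps a single consolidated
    /// count. The node-side count must remain non-zero afterwards, which the `assert_ne!` calls
    /// check.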
    pub(crate) fn absorb(&mut self, mut other: Self) {
        assert!(
            Arc::ptr_eq(&self.node, &other.node),
            "absorb called with differing nodes"
        );
        self.strong_node_count += other.strong_node_count;
        self.weak_node_count += other.weak_node_count;
        self.strong_count += other.strong_count;
        self.weak_count += other.weak_count;
        other.strong_count = 0;
        other.weak_count = 0;
        other.strong_node_count = 0;
        other.weak_node_count = 0;

        if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
            let mut guard = self.node.owner.inner.lock();
            let inner = self.node.inner.access_mut(&mut guard);

            if self.strong_node_count >= 2 {
                inner.strong.count -= self.strong_node_count - 1;
                self.strong_node_count = 1;
                assert_ne!(inner.strong.count, 0);
            }
            if self.weak_node_count >= 2 {
                inner.weak.count -= self.weak_node_count - 1;
                self.weak_node_count = 1;
                assert_ne!(inner.weak.count, 0);
            }
        }
    }

    pub(crate) fn get_count(&self) -> (usize, usize) {
        (self.strong_count, self.weak_count)
    }

    pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
        if strong && self.strong_count == 0 {
            return Err(EINVAL);
        }
        Ok(self
            .node
            .owner
            .inner
            .lock()
            .new_node_ref(self.node.clone(), strong, None))
    }

    /// Updates (increments or decrements) the number of references held against the node. If the
    /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
    /// its `update_refcount` function called.
    ///
    /// Returns whether `self` should be removed (when both counts are zero).
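    ///
    /// A hedged example: with `strong_count == 1`, a strong decrement drops the userspace count
    /// to 0, forwards the accumulated `strong_node_count` to the node in a single
    /// `update_refcount` call, and returns `true` only if the weak count is also zero.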
    pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
        if strong && self.strong_count == 0 {
            return false;
        }
        let (count, node_count, other_count) = if strong {
            (
                &mut self.strong_count,
                &mut self.strong_node_count,
                self.weak_count,
            )
        } else {
            (
                &mut self.weak_count,
                &mut self.weak_node_count,
                self.strong_count,
            )
        };
        if inc {
            if *count == 0 {
                *node_count = 1;
                self.node.update_refcount(true, 1, strong);
            }
            *count += 1;
        } else {
            if *count == 0 {
                pr_warn!(
                    "pid {} performed invalid decrement on ref\n",
                    kernel::current!().pid()
                );
                return false;
            }
            *count -= 1;
            if *count == 0 {
                self.node.update_refcount(false, *node_count, strong);
                *node_count = 0;
                return other_count == 0;
            }
        }
        false
    }
}

impl Drop for NodeRef {
    // This destructor is called conditionally from `Allocation::drop`. That branch is often
    // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
    #[inline(always)]
    fn drop(&mut self) {
        if self.strong_node_count > 0 {
            self.node
                .update_refcount(false, self.strong_node_count, true);
        }
        if self.weak_node_count > 0 {
            self.node
                .update_refcount(false, self.weak_node_count, false);
        }
    }
}

struct NodeDeathInner {
    dead: bool,
    cleared: bool,
    notification_done: bool,
    /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
    /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
    /// to the user).
    aborted: bool,
}

/// Used to deliver notifications when a process dies.
///
/// A process can request to be notified when another process dies using
/// `BC_REQUEST_DEATH_NOTIFICATION`. This will make the driver send a `BR_DEAD_BINDER` to
/// userspace when the process dies (or immediately if it is already dead). Userspace is supposed
/// to respond with `BC_DEAD_BINDER_DONE` once it has processed the notification.
///
/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
/// notification has been removed. Note that if the remote process dies before the kernel has
/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
///
/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
///
/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
/// registration, then the death registration is immediately deleted (we implement this using the
/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
/// deregistering death notifications, so this codepath is not executed under normal circumstances.
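///
/// A sketch of the full command flow for one registration whose target dies before it is cleared
/// (a restatement of the rules above, not an additional protocol guarantee):
///
/// ```text
/// BC_REQUEST_DEATH_NOTIFICATION      userspace registers for notifications
/// ... the target process dies ...
/// BR_DEAD_BINDER                     the kernel notifies userspace
/// BC_DEAD_BINDER_DONE                userspace acknowledges the notification
/// BC_CLEAR_DEATH_NOTIFICATION        userspace removes the registration
/// BR_CLEAR_DEATH_NOTIFICATION_DONE   the kernel confirms the removal
/// ```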
#[pin_data]
pub(crate) struct NodeDeath {
    node: DArc<Node>,
    process: Arc<Process>,
    pub(crate) cookie: u64,
    #[pin]
    links_track: AtomicTracker<0>,
    /// Used by the owner `Node` to store a list of registered death notifications.
    ///
    /// # Invariants
    ///
    /// Only ever used with the `death_list` list of `self.node`.
    #[pin]
    death_links: ListLinks<1>,
    /// Used by the process to keep track of the death notifications for which we have sent a
    /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
    ///
    /// # Invariants
    ///
    /// Only ever used with the `delivered_deaths` list of `self.process`.
    #[pin]
    delivered_links: ListLinks<2>,
    #[pin]
    delivered_links_track: AtomicTracker<2>,
    #[pin]
    inner: SpinLock<NodeDeathInner>,
}

impl NodeDeath {
    /// Constructs a new node death notification object.
    pub(crate) fn new(
        node: DArc<Node>,
        process: Arc<Process>,
        cookie: u64,
    ) -> impl PinInit<DTRWrap<Self>> {
        DTRWrap::new(pin_init!(
            Self {
                node,
                process,
                cookie,
                links_track <- AtomicTracker::new(),
                death_links <- ListLinks::new(),
                delivered_links <- ListLinks::new(),
                delivered_links_track <- AtomicTracker::new(),
                inner <- kernel::new_spinlock!(NodeDeathInner {
                    dead: false,
                    cleared: false,
                    notification_done: false,
                    aborted: false,
                }, "NodeDeath::inner"),
            }
        ))
    }

    /// Sets the cleared flag to `true`.
    ///
    /// It removes `self` from the node's death notification list if needed.
    ///
    /// Returns whether it needs to be queued.
    pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
        let (needs_removal, needs_queueing) = {
            // Update state and determine if we need to queue a work item. We only need to do it
            // when the node is not dead or if the user already completed the death notification.
            let mut inner = self.inner.lock();
            if abort {
                inner.aborted = true;
            }
            if inner.cleared {
                // Already cleared.
                return false;
            }
            inner.cleared = true;
            (!inner.dead, !inner.dead || inner.notification_done)
        };

        // Remove death notification from node.
        if needs_removal {
            let mut owner_inner = self.node.owner.inner.lock();
            let node_inner = self.node.inner.access_mut(&mut owner_inner);
            // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
            // its owner, so it is either in this death list or in no death list.
            unsafe { node_inner.death_list.remove(self) };
        }
        needs_queueing
    }

    /// Sets the 'notification done' flag to `true`.
    pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
        let needs_queueing = {
            let mut inner = self.inner.lock();
            inner.notification_done = true;
            inner.cleared
        };
        if needs_queueing {
            if let Some(death) = ListArc::try_from_arc_or_drop(self) {
                let _ = thread.push_work_if_looper(death);
            }
        }
    }

    /// Sets the 'dead' flag to `true` and queues work item if needed.
    pub(crate) fn set_dead(self: DArc<Self>) {
        let needs_queueing = {
            let mut inner = self.inner.lock();
            if inner.cleared {
                false
            } else {
                inner.dead = true;
                true
            }
        };
        if needs_queueing {
            // Push the death notification to the target process. There is nothing else to do if
            // it's already dead.
            if let Some(death) = ListArc::try_from_arc_or_drop(self) {
                let process = death.process.clone();
                let _ = process.push_work(death);
            }
        }
    }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for NodeDeath {
        tracked_by links_track: AtomicTracker;
    }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
}
kernel::list::impl_list_item! {
    impl ListItem<1> for DTRWrap<NodeDeath> {
        using ListLinks { self.wrapped.death_links };
    }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<2> for DTRWrap<NodeDeath> {
        tracked_by wrapped: NodeDeath;
    }
}
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<2> for NodeDeath {
        tracked_by delivered_links_track: AtomicTracker<2>;
    }
}
kernel::list::impl_list_item! {
    impl ListItem<2> for DTRWrap<NodeDeath> {
        using ListLinks { self.wrapped.delivered_links };
    }
}
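
// The three list ids used above correspond to the three lists a `NodeDeath` can be on: id 0 for
// todo lists (scheduling it as a work item), id 1 for the owning node's `death_list`, and id 2
// for the process's `delivered_deaths` list.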

impl DeliverToRead for NodeDeath {
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let done = {
            let inner = self.inner.lock();
            if inner.aborted {
                return Ok(true);
            }
            inner.cleared && (!inner.dead || inner.notification_done)
        };

        let cookie = self.cookie;
        let cmd = if done {
            BR_CLEAR_DEATH_NOTIFICATION_DONE
        } else {
            let process = self.process.clone();
            let mut process_inner = process.inner.lock();
            let inner = self.inner.lock();
            if inner.aborted {
                return Ok(true);
            }
            // We're still holding the inner lock, so it cannot be aborted while we insert it into
            // the delivered list.
            process_inner.death_delivered(self.clone());
            BR_DEAD_BINDER
        };

        writer.write_code(cmd)?;
        writer.write_payload(&cookie)?;
        // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
        // get to a death notification.
        Ok(cmd != BR_DEAD_BINDER)
    }

    fn cancel(self: DArc<Self>) {}

    fn should_sync_wakeup(&self) -> bool {
        false
    }

    #[inline(never)]
    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        let inner = self.inner.lock();

        let dead_binder = inner.dead && !inner.notification_done;

        if dead_binder {
            if inner.cleared {
                seq_print!(m, "{}has cleared dead binder\n", prefix);
            } else {
                seq_print!(m, "{}has dead binder\n", prefix);
            }
        } else {
            seq_print!(m, "{}has cleared death notification\n", prefix);
        }

        Ok(())
    }
}