// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! This module defines the `Thread` type, which represents a userspace thread that is using
//! binder.
//!
//! The `Process` object stores all of the threads in an rb tree.

use kernel::{
    bindings,
    fs::{File, LocalFile},
    list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
    prelude::*,
    security,
    seq_file::SeqFile,
    seq_print,
    sync::atomic::{ordering::Relaxed, Atomic},
    sync::poll::{PollCondVar, PollTable},
    sync::{aref::ARef, Arc, SpinLock},
    task::Task,
    uaccess::{UserPtr, UserSlice, UserSliceReader},
    uapi,
};

use crate::{
    allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
    defs::*,
    error::BinderResult,
    process::{GetWorkOrRegister, Process},
    ptr_align,
    stats::GLOBAL_STATS,
    transaction::{Transaction, TransactionInfo},
    BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
};

use core::mem::size_of;

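/// Returns `true` if `value` is a multiple of `to`.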
fn is_aligned(value: usize, to: usize) -> bool {
    value % to == 0
}

/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
/// call and is discarded when it returns.
struct ScatterGatherState {
    /// A struct that tracks the amount of unused buffer space.
    unused_buffer_space: UnusedBufferSpace,
    /// Scatter-gather entries to copy.
    sg_entries: KVec<ScatterGatherEntry>,
    /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
    /// was processed and all of its ancestors. The array is in sorted order.
    ancestors: KVec<usize>,
}
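
// Illustrative sketch (not executed): after translating
//
//   A (no parent)   -> sg_entries[0], ancestors = [0]
//   B (parent = A)  -> sg_entries[1], ancestors = [0, 1]
//   C (parent = A)  -> sg_entries[2], ancestors = [0, 2]
//
// `ancestors` always holds the indices of the chain from the root buffer down
// to the most recently processed `binder_buffer_object`, which is the set of
// buffers that `validate_parent_fixup` is allowed to match against.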

/// This entry specifies an additional buffer that should be copied using the scatter-gather
/// mechanism.
struct ScatterGatherEntry {
    /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
    obj_index: usize,
    /// Offset in target buffer.
    offset: usize,
    /// User address in source buffer.
    sender_uaddr: usize,
    /// Number of bytes to copy.
    length: usize,
    /// The minimum offset of the next fixup in this buffer.
    fixup_min_offset: usize,
    /// The offsets within this buffer that contain pointers which should be translated.
    pointer_fixups: KVec<PointerFixupEntry>,
}

/// This entry specifies that a fixup should happen at `target_offset` of the
/// buffer.
enum PointerFixupEntry {
    /// A fixup for a `binder_buffer_object`.
    Fixup {
        /// The translated pointer to write.
        pointer_value: u64,
        /// The offset at which the value should be written. The offset is relative
        /// to the original buffer.
        target_offset: usize,
    },
    /// A skip for a `binder_fd_array_object`.
    Skip {
        /// The number of bytes to skip.
        skip: usize,
        /// The offset at which the skip should happen. The offset is relative
        /// to the original buffer.
        target_offset: usize,
    },
}
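
// A `Skip` entry leaves the fd-array region of the target buffer untouched by
// `apply_sg`: the actual fd values are installed later, when the transaction
// is delivered to the recipient (the same idea as the `u32::MAX` placeholder
// written for `BINDER_TYPE_FD` objects in `translate_object`).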

/// Return type of `validate_parent_fixup`.
struct ParentFixupInfo {
    /// The index of the parent buffer in `sg_entries`.
    parent_sg_index: usize,
    /// The number of ancestors of the buffer.
    ///
    /// The buffer is considered an ancestor of itself, so this is always at
    /// least one.
    num_ancestors: usize,
    /// New value of `fixup_min_offset` if this fixup is applied.
    new_min_offset: usize,
    /// The offset of the fixup in the target buffer.
    target_offset: usize,
}

impl ScatterGatherState {
    /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
    /// to access a region in its parent buffer. These accesses have various
    /// restrictions, which this method verifies.
    ///
    /// The `parent_offset` and `length` arguments describe the offset and
    /// length of the access in the parent buffer.
    ///
    /// # Detailed restrictions
    ///
    /// Obviously the fixup must be in-bounds for the parent buffer.
    ///
    /// For safety reasons, we only allow fixups inside a buffer to happen
    /// at increasing offsets; additionally, we only allow fixup on the last
    /// buffer object that was verified, or one of its parents.
    ///
    /// Example of what is allowed:
    ///
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = C, offset = 0)
    ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
    ///
    /// Examples of what is not allowed:
    ///
    /// Decreasing offsets within the same parent:
    /// A
    ///   C (parent = A, offset = 16)
    ///   B (parent = A, offset = 0) // decreasing offset within A
    ///
    /// Referring to a parent that wasn't the last object or any of its parents:
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///   D (parent = B, offset = 0) // B is not A or any of A's parents
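    ///
    /// For instance (walking the allowed example above): when `D (parent = C)`
    /// is checked, `ancestors` ends with C's index, so the `rposition` lookup
    /// below succeeds; the fixup must then start at or after C's current
    /// `fixup_min_offset` and end within C's length.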
    fn validate_parent_fixup(
        &self,
        parent: usize,
        parent_offset: usize,
        length: usize,
    ) -> Result<ParentFixupInfo> {
        // Using `position` would also be correct, but `rposition` avoids
        // quadratic running times.
        let ancestors_i = self
            .ancestors
            .iter()
            .copied()
            .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
            .ok_or(EINVAL)?;
        let sg_idx = self.ancestors[ancestors_i];
        let sg_entry = match self.sg_entries.get(sg_idx) {
            Some(sg_entry) => sg_entry,
            None => {
                pr_err!(
                    "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
                    ancestors_i,
                    sg_idx,
                    self.sg_entries.len()
                );
                return Err(EINVAL);
            }
        };
        if sg_entry.fixup_min_offset > parent_offset {
            pr_warn!(
                "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
                sg_entry.fixup_min_offset,
                parent_offset
            );
            return Err(EINVAL);
        }
        let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
        if new_min_offset > sg_entry.length {
            pr_warn!(
                "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
                new_min_offset,
                sg_entry.length
            );
            return Err(EINVAL);
        }
        let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
        // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
        // most `self.ancestors.len()`, which also fits in a usize.
        Ok(ParentFixupInfo {
            parent_sg_index: sg_idx,
            num_ancestors: ancestors_i + 1,
            new_min_offset,
            target_offset,
        })
    }
}

/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
struct UnusedBufferSpace {
    /// The start of the remaining space.
    offset: usize,
    /// The end of the remaining space.
    limit: usize,
}
impl UnusedBufferSpace {
    /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
    /// into the buffer is returned.
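    ///
    /// Example (illustrative, assuming `ptr_align` rounds up to 8 bytes): with
    /// `offset == 0` and `limit == 24`, `claim_next(8)` returns offset 0 and
    /// `claim_next(16)` returns offset 8; the space is then exhausted and any
    /// further claim fails with `EINVAL`.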
    fn claim_next(&mut self, size: usize) -> Result<usize> {
        // We require every chunk to be aligned.
        let size = ptr_align(size).ok_or(EINVAL)?;
        let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;

        if new_offset <= self.limit {
            let offset = self.offset;
            self.offset = new_offset;
            Ok(offset)
        } else {
            Err(EINVAL)
        }
    }
}

pub(crate) enum PushWorkRes {
    Ok,
    FailedDead(DLArc<dyn DeliverToRead>),
}

impl PushWorkRes {
    fn is_ok(&self) -> bool {
        match self {
            PushWorkRes::Ok => true,
            PushWorkRes::FailedDead(_) => false,
        }
    }
}

/// The fields of `Thread` protected by the spinlock.
struct InnerThread {
    /// Determines the looper state of the thread. It is a bit-wise combination of the constants
    /// prefixed with `LOOPER_`.
    looper_flags: u32,

    /// Determines whether the looper should return.
    looper_need_return: bool,

    /// Determines whether the thread is dead.
    is_dead: bool,

    /// Work item used to deliver error codes to the thread that started a transaction. Stored here
    /// so that it can be reused.
    reply_work: DArc<ThreadError>,

    /// Work item used to deliver error codes to the current thread. Stored here so that it can be
    /// reused.
    return_work: DArc<ThreadError>,

    /// Determines whether the work list below should be processed. When set to false, `work_list`
    /// is treated as if it were empty.
    process_work_list: bool,
    /// List of work items to deliver to userspace.
    work_list: List<DTRWrap<dyn DeliverToRead>>,
    current_transaction: Option<DArc<Transaction>>,

    /// Extended error information for this thread.
    extended_error: ExtendedError,
}

const LOOPER_REGISTERED: u32 = 0x01;
const LOOPER_ENTERED: u32 = 0x02;
const LOOPER_EXITED: u32 = 0x04;
const LOOPER_INVALID: u32 = 0x08;
const LOOPER_WAITING: u32 = 0x10;
const LOOPER_WAITING_PROC: u32 = 0x20;
const LOOPER_POLL: u32 = 0x40;
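
// A well-behaved looper sets exactly one of LOOPER_REGISTERED (via
// BC_REGISTER_LOOPER) or LOOPER_ENTERED (via BC_ENTER_LOOPER); setting both
// marks the thread LOOPER_INVALID (see `looper_register` and `looper_enter`).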

impl InnerThread {
    fn new() -> Result<Self> {
        fn next_err_id() -> u32 {
            static EE_ID: Atomic<u32> = Atomic::new(0);
            EE_ID.fetch_add(1, Relaxed)
        }

        Ok(Self {
            looper_flags: 0,
            looper_need_return: false,
            is_dead: false,
            process_work_list: false,
            reply_work: ThreadError::try_new()?,
            return_work: ThreadError::try_new()?,
            work_list: List::new(),
            current_transaction: None,
            extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
        })
    }

    fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
        if !self.process_work_list {
            return None;
        }

        let ret = self.work_list.pop_front();
        self.process_work_list = !self.work_list.is_empty();
        ret
    }

    fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        if self.is_dead {
            PushWorkRes::FailedDead(work)
        } else {
            self.work_list.push_back(work);
            self.process_work_list = true;
            PushWorkRes::Ok
        }
    }

    fn push_reply_work(&mut self, code: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
            work.set_error_code(code);
            self.push_work(work);
        } else {
            pr_warn!("Thread reply work is already in use.");
        }
    }

    fn push_return_work(&mut self, reply: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
            work.set_error_code(reply);
            self.push_work(work);
        } else {
            pr_warn!("Thread return work is already in use.");
        }
    }

    /// Used to push work items that do not need to be processed immediately and can wait until the
    /// thread gets another work item.
    fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
        self.work_list.push_back(work);
    }

    /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
    /// (that it could respond to) but it has also issued a transaction, it must first wait for the
    /// previously-issued transaction to complete.
    ///
    /// The `thread` parameter should be the thread containing this `InnerThread`.
    fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
        let transaction = self.current_transaction.take().ok_or(EINVAL)?;
        if core::ptr::eq(thread, transaction.from.as_ref()) {
            self.current_transaction = Some(transaction);
            return Err(EINVAL);
        }
        // Find a new current transaction for this thread.
        self.current_transaction = transaction.find_from(thread).cloned();
        Ok(transaction)
    }

    fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
        match self.current_transaction.take() {
            None => false,
            Some(old) => {
                if !Arc::ptr_eq(transaction, &old) {
                    self.current_transaction = Some(old);
                    return false;
                }
                self.current_transaction = old.clone_next();
                true
            }
        }
    }

    fn looper_enter(&mut self) {
        self.looper_flags |= LOOPER_ENTERED;
        if self.looper_flags & LOOPER_REGISTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    fn looper_register(&mut self, valid: bool) {
        self.looper_flags |= LOOPER_REGISTERED;
        if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    fn looper_exit(&mut self) {
        self.looper_flags |= LOOPER_EXITED;
    }

    /// Determines whether the thread is part of a pool, i.e., if it is a looper.
    fn is_looper(&self) -> bool {
        self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
    }

    /// Determines whether the thread should attempt to fetch work items from the process queue.
    /// This is generally the case when the thread is registered as a looper and not part of a
    /// transaction stack. But if there is local work, we want to return to userspace before we
    /// deliver any remote work.
    fn should_use_process_work_queue(&self) -> bool {
        self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
    }

    fn poll(&mut self) -> u32 {
        self.looper_flags |= LOOPER_POLL;
        if self.process_work_list || self.looper_need_return {
            bindings::POLLIN
        } else {
            0
        }
    }
}

/// This represents a thread that's used with binder.
#[pin_data]
pub(crate) struct Thread {
    pub(crate) id: i32,
    pub(crate) process: Arc<Process>,
    pub(crate) task: ARef<Task>,
    #[pin]
    inner: SpinLock<InnerThread>,
    #[pin]
    work_condvar: PollCondVar,
    /// Used to insert this thread into the process' `ready_threads` list.
    ///
    /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
    #[pin]
    links: ListLinks,
    #[pin]
    links_track: AtomicTracker,
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Thread {
        tracked_by links_track: AtomicTracker;
    }
}
kernel::list::impl_list_item! {
    impl ListItem<0> for Thread {
        using ListLinks { self.links };
    }
}

impl Thread {
    pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
        let inner = InnerThread::new()?;

        Arc::pin_init(
            try_pin_init!(Thread {
                id,
                process,
                task: ARef::from(&**kernel::current!()),
                inner <- kernel::new_spinlock!(inner, "Thread::inner"),
                work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
                links <- ListLinks::new(),
                links_track <- AtomicTracker::new(),
            }),
            GFP_KERNEL,
        )
    }

    #[inline(never)]
    pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
        let inner = self.inner.lock();

        if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
            seq_print!(
                m,
                " thread {}: l {:02x} need_return {}\n",
                self.id,
                inner.looper_flags,
                inner.looper_need_return,
            );
        }

        let mut t_opt = inner.current_transaction.as_ref();
        while let Some(t) = t_opt {
            if Arc::ptr_eq(&t.from, self) {
                t.debug_print_inner(m, " outgoing transaction ");
                t_opt = t.from_parent.as_ref();
            } else if Arc::ptr_eq(&t.to, &self.process) {
                t.debug_print_inner(m, " incoming transaction ");
                t_opt = t.find_from(self);
            } else {
                t.debug_print_inner(m, " bad transaction ");
                t_opt = None;
            }
        }

        for work in &inner.work_list {
            work.debug_print(m, " ", " pending transaction ")?;
        }
        Ok(())
    }

    pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
        let mut writer = data.writer();
        let ee = self.inner.lock().extended_error;
        writer.write(&ee)?;
        Ok(())
    }

    pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
        self.inner.lock().current_transaction = Some(transaction);
    }

    pub(crate) fn has_current_transaction(&self) -> bool {
        self.inner.lock().current_transaction.is_some()
    }

    /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
    /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
    /// signal); otherwise it returns indicating that none is available.
    // #[export_name] is a temporary workaround so that ps output does not become unreadable from
    // mangled symbol names.
    #[export_name = "rust_binder_waitlcl"]
    fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        {
            let mut inner = self.inner.lock();
            if inner.looper_need_return {
                return Ok(inner.pop_work());
            }
        }

        // Try once if the caller does not want to wait.
        if !wait {
            return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
        }

        // Loop waiting only on the local queue (i.e., not registering with the process queue).
        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            inner.looper_flags |= LOOPER_WAITING;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !LOOPER_WAITING;

            if signal_pending {
                return Err(EINTR);
            }
            if inner.looper_need_return {
                return Ok(None);
            }
        }
    }

    /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
    /// queue if none is available locally.
    ///
    /// This must only be called when the thread is not participating in a transaction chain. If it
    /// is, the local version (`get_work_local`) should be used instead.
    // #[export_name] is a temporary workaround so that ps output does not become unreadable from
    // mangled symbol names.
    #[export_name = "rust_binder_wait"]
    fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        // Try to get work from the thread's work queue, using only a local lock.
        {
            let mut inner = self.inner.lock();
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }
            if inner.looper_need_return {
                drop(inner);
                return Ok(self.process.get_work());
            }
        }

        // If the caller doesn't want to wait, try to grab work from the process queue.
        //
        // We know nothing will have been queued directly to the thread queue because it is not in
        // a transaction and it is not in the process' ready list.
        if !wait {
            return self.process.get_work().ok_or(EAGAIN).map(Some);
        }

        // Get work from the process queue. If none is available, atomically register as ready.
        let reg = match self.process.get_work_or_register(self) {
            GetWorkOrRegister::Work(work) => return Ok(Some(work)),
            GetWorkOrRegister::Register(reg) => reg,
        };

        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);

            if signal_pending || inner.looper_need_return {
                // We need to return now. We need to pull the thread off the list of ready threads
                // (by dropping `reg`), then check the state again after it's off the list to
                // ensure that something was not queued in the meantime. If something has been
                // queued, we just return it (instead of the error).
                drop(inner);
                drop(reg);

                let res = match self.inner.lock().pop_work() {
                    Some(work) => Ok(Some(work)),
                    None if signal_pending => Err(EINTR),
                    None => Ok(None),
                };
                return res;
            }
        }
    }

    /// Push the provided work item to be delivered to user space via this thread.
    ///
    /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
    pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        let sync = work.should_sync_wakeup();

        let res = self.inner.lock().push_work(work);

        if res.is_ok() {
            if sync {
                self.work_condvar.notify_sync();
            } else {
                self.work_condvar.notify_one();
            }
        }

        res
    }

    /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
    /// part of a thread pool) and is alive. Otherwise, pushes the work item to the process
    /// instead.
    pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
        let mut inner = self.inner.lock();
        if inner.is_looper() && !inner.is_dead {
            inner.push_work(work);
            Ok(())
        } else {
            drop(inner);
            self.process.push_work(work)
        }
    }

    pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
        self.inner.lock().push_work_deferred(work);
    }

    pub(crate) fn push_return_work(&self, reply: u32) {
        self.inner.lock().push_return_work(reply);
    }

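    // Translates one object embedded in the transaction payload. The match
    // below handles nodes (BINDER_TYPE_BINDER/WEAK_BINDER), references
    // (BINDER_TYPE_HANDLE/WEAK_HANDLE), file descriptors (BINDER_TYPE_FD),
    // scatter-gather buffers (BINDER_TYPE_PTR) and fd arrays (BINDER_TYPE_FDA).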
    fn translate_object(
        &self,
        obj_index: usize,
        offset: usize,
        object: BinderObjectRef<'_>,
        view: &mut AllocationView<'_>,
        allow_fds: bool,
        sg_state: &mut ScatterGatherState,
    ) -> BinderResult {
        match object {
            BinderObjectRef::Binder(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
                // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
                // representation.
                let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
                let cookie = obj.cookie as _;
                let flags = obj.flags as _;
                let node = self
                    .process
                    .as_arc_borrow()
                    .get_node(ptr, cookie, flags, strong, self)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Handle(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                let node = self.process.get_node_from_handle(handle, strong)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Fd(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }

                // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
                let fd = unsafe { obj.__bindgen_anon_1.fd };
                let file = LocalFile::fget(fd)?;
                // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                // ioctl, so there are no active calls to `fdget_pos` on this thread.
                let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                security::binder_transfer_file(
                    &self.process.cred,
                    &view.alloc.process.cred,
                    &file,
                )?;

                let mut obj_write = BinderFdObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FD;
                // This will be overwritten with the actual fd when the transaction is received.
                obj_write.__bindgen_anon_1.fd = u32::MAX;
                obj_write.cookie = obj.cookie;
                view.write::<BinderFdObject>(offset, &obj_write)?;

                const FD_FIELD_OFFSET: usize =
                    core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);

                let field_offset = offset + FD_FIELD_OFFSET;
                crate::trace::trace_transaction_fd_send(view.alloc.debug_id, fd, field_offset);

                view.alloc.info_add_fd(file, field_offset, false)?;
            }
            BinderObjectRef::Ptr(obj) => {
                let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
                let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
                    Ok(alloc_offset) => alloc_offset,
                    Err(err) => {
                        pr_warn!(
                            "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
                            sg_state.unused_buffer_space.offset,
                            sg_state.unused_buffer_space.limit,
                            obj_length,
                        );
                        return Err(err.into());
                    }
                };

                let sg_state_idx = sg_state.sg_entries.len();
                sg_state.sg_entries.push(
                    ScatterGatherEntry {
                        obj_index,
                        offset: alloc_offset,
                        sender_uaddr: obj.buffer as _,
                        length: obj_length,
                        pointer_fixups: KVec::new(),
                        fixup_min_offset: 0,
                    },
                    GFP_KERNEL,
                )?;

                let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;

                if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
                    sg_state.ancestors.clear();
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
                } else {
                    // Another buffer also has a pointer to this buffer, and we need to fixup that
                    // pointer too.

                    let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                    let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;

                    let info = sg_state.validate_parent_fixup(
                        parent_index,
                        parent_offset,
                        size_of::<u64>(),
                    )?;

                    sg_state.ancestors.truncate(info.num_ancestors);
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;

                    let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                        Some(parent_entry) => parent_entry,
                        None => {
                            pr_err!(
                                "validate_parent_fixup returned index out of bounds for sg.entries"
                            );
                            return Err(EINVAL.into());
                        }
                    };

                    parent_entry.fixup_min_offset = info.new_min_offset;
                    parent_entry.pointer_fixups.push(
                        PointerFixupEntry::Fixup {
                            pointer_value: buffer_ptr_in_user_space,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )?;
                }

                let mut obj_write = BinderBufferObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_PTR;
                obj_write.flags = obj.flags;
                obj_write.buffer = buffer_ptr_in_user_space;
                obj_write.length = obj.length;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderBufferObject>(offset, &obj_write)?;
            }
            BinderObjectRef::Fda(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }
                let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
                let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
                let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;

                if !is_aligned(parent_offset, size_of::<u32>()) {
                    return Err(EINVAL.into());
                }

                let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
                view.alloc.info_add_fd_reserve(num_fds)?;

                sg_state.ancestors.truncate(info.num_ancestors);
                let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                    Some(parent_entry) => parent_entry,
                    None => {
                        pr_err!(
                            "validate_parent_fixup returned index out of bounds for sg.entries"
                        );
                        return Err(EINVAL.into());
                    }
                };

                if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
                    return Err(EINVAL.into());
                }

                parent_entry.fixup_min_offset = info.new_min_offset;
                parent_entry
                    .pointer_fixups
                    .push(
                        PointerFixupEntry::Skip {
                            skip: fds_len,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )
                    .map_err(|_| ENOMEM)?;

                let fda_uaddr = parent_entry
                    .sender_uaddr
                    .checked_add(parent_offset)
                    .ok_or(EINVAL)?;

                let mut fda_bytes = KVec::new();
                UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
                    .read_all(&mut fda_bytes, GFP_KERNEL)?;

                if fds_len != fda_bytes.len() {
                    pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
                    return Err(EINVAL.into());
                }

                for i in (0..fds_len).step_by(size_of::<u32>()) {
                    let fd = {
                        let mut fd_bytes = [0u8; size_of::<u32>()];
                        fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
                        u32::from_ne_bytes(fd_bytes)
                    };

                    let file = LocalFile::fget(fd)?;
                    // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                    // ioctl, so there are no active calls to `fdget_pos` on this thread.
                    let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                    security::binder_transfer_file(
                        &self.process.cred,
                        &view.alloc.process.cred,
                        &file,
                    )?;

                    // The `validate_parent_fixup` call ensures that this addition will not
                    // overflow.
                    view.alloc.info_add_fd(file, info.target_offset + i, true)?;
                }
                drop(fda_bytes);

                let mut obj_write = BinderFdArrayObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FDA;
                obj_write.num_fds = obj.num_fds;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderFdArrayObject>(offset, &obj_write)?;
            }
        }
        Ok(())
    }

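    // Copies each scatter-gather buffer from the sender's address space into
    // the target allocation, applying the recorded pointer fixups (and skips
    // for fd arrays) in increasing offset order.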
    fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
        for sg_entry in &mut sg_state.sg_entries {
            let mut end_of_previous_fixup = sg_entry.offset;
            let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;

            let mut reader =
                UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
            for fixup in &mut sg_entry.pointer_fixups {
                let (fixup_len, fixup_offset) = match fixup {
                    PointerFixupEntry::Fixup { target_offset, .. } => {
                        (size_of::<u64>(), *target_offset)
                    }
                    PointerFixupEntry::Skip {
                        skip,
                        target_offset,
                    } => (*skip, *target_offset),
                };

                let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
                if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
                    pr_warn!(
                        "Fixups oob {} {} {} {}",
                        fixup_offset,
                        end_of_previous_fixup,
                        offset_end,
                        target_offset_end
                    );
                    return Err(EINVAL.into());
                }

                let copy_off = end_of_previous_fixup;
                let copy_len = fixup_offset - end_of_previous_fixup;
                if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                    pr_warn!("Failed copying into alloc: {:?}", err);
                    return Err(err.into());
                }
                if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
                    let res = alloc.write::<u64>(fixup_offset, pointer_value);
                    if let Err(err) = res {
                        pr_warn!("Failed copying ptr into alloc: {:?}", err);
                        return Err(err.into());
                    }
                }
                if let Err(err) = reader.skip(fixup_len) {
                    pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
                    return Err(err.into());
                }
                end_of_previous_fixup = target_offset_end;
            }
            let copy_off = end_of_previous_fixup;
            let copy_len = offset_end - end_of_previous_fixup;
            if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                pr_warn!("Failed copying remainder into alloc: {:?}", err);
                return Err(err.into());
            }
        }
        Ok(())
    }

    /// This method copies the payload of a transaction into the target process.
    ///
    /// The resulting payload will have several different components, which will be stored next to
    /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
    /// and those objects have to be translated so that they make sense to the target process.
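    ///
    /// The allocation is laid out, in order, as:
    ///
    /// 1. the transaction data (padded to 8-byte alignment),
    /// 2. the offsets array,
    /// 3. the scatter-gather buffers, and
    /// 4. the security context, if one was requested.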
    pub(crate) fn copy_transaction_data(
        &self,
        to_process: Arc<Process>,
        info: &mut TransactionInfo,
        debug_id: usize,
        allow_fds: bool,
        txn_security_ctx_offset: Option<&mut usize>,
    ) -> BinderResult<NewAllocation> {
        let mut secctx = if let Some(offset) = txn_security_ctx_offset {
            let secid = self.process.cred.get_secid();
            let ctx = match security::SecurityCtx::from_secid(secid) {
                Ok(ctx) => ctx,
                Err(err) => {
                    pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
                    return Err(err.into());
                }
            };
            Some((offset, ctx))
        } else {
            None
        };

        let data_size = info.data_size;
        let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
        let offsets_size = info.offsets_size;
        let buffers_size = info.buffers_size;
        let aligned_secctx_size = match secctx.as_ref() {
            Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
            None => 0,
        };

        if !is_aligned(offsets_size, size_of::<u64>()) {
            return Err(EINVAL.into());
        }
        if !is_aligned(buffers_size, size_of::<u64>()) {
            return Err(EINVAL.into());
        }

        // This guarantees that at least `size_of::<u64>()` bytes will be allocated.
        let len = usize::max(
            aligned_data_size
                .checked_add(offsets_size)
                .and_then(|sum| sum.checked_add(buffers_size))
                .and_then(|sum| sum.checked_add(aligned_secctx_size))
                .ok_or(ENOMEM)?,
            size_of::<u64>(),
        );
        let secctx_off = aligned_data_size + offsets_size + buffers_size;
        let mut alloc = match to_process.buffer_alloc(debug_id, len, info) {
            Ok(alloc) => alloc,
            Err(err) => {
                pr_warn!(
                    "Failed to allocate buffer. len:{}, is_oneway:{}",
                    len,
                    info.is_oneway(),
                );
                return Err(err);
            }
        };

        let mut buffer_reader = UserSlice::new(info.data_ptr, data_size).reader();
        let mut end_of_previous_object = 0;
        let mut sg_state = None;

        // Copy offsets if there are any.
        if offsets_size > 0 {
            let mut offsets_reader = UserSlice::new(info.offsets_ptr, offsets_size).reader();

            let offsets_start = aligned_data_size;
            let offsets_end = aligned_data_size + offsets_size;

            // This state is used for BINDER_TYPE_PTR objects.
            let sg_state = sg_state.insert(ScatterGatherState {
                unused_buffer_space: UnusedBufferSpace {
                    offset: offsets_end,
                    limit: offsets_end + buffers_size,
                },
                sg_entries: KVec::new(),
                ancestors: KVec::new(),
            });

            // Traverse the objects specified.
            let mut view = AllocationView::new(&mut alloc, data_size);
            for (index, index_offset) in (offsets_start..offsets_end)
                .step_by(size_of::<u64>())
                .enumerate()
            {
                let offset = offsets_reader.read::<u64>()?;
                view.alloc.write(index_offset, &offset)?;
                let offset: usize = offset.try_into().map_err(|_| EINVAL)?;

                if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
                    pr_warn!("Got transaction with invalid offset.");
                    return Err(EINVAL.into());
                }

                // Copy data between two objects.
                if end_of_previous_object < offset {
                    view.copy_into(
                        &mut buffer_reader,
                        end_of_previous_object,
                        offset - end_of_previous_object,
                    )?;
                }

                let mut object = BinderObject::read_from(&mut buffer_reader)?;

                match self.translate_object(
                    index,
                    offset,
                    object.as_ref(),
                    &mut view,
                    allow_fds,
                    sg_state,
                ) {
                    Ok(()) => end_of_previous_object = offset + object.size(),
                    Err(err) => {
                        pr_warn!("Error while translating object.");
                        return Err(err);
                    }
                }

                // Update the indexes containing objects to clean up.
                let offset_after_object = index_offset + size_of::<u64>();
                view.alloc
                    .set_info_offsets(offsets_start..offset_after_object);
            }
        }

        // Copy remaining raw data.
        alloc.copy_into(
            &mut buffer_reader,
            end_of_previous_object,
            data_size - end_of_previous_object,
        )?;

        if let Some(sg_state) = sg_state.as_mut() {
            if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
                pr_warn!("Failure in apply_sg: {:?}", err);
                return Err(err);
            }
        }

        if let Some((off_out, secctx)) = secctx.as_mut() {
            if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
                pr_warn!("Failed to write security context: {:?}", err);
                return Err(err.into());
            }
            **off_out = secctx_off;
        }
        Ok(alloc)
    }

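    /// Walks up this thread's transaction stack, delivering `BR_DEAD_REPLY` to
    /// each sender in turn, and stops once a reply is delivered to a thread
    /// that is still alive.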
    fn unwind_transaction_stack(self: &Arc<Self>) {
        let mut thread = self.clone();
        while let Ok(transaction) = {
            let mut inner = thread.inner.lock();
            inner.pop_transaction_to_reply(thread.as_ref())
        } {
            let reply = Err(BR_DEAD_REPLY);
            if !transaction.from.deliver_single_reply(reply, &transaction) {
                break;
            }

            thread = transaction.from.clone();
        }
    }

    pub(crate) fn deliver_reply(
        &self,
        reply: Result<DLArc<Transaction>, u32>,
        transaction: &DArc<Transaction>,
    ) {
        if self.deliver_single_reply(reply, transaction) {
            transaction.from.unwind_transaction_stack();
        }
    }

    /// Delivers a reply to the thread that started a transaction. The reply can either be a
    /// reply-transaction or an error code to be delivered instead.
    ///
    /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
    /// transaction stack by completing transactions for threads that are dead.
    fn deliver_single_reply(
        &self,
        reply: Result<DLArc<Transaction>, u32>,
        transaction: &DArc<Transaction>,
    ) -> bool {
        if let Ok(transaction) = &reply {
            crate::trace::trace_transaction(true, transaction, Some(&self.task));
            transaction.set_outstanding(&mut self.process.inner.lock());
        }

        {
            let mut inner = self.inner.lock();
            if !inner.pop_transaction_replied(transaction) {
                return false;
            }

            if inner.is_dead {
                return true;
            }

            match reply {
                Ok(work) => {
                    inner.push_work(work);
                }
                Err(code) => inner.push_reply_work(code),
            }
        }

        // Notify the thread now that we've released the inner lock.
        self.work_condvar.notify_sync();
        false
    }

    /// Determines if the given transaction is the current transaction for this thread.
    fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
        let inner = self.inner.lock();
        match &inner.current_transaction {
            None => false,
            Some(current) => Arc::ptr_eq(current, transaction),
        }
    }

    /// Determines the current top of the transaction stack. It fails if the top is in another
    /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
    /// [`None`] if the thread is not currently participating in a transaction stack.
    fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
        let inner = self.inner.lock();
        if let Some(cur) = &inner.current_transaction {
            if core::ptr::eq(self, cur.from.as_ref()) {
                pr_warn!("got new transaction with bad transaction stack");
                return Err(EINVAL);
            }
            Ok(Some(cur.clone()))
        } else {
            Ok(None)
        }
    }

    // No inlining avoids allocating stack space for `BinderTransactionData` for the entire
    // duration of `transaction()`.
    #[inline(never)]
    fn read_transaction_info(
        &self,
        cmd: u32,
        reader: &mut UserSliceReader,
        info: &mut TransactionInfo,
    ) -> Result<()> {
        let td = match cmd {
            BC_TRANSACTION | BC_REPLY => {
                reader.read::<BinderTransactionData>()?.with_buffers_size(0)
            }
            BC_TRANSACTION_SG | BC_REPLY_SG => reader.read::<BinderTransactionDataSg>()?,
            _ => return Err(EINVAL),
        };

        // SAFETY: Above `read` call initializes all bytes, so this union read is ok.
        let trd_data_ptr = unsafe { &td.transaction_data.data.ptr };

        info.is_reply = matches!(cmd, BC_REPLY | BC_REPLY_SG);
        info.from_pid = self.process.task.pid();
        info.from_tid = self.id;
        info.code = td.transaction_data.code;
        info.flags = td.transaction_data.flags;
        info.data_ptr = UserPtr::from_addr(trd_data_ptr.buffer as usize);
        info.data_size = td.transaction_data.data_size as usize;
        info.offsets_ptr = UserPtr::from_addr(trd_data_ptr.offsets as usize);
        info.offsets_size = td.transaction_data.offsets_size as usize;
        info.buffers_size = td.buffers_size as usize;
        // SAFETY: Above `read` call initializes all bytes, so this union read is ok.
        info.target_handle = unsafe { td.transaction_data.target.handle };
        Ok(())
    }

    #[inline(never)]
    fn transaction(self: &Arc<Self>, cmd: u32, reader: &mut UserSliceReader) -> Result<()> {
        let mut info = TransactionInfo::zeroed();
        self.read_transaction_info(cmd, reader, &mut info)?;

        let ret = if info.is_reply {
            self.reply_inner(&mut info)
        } else if info.is_oneway() {
            self.oneway_transaction_inner(&mut info)
        } else {
            self.transaction_inner(&mut info)
        };

        if let Err(err) = ret {
            if err.reply != BR_TRANSACTION_COMPLETE {
                info.reply = err.reply;
            }

            self.push_return_work(err.reply);
            if let Some(source) = &err.source {
                info.errno = source.to_errno();
                info.reply = err.reply;

                {
                    let mut inner = self.inner.lock();
                    inner.extended_error.command = err.reply;
                    inner.extended_error.param = source.to_errno();
                }

                pr_warn!(
                    "{}:{} transaction to {} failed: {source:?}",
                    info.from_pid,
                    info.from_tid,
                    info.to_pid
                );
            }
        }

        Ok(())
    }

    fn transaction_inner(self: &Arc<Self>, info: &mut TransactionInfo) -> BinderResult {
        let node_ref = self.process.get_transaction_node(info.target_handle)?;
        info.to_pid = node_ref.node.owner.task.pid();
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
        // could this happen?
        let top = self.top_of_transaction_stack()?;
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
        let completion = list_completion.clone_arc();
        let transaction = Transaction::new(node_ref, top, self, info)?;

        // Check that the transaction stack hasn't changed while the lock was released, then update
        // it with the new transaction.
        {
            let mut inner = self.inner.lock();
            if !transaction.is_stacked_on(&inner.current_transaction) {
                pr_warn!("Transaction stack changed during transaction!");
                return Err(EINVAL.into());
            }
            inner.current_transaction = Some(transaction.clone_arc());
            // We push the completion as a deferred work so that we wait for the reply before
            // returning to userland.
            inner.push_work_deferred(list_completion);
        }

        if let Err(e) = transaction.submit(info) {
            completion.skip();
            // Define `transaction` first to drop it after `inner`.
            let transaction;
            let mut inner = self.inner.lock();
            transaction = inner.current_transaction.take().unwrap();
            inner.current_transaction = transaction.clone_next();
            Err(e)
        } else {
            Ok(())
        }
    }

    fn reply_inner(self: &Arc<Self>, info: &mut TransactionInfo) -> BinderResult {
        let orig = self.inner.lock().pop_transaction_to_reply(self)?;
        if !orig.from.is_current_transaction(&orig) {
            return Err(EINVAL.into());
        }

        info.to_tid = orig.from.id;
        info.to_pid = orig.from.process.task.pid();

        // We need to complete the transaction even if we cannot complete building the reply.
        let out = (|| -> BinderResult<_> {
            let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
            let process = orig.from.process.clone();
            let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
            let reply = Transaction::new_reply(self, process, info, allow_fds)?;
            self.inner.lock().push_work(completion);
            orig.from.deliver_reply(Ok(reply), &orig);
            Ok(())
        })()
        .map_err(|mut err| {
            // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must
            // let the sender know that the transaction has completed (with an error in this
            // case).
            pr_warn!(
                "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
                err
            );
            let reply = Err(BR_FAILED_REPLY);
            orig.from.deliver_reply(reply, &orig);
            err.reply = BR_TRANSACTION_COMPLETE;
            err
        });

        out
    }

    fn oneway_transaction_inner(self: &Arc<Self>, info: &mut TransactionInfo) -> BinderResult {
        let node_ref = self.process.get_transaction_node(info.target_handle)?;
        info.to_pid = node_ref.node.owner.task.pid();
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        let transaction = Transaction::new(node_ref, None, self, info)?;
        let code = if self.process.is_oneway_spam_detection_enabled() && info.oneway_spam_suspect {
            BR_ONEWAY_SPAM_SUSPECT
        } else {
            BR_TRANSACTION_COMPLETE
        };
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
        let completion = list_completion.clone_arc();
        self.inner.lock().push_work(list_completion);
        match transaction.submit(info) {
            Ok(()) => Ok(()),
            Err(err) => {
                completion.skip();
                Err(err)
            }
        }
    }

    fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
        let write_start = req.write_buffer.wrapping_add(req.write_consumed);
        let write_len = req.write_size.saturating_sub(req.write_consumed);
        let mut reader =
            UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();

        while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
            let before = reader.len();
            let cmd = reader.read::<u32>()?;
            crate::trace::trace_command(cmd);
            GLOBAL_STATS.inc_bc(cmd);
            self.process.stats.inc_bc(cmd);
            match cmd {
                BC_TRANSACTION | BC_TRANSACTION_SG | BC_REPLY | BC_REPLY_SG => {
                    self.transaction(cmd, &mut reader)?;
                }
                BC_FREE_BUFFER => {
                    let buffer = self.process.buffer_get(reader.read()?);
                    if let Some(buffer) = buffer {
                        if buffer.looper_need_return_on_free() {
                            self.inner.lock().looper_need_return = true;
                        }
                        drop(buffer);
                    }
                }
                BC_INCREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, false)?
                }
                BC_ACQUIRE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, true)?
                }
                BC_RELEASE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, true)?
                }
                BC_DECREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, false)?
                }
                BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
                BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
                BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
                BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
                BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
                BC_REGISTER_LOOPER => {
                    let valid = self.process.register_thread();
                    self.inner.lock().looper_register(valid);
                }
                BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
                BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
                BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
                BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
                BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,

                // Fail if given an unknown command code.
                // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
                _ => return Err(EINVAL),
            }
            // Update the number of write bytes consumed.
            req.write_consumed += (before - reader.len()) as u64;
        }

        Ok(())
    }

    fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
        let read_start = req.read_buffer.wrapping_add(req.read_consumed);
        let read_len = req.read_size.saturating_sub(req.read_consumed);
        let mut writer = BinderReturnWriter::new(
            UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
            self,
        );
        let (in_pool, has_transaction, thread_todo, use_proc_queue) = {
            let inner = self.inner.lock();
            (
                inner.is_looper(),
                inner.current_transaction.is_some(),
                !inner.work_list.is_empty(),
                inner.should_use_process_work_queue(),
            )
        };

        crate::trace::trace_wait_for_work(use_proc_queue, has_transaction, thread_todo);

        let getter = if use_proc_queue {
            Self::get_work
        } else {
            Self::get_work_local
        };

        // Reserve some room at the beginning of the read buffer so that we can send a
        // BR_SPAWN_LOOPER if we need to.
        let mut has_noop_placeholder = false;
        if req.read_consumed == 0 {
            if let Err(err) = writer.write_code(BR_NOOP) {
                pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
                return Err(err);
            }
            has_noop_placeholder = true;
        }

        // Loop doing work while there is room in the buffer.
        let initial_len = writer.len();
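        // Every work item emits at least a 4-byte BR_ code; requiring room for a
        // full `binder_transaction_data_secctx` plus that code ensures the
        // largest possible item still fits in the remaining buffer.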
        while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
            match getter(self, wait && initial_len == writer.len()) {
                Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
                    Ok(true) => {}
                    Ok(false) => break,
                    Err(err) => {
                        return Err(err);
                    }
                },
                Ok(None) => {
                    break;
                }
                Err(err) => {
                    // Propagate the error if we haven't written anything else.
                    if err != EINTR && err != EAGAIN {
                        pr_warn!("Failure in work getter: {:?}", err);
                    }
                    if initial_len == writer.len() {
                        return Err(err);
                    } else {
                        break;
                    }
                }
            }
        }

        req.read_consumed += read_len - writer.len() as u64;

        // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
        if has_noop_placeholder && in_pool && self.process.needs_thread() {
            let mut writer =
                UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
                    .writer();
            writer.write(&BR_SPAWN_LOOPER)?;
        }
        Ok(())
    }

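    /// Handles the `BINDER_WRITE_READ` ioctl: first consumes commands from the
    /// write buffer, then fills the read buffer with work items, and finally
    /// writes the updated consumed counters back to userspace.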
    pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
        let (mut reader, mut writer) = data.reader_writer();
        let mut req = reader.read::<BinderWriteRead>()?;

        // Go through the write buffer.
        let mut ret = Ok(());
        if req.write_size > 0 {
            ret = self.write(&mut req);
            crate::trace::trace_write_done(ret);
            if let Err(err) = ret {
                pr_warn!(
                    "Write failure {:?} in pid:{}",
                    err,
                    self.process.pid_in_current_ns()
                );
                req.read_consumed = 0;
                writer.write(&req)?;
                self.inner.lock().looper_need_return = false;
                return ret;
            }
        }

        // Go through the work queue.
        if req.read_size > 0 {
            ret = self.read(&mut req, wait);
            crate::trace::trace_read_done(ret);
            if ret.is_err() && ret != Err(EINTR) {
                pr_warn!(
                    "Read failure {:?} in pid:{}",
                    ret,
                    self.process.pid_in_current_ns()
                );
            }
        }

        // Write the request back so that the consumed fields are visible to the caller.
        writer.write(&req)?;

        self.inner.lock().looper_need_return = false;

        ret
    }

    pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
        table.register_wait(file, &self.work_condvar);
        let mut inner = self.inner.lock();
        (inner.should_use_process_work_queue(), inner.poll())
    }

    /// Make any in-progress call to `get_work` or `get_work_local` return immediately.
    pub(crate) fn exit_looper(&self) {
        let mut inner = self.inner.lock();
        let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
        if should_notify {
            inner.looper_need_return = true;
        }
        drop(inner);

        if should_notify {
            self.work_condvar.notify_one();
        }
    }

    pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
        // Determine if we need to notify. This requires the lock.
        let inner = self.inner.lock();
        let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
        drop(inner);

        // Now that the lock is no longer held, notify the waiters if we have to.
        if notify {
            if sync {
                self.work_condvar.notify_sync();
            } else {
                self.work_condvar.notify_one();
            }
        }
    }

    pub(crate) fn release(self: &Arc<Self>) {
        self.inner.lock().is_dead = true;

        //self.work_condvar.clear();
        self.unwind_transaction_stack();

        // Cancel all pending work items.
        while let Ok(Some(work)) = self.get_work_local(false) {
            work.into_arc().cancel();
        }
    }
}

#[pin_data]
struct ThreadError {
    error_code: Atomic<u32>,
    #[pin]
    links_track: AtomicTracker,
}

impl ThreadError {
    fn try_new() -> Result<DArc<Self>> {
        DTRWrap::arc_pin_init(pin_init!(Self {
            error_code: Atomic::new(BR_OK),
            links_track <- AtomicTracker::new(),
        }))
        .map(ListArc::into_arc)
    }

    fn set_error_code(&self, code: u32) {
        self.error_code.store(code, Relaxed);
    }

    fn is_unused(&self) -> bool {
        self.error_code.load(Relaxed) == BR_OK
    }
}
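
// `InnerThread::reply_work` and `InnerThread::return_work` are preallocated
// instances of this type. `ListArc::try_from_arc` fails while an instance is
// still queued on a work list, which is what the "already in use" warnings in
// `push_reply_work`/`push_return_work` detect.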

impl DeliverToRead for ThreadError {
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let code = self.error_code.load(Relaxed);
        self.error_code.store(BR_OK, Relaxed);
        writer.write_code(code)?;
        Ok(true)
    }

    fn cancel(self: DArc<Self>) {}

    fn should_sync_wakeup(&self) -> bool {
        false
    }

    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        seq_print!(
            m,
            "{}transaction error: {}\n",
            prefix,
            self.error_code.load(Relaxed)
        );
        Ok(())
    }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for ThreadError {
        tracked_by links_track: AtomicTracker;
    }
}