1 // SPDX-License-Identifier: GPL-2.0
2
3 // Copyright (C) 2025 Google LLC.
4
5 //! This module defines the `Thread` type, which represents a userspace thread that is using
6 //! binder.
7 //!
8 //! The `Process` object stores all of the threads in an rb tree.
9
10 use kernel::{
11 bindings,
12 fs::{File, LocalFile},
13 list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
14 prelude::*,
15 security,
16 seq_file::SeqFile,
17 seq_print,
18 sync::atomic::{ordering::Relaxed, Atomic},
19 sync::poll::{PollCondVar, PollTable},
20 sync::{aref::ARef, Arc, SpinLock},
21 task::Task,
22 uaccess::UserSlice,
23 uapi,
24 };
25
26 use crate::{
27 allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
28 defs::*,
29 error::BinderResult,
30 process::{GetWorkOrRegister, Process},
31 ptr_align,
32 stats::GLOBAL_STATS,
33 transaction::Transaction,
34 BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
35 };
36
37 use core::mem::size_of;
38
/// Returns `true` if `value` is a multiple of the alignment `to`.
///
/// Callers pass non-zero alignments (e.g. `size_of::<u32>()`); a zero `to`
/// would panic on the modulo operation.
fn is_aligned(value: usize, to: usize) -> bool {
    let remainder = value % to;
    remainder == 0
}
42
/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
/// call and is discarded when it returns.
struct ScatterGatherState {
    /// A struct that tracks the amount of unused buffer space.
    unused_buffer_space: UnusedBufferSpace,
    /// Scatter-gather entries to copy.
    ///
    /// One entry is appended for each `BINDER_TYPE_PTR` object that is translated.
    sg_entries: KVec<ScatterGatherEntry>,
    /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
    /// was processed and all of its ancestors. The array is in sorted order.
    ///
    /// Used by `validate_parent_fixup` to restrict fixups to the last verified
    /// buffer object or one of its parents.
    ancestors: KVec<usize>,
}
54
/// This entry specifies an additional buffer that should be copied using the scatter-gather
/// mechanism.
struct ScatterGatherEntry {
    /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
    obj_index: usize,
    /// Offset in target buffer.
    offset: usize,
    /// User address in source buffer.
    sender_uaddr: usize,
    /// Number of bytes to copy.
    length: usize,
    /// The minimum offset of the next fixup in this buffer.
    ///
    /// Enforces that fixups within one buffer only ever happen at increasing offsets.
    fixup_min_offset: usize,
    /// The offsets within this buffer that contain pointers which should be translated.
    pointer_fixups: KVec<PointerFixupEntry>,
}
71
/// This entry specifies that a fixup should happen at `target_offset` of the
/// buffer.
enum PointerFixupEntry {
    /// A fixup for a `binder_buffer_object`: overwrite the pointer in the target
    /// buffer with its translated value.
    Fixup {
        /// The translated pointer to write.
        pointer_value: u64,
        /// The offset at which the value should be written. The offset is relative
        /// to the original buffer.
        target_offset: usize,
    },
    /// A skip for a `binder_fd_array_object`: the fd array region is not copied from
    /// the sender, it is filled in when the fds are translated on the receiving side.
    Skip {
        /// The number of bytes to skip.
        skip: usize,
        /// The offset at which the skip should happen. The offset is relative
        /// to the original buffer.
        target_offset: usize,
    },
}
92
/// Return type of `validate_parent_fixup`, describing where and how a verified
/// fixup should be applied.
struct ParentFixupInfo {
    /// The index of the parent buffer in `sg_entries`.
    parent_sg_index: usize,
    /// The number of ancestors of the buffer.
    ///
    /// The buffer is considered an ancestor of itself, so this is always at
    /// least one.
    num_ancestors: usize,
    /// New value of `fixup_min_offset` if this fixup is applied.
    new_min_offset: usize,
    /// The offset of the fixup in the target buffer.
    target_offset: usize,
}
107
impl ScatterGatherState {
    /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
    /// to access a region in its parent buffer. These accesses have various
    /// restrictions, which this method verifies.
    ///
    /// The `parent_offset` and `length` arguments describe the offset and
    /// length of the access in the parent buffer.
    ///
    /// # Detailed restrictions
    ///
    /// Obviously the fixup must be in-bounds for the parent buffer.
    ///
    /// For safety reasons, we only allow fixups inside a buffer to happen
    /// at increasing offsets; additionally, we only allow fixup on the last
    /// buffer object that was verified, or one of its parents.
    ///
    /// Example of what is allowed:
    ///
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = C, offset = 0)
    ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
    ///
    /// Examples of what is not allowed:
    ///
    /// Decreasing offsets within the same parent:
    /// A
    ///   C (parent = A, offset = 16)
    ///   B (parent = A, offset = 0) // decreasing offset within A
    ///
    /// Referring to a parent that wasn't the last object or any of its parents:
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///   D (parent = B, offset = 0) // B is not A or any of A's parents
    fn validate_parent_fixup(
        &self,
        parent: usize,
        parent_offset: usize,
        length: usize,
    ) -> Result<ParentFixupInfo> {
        // The parent must be the last verified object or one of its ancestors.
        // Using `position` would also be correct, but `rposition` avoids
        // quadratic running times.
        let ancestors_i = self
            .ancestors
            .iter()
            .copied()
            .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
            .ok_or(EINVAL)?;
        let sg_idx = self.ancestors[ancestors_i];
        let sg_entry = match self.sg_entries.get(sg_idx) {
            Some(sg_entry) => sg_entry,
            None => {
                // `ancestors` should only ever hold valid indexes into `sg_entries`;
                // reaching this branch indicates a driver bug.
                pr_err!(
                    "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
                    ancestors_i,
                    sg_idx,
                    self.sg_entries.len()
                );
                return Err(EINVAL);
            }
        };
        // Reject fixups at decreasing offsets within the parent buffer.
        if sg_entry.fixup_min_offset > parent_offset {
            pr_warn!(
                "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
                sg_entry.fixup_min_offset,
                parent_offset
            );
            return Err(EINVAL);
        }
        // The accessed region must be in-bounds for the parent buffer.
        let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
        if new_min_offset > sg_entry.length {
            pr_warn!(
                "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
                new_min_offset,
                sg_entry.length
            );
            return Err(EINVAL);
        }
        let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
        // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
        // most `self.ancestors.len()`, which also fits in a usize.
        Ok(ParentFixupInfo {
            parent_sg_index: sg_idx,
            num_ancestors: ancestors_i + 1,
            new_min_offset,
            target_offset,
        })
    }
}
200
/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
struct UnusedBufferSpace {
    /// The start of the remaining space.
    offset: usize,
    /// The end of the remaining space.
    limit: usize,
}
210 impl UnusedBufferSpace {
211 /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
212 /// into the buffer is returned.
claim_next(&mut self, size: usize) -> Result<usize>213 fn claim_next(&mut self, size: usize) -> Result<usize> {
214 // We require every chunk to be aligned.
215 let size = ptr_align(size).ok_or(EINVAL)?;
216 let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
217
218 if new_offset <= self.limit {
219 let offset = self.offset;
220 self.offset = new_offset;
221 Ok(offset)
222 } else {
223 Err(EINVAL)
224 }
225 }
226 }
227
/// Result of attempting to queue a work item on a thread.
pub(crate) enum PushWorkRes {
    /// The work item was queued on the thread's work list.
    Ok,
    /// The thread is dead; ownership of the work item is handed back to the caller.
    FailedDead(DLArc<dyn DeliverToRead>),
}
232
233 impl PushWorkRes {
is_ok(&self) -> bool234 fn is_ok(&self) -> bool {
235 match self {
236 PushWorkRes::Ok => true,
237 PushWorkRes::FailedDead(_) => false,
238 }
239 }
240 }
241
/// The fields of `Thread` protected by the spinlock.
struct InnerThread {
    /// Determines the looper state of the thread. It is a bit-wise combination of the constants
    /// prefixed with `LOOPER_`.
    looper_flags: u32,

    /// Determines whether the looper should return.
    looper_need_return: bool,

    /// Determines if thread is dead.
    is_dead: bool,

    /// Work item used to deliver error codes to the thread that started a transaction. Stored here
    /// so that it can be reused.
    reply_work: DArc<ThreadError>,

    /// Work item used to deliver error codes to the current thread. Stored here so that it can be
    /// reused.
    return_work: DArc<ThreadError>,

    /// Determines whether the work list below should be processed. When set to false, `work_list`
    /// is treated as if it were empty.
    process_work_list: bool,
    /// List of work items to deliver to userspace.
    work_list: List<DTRWrap<dyn DeliverToRead>>,
    /// The transaction this thread is currently attached to, if any. Replaced via
    /// `pop_transaction_to_reply`/`pop_transaction_replied` as the transaction stack unwinds.
    current_transaction: Option<DArc<Transaction>>,

    /// Extended error information for this thread.
    extended_error: ExtendedError,
}
272
/// Set when the thread registers as a looper (see `InnerThread::looper_register`).
const LOOPER_REGISTERED: u32 = 0x01;
/// Set when the thread enters the looper (see `InnerThread::looper_enter`).
const LOOPER_ENTERED: u32 = 0x02;
/// Set when the thread exits the looper (see `InnerThread::looper_exit`).
const LOOPER_EXITED: u32 = 0x04;
/// Set when registration/entry happen in an invalid combination, or registration was
/// not valid (see `looper_enter`/`looper_register`).
const LOOPER_INVALID: u32 = 0x08;
/// Set while the thread is blocked waiting for work (local or process queue).
const LOOPER_WAITING: u32 = 0x10;
/// Additionally set while the thread waits registered on the process-wide work queue.
const LOOPER_WAITING_PROC: u32 = 0x20;
/// Set once the thread has used the poll interface (see `InnerThread::poll`).
const LOOPER_POLL: u32 = 0x40;
280
impl InnerThread {
    /// Creates the spinlock-protected state of a new thread.
    ///
    /// Allocates the reusable `reply_work`/`return_work` error items up front so that
    /// delivering an error later cannot fail due to allocation.
    fn new() -> Result<Self> {
        // Generates a unique id for the extended-error struct of each thread.
        fn next_err_id() -> u32 {
            static EE_ID: Atomic<u32> = Atomic::new(0);
            EE_ID.fetch_add(1, Relaxed)
        }

        Ok(Self {
            looper_flags: 0,
            looper_need_return: false,
            is_dead: false,
            process_work_list: false,
            reply_work: ThreadError::try_new()?,
            return_work: ThreadError::try_new()?,
            work_list: List::new(),
            current_transaction: None,
            extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
        })
    }

    /// Pops the next work item from the local work list.
    ///
    /// Returns `None` when `process_work_list` is unset, even if the list is non-empty
    /// (deferred items wait until the flag is set again by `push_work`).
    fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
        if !self.process_work_list {
            return None;
        }

        let ret = self.work_list.pop_front();
        // Once the queue is drained, stop processing until new (non-deferred) work arrives.
        self.process_work_list = !self.work_list.is_empty();
        ret
    }

    /// Queues a work item on this thread, failing (and returning the item) if the
    /// thread is dead.
    fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        if self.is_dead {
            PushWorkRes::FailedDead(work)
        } else {
            self.work_list.push_back(work);
            self.process_work_list = true;
            PushWorkRes::Ok
        }
    }

    /// Queues the reusable `reply_work` item carrying `code`.
    ///
    /// If the item is already queued somewhere (the `ListArc` cannot be taken), the
    /// error is dropped with a warning rather than allocating a new item.
    fn push_reply_work(&mut self, code: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
            work.set_error_code(code);
            self.push_work(work);
        } else {
            pr_warn!("Thread reply work is already in use.");
        }
    }

    /// Queues the reusable `return_work` item carrying `reply` for this thread itself.
    fn push_return_work(&mut self, reply: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
            work.set_error_code(reply);
            self.push_work(work);
        } else {
            pr_warn!("Thread return work is already in use.");
        }
    }

    /// Used to push work items that do not need to be processed immediately and can wait until the
    /// thread gets another work item.
    ///
    /// Note: deliberately does not set `process_work_list`, so the item stays parked
    /// until non-deferred work arrives.
    fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
        self.work_list.push_back(work);
    }

    /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
    /// (that it could respond to) but it has also issued a transaction, it must first wait for the
    /// previously-issued transaction to complete.
    ///
    /// The `thread` parameter should be the thread containing this `ThreadInner`.
    fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
        let transaction = self.current_transaction.take().ok_or(EINVAL)?;
        // A thread cannot reply to a transaction it sent itself; put it back.
        if core::ptr::eq(thread, transaction.from.as_ref()) {
            self.current_transaction = Some(transaction);
            return Err(EINVAL);
        }
        // Find a new current transaction for this thread.
        self.current_transaction = transaction.find_from(thread).cloned();
        Ok(transaction)
    }

    /// Pops `transaction` off the stack if it is the current one, replacing it with the
    /// next transaction in the chain. Returns whether it was the current transaction.
    fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
        match self.current_transaction.take() {
            None => false,
            Some(old) => {
                // Only pop if it is the exact same transaction object.
                if !Arc::ptr_eq(transaction, &old) {
                    self.current_transaction = Some(old);
                    return false;
                }
                self.current_transaction = old.clone_next();
                true
            }
        }
    }

    /// Marks the thread as having entered the looper; entering after registering is an
    /// invalid combination.
    fn looper_enter(&mut self) {
        self.looper_flags |= LOOPER_ENTERED;
        if self.looper_flags & LOOPER_REGISTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    /// Marks the thread as registered; an invalid registration or registering after
    /// entering marks the looper state invalid.
    fn looper_register(&mut self, valid: bool) {
        self.looper_flags |= LOOPER_REGISTERED;
        if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    /// Marks the thread as having exited the looper.
    fn looper_exit(&mut self) {
        self.looper_flags |= LOOPER_EXITED;
    }

    /// Determines whether the thread is part of a pool, i.e., if it is a looper.
    fn is_looper(&self) -> bool {
        self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
    }

    /// Determines whether the thread should attempt to fetch work items from the process queue.
    /// This is generally case when the thread is registered as a looper and not part of a
    /// transaction stack. But if there is local work, we want to return to userspace before we
    /// deliver any remote work.
    fn should_use_process_work_queue(&self) -> bool {
        self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
    }

    /// Records that the thread uses poll and returns the poll mask for the current
    /// state: readable when there is local work to process or the looper must return.
    fn poll(&mut self) -> u32 {
        self.looper_flags |= LOOPER_POLL;
        if self.process_work_list || self.looper_need_return {
            bindings::POLLIN
        } else {
            0
        }
    }
}
415
/// This represents a thread that's used with binder.
#[pin_data]
pub(crate) struct Thread {
    /// The id of this thread.
    pub(crate) id: i32,
    /// The process this thread belongs to.
    pub(crate) process: Arc<Process>,
    /// The task backing this thread, captured via `kernel::current!` at creation.
    pub(crate) task: ARef<Task>,
    /// State shared with other threads, protected by a spinlock.
    #[pin]
    inner: SpinLock<InnerThread>,
    /// Condition variable (with poll support) used to wait for work.
    #[pin]
    work_condvar: PollCondVar,
    /// Used to insert this thread into the process' `ready_threads` list.
    ///
    /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
    #[pin]
    links: ListLinks,
    /// Tracks whether this thread is currently inserted in a list (see `ListArcSafe`).
    #[pin]
    links_track: AtomicTracker,
}
434
// Allow `Thread` to be held by at most one `ListArc` at a time, with membership
// tracked by the `links_track` atomic.
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Thread {
        tracked_by links_track: AtomicTracker;
    }
}
// Use the `links` field for the prev/next pointers when `Thread` is a list element.
kernel::list::impl_list_item! {
    impl ListItem<0> for Thread {
        using ListLinks { self.links };
    }
}
445
446 impl Thread {
new(id: i32, process: Arc<Process>) -> Result<Arc<Self>>447 pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
448 let inner = InnerThread::new()?;
449
450 Arc::pin_init(
451 try_pin_init!(Thread {
452 id,
453 process,
454 task: ARef::from(&**kernel::current!()),
455 inner <- kernel::new_spinlock!(inner, "Thread::inner"),
456 work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
457 links <- ListLinks::new(),
458 links_track <- AtomicTracker::new(),
459 }),
460 GFP_KERNEL,
461 )
462 }
463
    /// Prints debugging information about this thread to the seq file.
    ///
    /// When `print_all` is false, the thread header line is only emitted if the thread
    /// has a current transaction or pending work. The transaction stack is then walked,
    /// classifying each entry as outgoing (sent by this thread), incoming (targeted at
    /// this process), or bad.
    #[inline(never)]
    pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
        let inner = self.inner.lock();

        if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
            seq_print!(
                m,
                " thread {}: l {:02x} need_return {}\n",
                self.id,
                inner.looper_flags,
                inner.looper_need_return,
            );
        }

        // Walk the transaction stack starting from the current transaction.
        let mut t_opt = inner.current_transaction.as_ref();
        while let Some(t) = t_opt {
            if Arc::ptr_eq(&t.from, self) {
                // Sent by this thread: follow the sender's parent link.
                t.debug_print_inner(m, " outgoing transaction ");
                t_opt = t.from_parent.as_ref();
            } else if Arc::ptr_eq(&t.to, &self.process) {
                // Targeted at this process: follow the chain towards this thread.
                t.debug_print_inner(m, " incoming transaction ");
                t_opt = t.find_from(self);
            } else {
                // Neither direction matches: stop walking.
                t.debug_print_inner(m, " bad transaction ");
                t_opt = None;
            }
        }

        for work in &inner.work_list {
            work.debug_print(m, " ", " pending transaction ")?;
        }
        Ok(())
    }
497
get_extended_error(&self, data: UserSlice) -> Result498 pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
499 let mut writer = data.writer();
500 let ee = self.inner.lock().extended_error;
501 writer.write(&ee)?;
502 Ok(())
503 }
504
set_current_transaction(&self, transaction: DArc<Transaction>)505 pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
506 self.inner.lock().current_transaction = Some(transaction);
507 }
508
has_current_transaction(&self) -> bool509 pub(crate) fn has_current_transaction(&self) -> bool {
510 self.inner.lock().current_transaction.is_some()
511 }
512
    /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
    /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
    /// signal); otherwise it returns indicating that none is available.
    fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        {
            // If the looper must return, deliver whatever is locally available (possibly
            // nothing) without blocking.
            let mut inner = self.inner.lock();
            if inner.looper_need_return {
                return Ok(inner.pop_work());
            }
        }

        // Try once if the caller does not want to wait.
        if !wait {
            return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
        }

        // Loop waiting only on the local queue (i.e., not registering with the process queue).
        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            // Mark the thread as waiting while blocked on the condvar; the wait releases
            // and reacquires the spinlock.
            inner.looper_flags |= LOOPER_WAITING;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !LOOPER_WAITING;

            if signal_pending {
                return Err(EINTR);
            }
            if inner.looper_need_return {
                return Ok(None);
            }
        }
    }
548
    /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
    /// queue if none is available locally.
    ///
    /// This must only be called when the thread is not participating in a transaction chain. If it
    /// is, the local version (`get_work_local`) should be used instead.
    fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        // Try to get work from the thread's work queue, using only a local lock.
        {
            let mut inner = self.inner.lock();
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }
            if inner.looper_need_return {
                // Must return to userspace soon: take at most one item from the process
                // queue without blocking.
                drop(inner);
                return Ok(self.process.get_work());
            }
        }

        // If the caller doesn't want to wait, try to grab work from the process queue.
        //
        // We know nothing will have been queued directly to the thread queue because it is not in
        // a transaction and it is not in the process' ready list.
        if !wait {
            return self.process.get_work().ok_or(EAGAIN).map(Some);
        }

        // Get work from the process queue. If none is available, atomically register as ready.
        let reg = match self.process.get_work_or_register(self) {
            GetWorkOrRegister::Work(work) => return Ok(Some(work)),
            GetWorkOrRegister::Register(reg) => reg,
        };

        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            // While registered on the process ready list, wakeups may come from either
            // the local queue or process-wide work.
            inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);

            if signal_pending || inner.looper_need_return {
                // We need to return now. We need to pull the thread off the list of ready threads
                // (by dropping `reg`), then check the state again after it's off the list to
                // ensure that something was not queued in the meantime. If something has been
                // queued, we just return it (instead of the error).
                drop(inner);
                drop(reg);

                let res = match self.inner.lock().pop_work() {
                    Some(work) => Ok(Some(work)),
                    None if signal_pending => Err(EINTR),
                    None => Ok(None),
                };
                return res;
            }
        }
    }
608
609 /// Push the provided work item to be delivered to user space via this thread.
610 ///
611 /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes612 pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
613 let sync = work.should_sync_wakeup();
614
615 let res = self.inner.lock().push_work(work);
616
617 if res.is_ok() {
618 if sync {
619 self.work_condvar.notify_sync();
620 } else {
621 self.work_condvar.notify_one();
622 }
623 }
624
625 res
626 }
627
628 /// Attempts to push to given work item to the thread if it's a looper thread (i.e., if it's
629 /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult630 pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
631 let mut inner = self.inner.lock();
632 if inner.is_looper() && !inner.is_dead {
633 inner.push_work(work);
634 Ok(())
635 } else {
636 drop(inner);
637 self.process.push_work(work)
638 }
639 }
640
push_work_deferred(&self, work: DLArc<dyn DeliverToRead>)641 pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
642 self.inner.lock().push_work_deferred(work);
643 }
644
push_return_work(&self, reply: u32)645 pub(crate) fn push_return_work(&self, reply: u32) {
646 self.inner.lock().push_return_work(reply);
647 }
648
    /// Translates a single object embedded in a transaction payload so that it makes
    /// sense in the target process, writing the translated form into `view`.
    ///
    /// - `obj_index`: index of this object in the transaction's offset array.
    /// - `offset`: offset of the object within the target allocation.
    /// - `object`: the object, as read from the sender.
    /// - `view`: writable view of the target allocation.
    /// - `allow_fds`: whether `BINDER_TYPE_FD`/`BINDER_TYPE_FDA` objects are permitted.
    /// - `sg_state`: scatter-gather bookkeeping for `BINDER_TYPE_PTR` buffers.
    fn translate_object(
        &self,
        obj_index: usize,
        offset: usize,
        object: BinderObjectRef<'_>,
        view: &mut AllocationView<'_>,
        allow_fds: bool,
        sg_state: &mut ScatterGatherState,
    ) -> BinderResult {
        match object {
            BinderObjectRef::Binder(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
                // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
                // representation.
                let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
                let cookie = obj.cookie as _;
                let flags = obj.flags as _;
                // Look up (or create) the node for this binder object in the sending process.
                let node = self
                    .process
                    .as_arc_borrow()
                    .get_node(ptr, cookie, flags, strong, self)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Handle(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                // Resolve the sender's handle back to the underlying node.
                let node = self.process.get_node_from_handle(handle, strong)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Fd(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }

                // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
                let fd = unsafe { obj.__bindgen_anon_1.fd };
                let file = LocalFile::fget(fd)?;
                // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                // ioctl, so there are no active calls to `fdget_pos` on this thread.
                let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                security::binder_transfer_file(
                    &self.process.cred,
                    &view.alloc.process.cred,
                    &file,
                )?;

                let mut obj_write = BinderFdObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FD;
                // This will be overwritten with the actual fd when the transaction is received.
                obj_write.__bindgen_anon_1.fd = u32::MAX;
                obj_write.cookie = obj.cookie;
                view.write::<BinderFdObject>(offset, &obj_write)?;

                const FD_FIELD_OFFSET: usize =
                    core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);

                let field_offset = offset + FD_FIELD_OFFSET;

                // Record the file so the receiver-side fd can be installed and written at
                // `field_offset` when the transaction is delivered.
                view.alloc.info_add_fd(file, field_offset, false)?;
            }
            BinderObjectRef::Ptr(obj) => {
                let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
                // Reserve space in the target allocation for this extra buffer.
                let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
                    Ok(alloc_offset) => alloc_offset,
                    Err(err) => {
                        pr_warn!(
                            "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
                            sg_state.unused_buffer_space.offset,
                            sg_state.unused_buffer_space.limit,
                            obj_length,
                        );
                        return Err(err.into());
                    }
                };

                // Queue a scatter-gather copy of the buffer contents; the actual copy
                // happens later in `apply_sg`.
                let sg_state_idx = sg_state.sg_entries.len();
                sg_state.sg_entries.push(
                    ScatterGatherEntry {
                        obj_index,
                        offset: alloc_offset,
                        sender_uaddr: obj.buffer as _,
                        length: obj_length,
                        pointer_fixups: KVec::new(),
                        fixup_min_offset: 0,
                    },
                    GFP_KERNEL,
                )?;

                let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;

                if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
                    // A top-level buffer starts a fresh ancestor chain.
                    sg_state.ancestors.clear();
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
                } else {
                    // Another buffer also has a pointer to this buffer, and we need to fixup that
                    // pointer too.

                    let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                    let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;

                    let info = sg_state.validate_parent_fixup(
                        parent_index,
                        parent_offset,
                        size_of::<u64>(),
                    )?;

                    // This buffer becomes the deepest element of the ancestor chain.
                    sg_state.ancestors.truncate(info.num_ancestors);
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;

                    let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                        Some(parent_entry) => parent_entry,
                        None => {
                            pr_err!(
                                "validate_parent_fixup returned index out of bounds for sg.entries"
                            );
                            return Err(EINVAL.into());
                        }
                    };

                    // Record the pointer rewrite to perform in the parent buffer during `apply_sg`.
                    parent_entry.fixup_min_offset = info.new_min_offset;
                    parent_entry.pointer_fixups.push(
                        PointerFixupEntry::Fixup {
                            pointer_value: buffer_ptr_in_user_space,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )?;
                }

                let mut obj_write = BinderBufferObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_PTR;
                obj_write.flags = obj.flags;
                obj_write.buffer = buffer_ptr_in_user_space;
                obj_write.length = obj.length;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderBufferObject>(offset, &obj_write)?;
            }
            BinderObjectRef::Fda(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }
                let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
                let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
                let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;

                // The fd array must be u32-aligned within its parent buffer.
                if !is_aligned(parent_offset, size_of::<u32>()) {
                    return Err(EINVAL.into());
                }

                let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
                // Reserve capacity for the fd entries up front so later additions cannot fail.
                view.alloc.info_add_fd_reserve(num_fds)?;

                sg_state.ancestors.truncate(info.num_ancestors);
                let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                    Some(parent_entry) => parent_entry,
                    None => {
                        pr_err!(
                            "validate_parent_fixup returned index out of bounds for sg.entries"
                        );
                        return Err(EINVAL.into());
                    }
                };

                // The source address must also be u32-aligned for the per-fd reads below.
                if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
                    return Err(EINVAL.into());
                }

                // Skip the fd array region when copying the parent buffer; the receiver-side
                // fds are installed separately at delivery time.
                parent_entry.fixup_min_offset = info.new_min_offset;
                parent_entry
                    .pointer_fixups
                    .push(
                        PointerFixupEntry::Skip {
                            skip: fds_len,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )
                    .map_err(|_| ENOMEM)?;

                let fda_uaddr = parent_entry
                    .sender_uaddr
                    .checked_add(parent_offset)
                    .ok_or(EINVAL)?;

                // Read the sender's fd array from userspace in one go.
                let mut fda_bytes = KVec::new();
                UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
                    .read_all(&mut fda_bytes, GFP_KERNEL)?;

                if fds_len != fda_bytes.len() {
                    pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
                    return Err(EINVAL.into());
                }

                for i in (0..fds_len).step_by(size_of::<u32>()) {
                    let fd = {
                        let mut fd_bytes = [0u8; size_of::<u32>()];
                        fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
                        u32::from_ne_bytes(fd_bytes)
                    };

                    let file = LocalFile::fget(fd)?;
                    // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                    // ioctl, so there are no active calls to `fdget_pos` on this thread.
                    let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                    security::binder_transfer_file(
                        &self.process.cred,
                        &view.alloc.process.cred,
                        &file,
                    )?;

                    // The `validate_parent_fixup` call ensures that this addition will not
                    // overflow.
                    view.alloc.info_add_fd(file, info.target_offset + i, true)?;
                }
                drop(fda_bytes);

                let mut obj_write = BinderFdArrayObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FDA;
                obj_write.num_fds = obj.num_fds;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderFdArrayObject>(offset, &obj_write)?;
            }
        }
        Ok(())
    }
880
    /// Performs the scatter-gather copies recorded in `sg_state` into `alloc`.
    ///
    /// For each entry, the sender's buffer is copied into the target allocation in
    /// chunks between fixup points; at each `Fixup` the translated pointer is written
    /// instead of the sender's bytes, and at each `Skip` the region is left for later
    /// (fd arrays). Fixups were validated to be at increasing offsets.
    fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
        for sg_entry in &mut sg_state.sg_entries {
            // Tracks the end of the last fixup, i.e. where the next plain copy starts.
            let mut end_of_previous_fixup = sg_entry.offset;
            let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;

            let mut reader =
                UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
            for fixup in &mut sg_entry.pointer_fixups {
                let (fixup_len, fixup_offset) = match fixup {
                    PointerFixupEntry::Fixup { target_offset, .. } => {
                        (size_of::<u64>(), *target_offset)
                    }
                    PointerFixupEntry::Skip {
                        skip,
                        target_offset,
                    } => (*skip, *target_offset),
                };

                // Fixups must be in-bounds and at non-decreasing offsets.
                let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
                if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
                    pr_warn!(
                        "Fixups oob {} {} {} {}",
                        fixup_offset,
                        end_of_previous_fixup,
                        offset_end,
                        target_offset_end
                    );
                    return Err(EINVAL.into());
                }

                // Copy the plain bytes between the previous fixup and this one.
                let copy_off = end_of_previous_fixup;
                let copy_len = fixup_offset - end_of_previous_fixup;
                if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                    pr_warn!("Failed copying into alloc: {:?}", err);
                    return Err(err.into());
                }
                // For pointer fixups, write the translated pointer; skips write nothing.
                if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
                    let res = alloc.write::<u64>(fixup_offset, pointer_value);
                    if let Err(err) = res {
                        pr_warn!("Failed copying ptr into alloc: {:?}", err);
                        return Err(err.into());
                    }
                }
                // Advance the reader past the sender's bytes covered by the fixup.
                if let Err(err) = reader.skip(fixup_len) {
                    pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
                    return Err(err.into());
                }
                end_of_previous_fixup = target_offset_end;
            }
            // Copy the tail of the buffer after the last fixup.
            let copy_off = end_of_previous_fixup;
            let copy_len = offset_end - end_of_previous_fixup;
            if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                pr_warn!("Failed copying remainder into alloc: {:?}", err);
                return Err(err.into());
            }
        }
        Ok(())
    }
939
    /// This method copies the payload of a transaction into the target process.
    ///
    /// The resulting payload will have several different components, which will be stored next to
    /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
    /// and those objects have to be translated so that they make sense to the target transaction.
    ///
    /// The allocation layout, in order, is: the transaction data (padded to pointer alignment),
    /// the offsets array, the scatter-gather buffers, and finally the security context (when
    /// `txn_security_ctx_offset` is provided; its offset is written back through that argument).
    pub(crate) fn copy_transaction_data(
        &self,
        to_process: Arc<Process>,
        tr: &BinderTransactionDataSg,
        debug_id: usize,
        allow_fds: bool,
        txn_security_ctx_offset: Option<&mut usize>,
    ) -> BinderResult<NewAllocation> {
        let trd = &tr.transaction_data;
        let is_oneway = trd.flags & TF_ONE_WAY != 0;
        // Fetch the sender's security context up front if the target asked for one.
        let mut secctx = if let Some(offset) = txn_security_ctx_offset {
            let secid = self.process.cred.get_secid();
            let ctx = match security::SecurityCtx::from_secid(secid) {
                Ok(ctx) => ctx,
                Err(err) => {
                    pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
                    return Err(err.into());
                }
            };
            Some((offset, ctx))
        } else {
            None
        };

        let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
        let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
        let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
        let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
        let aligned_secctx_size = match secctx.as_ref() {
            Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
            None => 0,
        };

        // The offsets array and the scatter-gather region are both arrays of u64-sized
        // elements, so their byte sizes must be multiples of eight.
        if !is_aligned(offsets_size, size_of::<u64>()) {
            return Err(EINVAL.into());
        }
        if !is_aligned(buffers_size, size_of::<u64>()) {
            return Err(EINVAL.into());
        }

        // This guarantees that at least `sizeof(usize)` bytes will be allocated.
        let len = usize::max(
            aligned_data_size
                .checked_add(offsets_size)
                .and_then(|sum| sum.checked_add(buffers_size))
                .and_then(|sum| sum.checked_add(aligned_secctx_size))
                .ok_or(ENOMEM)?,
            size_of::<u64>(),
        );
        // No overflow: this sum was already checked when computing `len` above.
        let secctx_off = aligned_data_size + offsets_size + buffers_size;
        let mut alloc =
            match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
                Ok(alloc) => alloc,
                Err(err) => {
                    pr_warn!(
                        "Failed to allocate buffer. len:{}, is_oneway:{}",
                        len,
                        is_oneway
                    );
                    return Err(err);
                }
            };

        // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
        // all bit-patterns.
        let trd_data_ptr = unsafe { &trd.data.ptr };
        let mut buffer_reader =
            UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
        let mut end_of_previous_object = 0;
        let mut sg_state = None;

        // Copy offsets if there are any.
        if offsets_size > 0 {
            let mut offsets_reader =
                UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
                    .reader();

            // The offsets array lives immediately after the (aligned) transaction data.
            let offsets_start = aligned_data_size;
            let offsets_end = aligned_data_size + offsets_size;

            // This state is used for BINDER_TYPE_PTR objects.
            let sg_state = sg_state.insert(ScatterGatherState {
                unused_buffer_space: UnusedBufferSpace {
                    offset: offsets_end,
                    limit: offsets_end + buffers_size,
                },
                sg_entries: KVec::new(),
                ancestors: KVec::new(),
            });

            // Traverse the objects specified.
            let mut view = AllocationView::new(&mut alloc, data_size);
            for (index, index_offset) in (offsets_start..offsets_end)
                .step_by(size_of::<u64>())
                .enumerate()
            {
                // Copy each offset entry into the target allocation as we go.
                let offset = offsets_reader.read::<u64>()?;
                view.alloc.write(index_offset, &offset)?;
                let offset: usize = offset.try_into().map_err(|_| EINVAL)?;

                // Offsets must be sorted, non-overlapping, and u32-aligned.
                if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
                    pr_warn!("Got transaction with invalid offset.");
                    return Err(EINVAL.into());
                }

                // Copy data between two objects.
                if end_of_previous_object < offset {
                    view.copy_into(
                        &mut buffer_reader,
                        end_of_previous_object,
                        offset - end_of_previous_object,
                    )?;
                }

                let mut object = BinderObject::read_from(&mut buffer_reader)?;

                // Translate the object so its contents make sense in the target process.
                match self.translate_object(
                    index,
                    offset,
                    object.as_ref(),
                    &mut view,
                    allow_fds,
                    sg_state,
                ) {
                    Ok(()) => end_of_previous_object = offset + object.size(),
                    Err(err) => {
                        pr_warn!("Error while translating object.");
                        return Err(err);
                    }
                }

                // Update the indexes containing objects to clean up.
                let offset_after_object = index_offset + size_of::<u64>();
                view.alloc
                    .set_info_offsets(offsets_start..offset_after_object);
            }
        }

        // Copy remaining raw data.
        alloc.copy_into(
            &mut buffer_reader,
            end_of_previous_object,
            data_size - end_of_previous_object,
        )?;

        // Copy the scatter-gather buffers and apply the pointer fixups recorded above.
        if let Some(sg_state) = sg_state.as_mut() {
            if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
                pr_warn!("Failure in apply_sg: {:?}", err);
                return Err(err);
            }
        }

        // Append the security context and report its offset back to the caller.
        if let Some((off_out, secctx)) = secctx.as_mut() {
            if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
                pr_warn!("Failed to write security context: {:?}", err);
                return Err(err.into());
            }
            **off_out = secctx_off;
        }
        Ok(alloc)
    }
1106
unwind_transaction_stack(self: &Arc<Self>)1107 fn unwind_transaction_stack(self: &Arc<Self>) {
1108 let mut thread = self.clone();
1109 while let Ok(transaction) = {
1110 let mut inner = thread.inner.lock();
1111 inner.pop_transaction_to_reply(thread.as_ref())
1112 } {
1113 let reply = Err(BR_DEAD_REPLY);
1114 if !transaction.from.deliver_single_reply(reply, &transaction) {
1115 break;
1116 }
1117
1118 thread = transaction.from.clone();
1119 }
1120 }
1121
deliver_reply( &self, reply: Result<DLArc<Transaction>, u32>, transaction: &DArc<Transaction>, )1122 pub(crate) fn deliver_reply(
1123 &self,
1124 reply: Result<DLArc<Transaction>, u32>,
1125 transaction: &DArc<Transaction>,
1126 ) {
1127 if self.deliver_single_reply(reply, transaction) {
1128 transaction.from.unwind_transaction_stack();
1129 }
1130 }
1131
1132 /// Delivers a reply to the thread that started a transaction. The reply can either be a
1133 /// reply-transaction or an error code to be delivered instead.
1134 ///
1135 /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
1136 /// transaction stack by completing transactions for threads that are dead.
deliver_single_reply( &self, reply: Result<DLArc<Transaction>, u32>, transaction: &DArc<Transaction>, ) -> bool1137 fn deliver_single_reply(
1138 &self,
1139 reply: Result<DLArc<Transaction>, u32>,
1140 transaction: &DArc<Transaction>,
1141 ) -> bool {
1142 if let Ok(transaction) = &reply {
1143 crate::trace::trace_transaction(true, transaction, Some(&self.task));
1144 transaction.set_outstanding(&mut self.process.inner.lock());
1145 }
1146
1147 {
1148 let mut inner = self.inner.lock();
1149 if !inner.pop_transaction_replied(transaction) {
1150 return false;
1151 }
1152
1153 if inner.is_dead {
1154 return true;
1155 }
1156
1157 match reply {
1158 Ok(work) => {
1159 inner.push_work(work);
1160 }
1161 Err(code) => inner.push_reply_work(code),
1162 }
1163 }
1164
1165 // Notify the thread now that we've released the inner lock.
1166 self.work_condvar.notify_sync();
1167 false
1168 }
1169
1170 /// Determines if the given transaction is the current transaction for this thread.
is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool1171 fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
1172 let inner = self.inner.lock();
1173 match &inner.current_transaction {
1174 None => false,
1175 Some(current) => Arc::ptr_eq(current, transaction),
1176 }
1177 }
1178
1179 /// Determines the current top of the transaction stack. It fails if the top is in another
1180 /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
1181 /// [`None`] if the thread is not currently participating in a transaction stack.
top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>>1182 fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
1183 let inner = self.inner.lock();
1184 if let Some(cur) = &inner.current_transaction {
1185 if core::ptr::eq(self, cur.from.as_ref()) {
1186 pr_warn!("got new transaction with bad transaction stack");
1187 return Err(EINVAL);
1188 }
1189 Ok(Some(cur.clone()))
1190 } else {
1191 Ok(None)
1192 }
1193 }
1194
transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T) where T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,1195 fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
1196 where
1197 T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
1198 {
1199 if let Err(err) = inner(self, tr) {
1200 if err.should_pr_warn() {
1201 let mut ee = self.inner.lock().extended_error;
1202 ee.command = err.reply;
1203 ee.param = err.as_errno();
1204 pr_warn!(
1205 "Transaction failed: {:?} my_pid:{}",
1206 err,
1207 self.process.pid_in_current_ns()
1208 );
1209 }
1210
1211 self.push_return_work(err.reply);
1212 }
1213 }
1214
    /// Handles a two-way (`BC_TRANSACTION`/`BC_TRANSACTION_SG`) transaction: creates the
    /// transaction targeting the node behind `handle`, pushes it on this thread's transaction
    /// stack, queues a deferred `BR_TRANSACTION_COMPLETE`, and submits it to the target.
    fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        // SAFETY: Handle's type has no invalid bit patterns.
        let handle = unsafe { tr.transaction_data.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        // Let the LSM veto the transaction between these two processes.
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
        // could this happen?
        let top = self.top_of_transaction_stack()?;
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
        let completion = list_completion.clone_arc();
        let transaction = Transaction::new(node_ref, top, self, tr)?;

        // Check that the transaction stack hasn't changed while the lock was released, then update
        // it with the new transaction.
        {
            let mut inner = self.inner.lock();
            if !transaction.is_stacked_on(&inner.current_transaction) {
                pr_warn!("Transaction stack changed during transaction!");
                return Err(EINVAL.into());
            }
            inner.current_transaction = Some(transaction.clone_arc());
            // We push the completion as a deferred work so that we wait for the reply before
            // returning to userland.
            inner.push_work_deferred(list_completion);
        }

        if let Err(e) = transaction.submit() {
            // Submission failed: mark the queued completion as skipped and pop the
            // transaction we just pushed, restoring the previous top of the stack.
            completion.skip();
            // Define `transaction` first to drop it after `inner`.
            let transaction;
            let mut inner = self.inner.lock();
            transaction = inner.current_transaction.take().unwrap();
            inner.current_transaction = transaction.clone_next();
            Err(e)
        } else {
            Ok(())
        }
    }
1253
    /// Handles a `BC_REPLY`/`BC_REPLY_SG` command: pops the transaction this thread must reply
    /// to and delivers the reply to the original sender.
    ///
    /// Even if building the reply fails, the sender is informed (with `BR_FAILED_REPLY`) and
    /// this caller still receives `BR_TRANSACTION_COMPLETE`, since the transaction is consumed
    /// either way.
    fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        let orig = self.inner.lock().pop_transaction_to_reply(self)?;
        // The sender must still be waiting on exactly this transaction.
        if !orig.from.is_current_transaction(&orig) {
            return Err(EINVAL.into());
        }

        // We need to complete the transaction even if we cannot complete building the reply.
        let out = (|| -> BinderResult<_> {
            let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
            let process = orig.from.process.clone();
            let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
            let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
            self.inner.lock().push_work(completion);
            orig.from.deliver_reply(Ok(reply), &orig);
            Ok(())
        })()
        .map_err(|mut err| {
            // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
            // the sender know that the transaction has completed (with an error in this case).
            pr_warn!(
                "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
                err
            );
            let reply = Err(BR_FAILED_REPLY);
            orig.from.deliver_reply(reply, &orig);
            err.reply = BR_TRANSACTION_COMPLETE;
            err
        });

        out
    }
1285
oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult1286 fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1287 // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
1288 // union is okay.
1289 let handle = unsafe { tr.transaction_data.target.handle };
1290 let node_ref = self.process.get_transaction_node(handle)?;
1291 security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
1292 let transaction = Transaction::new(node_ref, None, self, tr)?;
1293 let code = if self.process.is_oneway_spam_detection_enabled()
1294 && transaction.oneway_spam_detected
1295 {
1296 BR_ONEWAY_SPAM_SUSPECT
1297 } else {
1298 BR_TRANSACTION_COMPLETE
1299 };
1300 let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
1301 let completion = list_completion.clone_arc();
1302 self.inner.lock().push_work(list_completion);
1303 match transaction.submit() {
1304 Ok(()) => Ok(()),
1305 Err(err) => {
1306 completion.skip();
1307 Err(err)
1308 }
1309 }
1310 }
1311
    /// Executes the write half of a `BINDER_WRITE_READ` ioctl.
    ///
    /// Decodes and dispatches `BC_*` commands from the caller's write buffer, starting at
    /// `write_consumed`. Processing stops when fewer than four bytes remain or when a return
    /// error is already pending for this thread (`return_work` is in use). `write_consumed`
    /// is advanced after each command so userspace can resume where processing stopped.
    fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
        let write_start = req.write_buffer.wrapping_add(req.write_consumed);
        let write_len = req.write_size.saturating_sub(req.write_consumed);
        let mut reader =
            UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();

        while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
            let before = reader.len();
            let cmd = reader.read::<u32>()?;
            // Account the command in both the global and per-process statistics.
            GLOBAL_STATS.inc_bc(cmd);
            self.process.stats.inc_bc(cmd);
            match cmd {
                BC_TRANSACTION => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_TRANSACTION_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_REPLY => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_REPLY_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_FREE_BUFFER => {
                    let buffer = self.process.buffer_get(reader.read()?);
                    if let Some(buffer) = buffer {
                        if buffer.looper_need_return_on_free() {
                            self.inner.lock().looper_need_return = true;
                        }
                        drop(buffer);
                    }
                }
                // The two boolean arguments to `update_ref` appear to be
                // (increment, strong) — TODO confirm against its definition.
                BC_INCREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, false)?
                }
                BC_ACQUIRE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, true)?
                }
                BC_RELEASE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, true)?
                }
                BC_DECREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, false)?
                }
                BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
                BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
                BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
                BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
                BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
                BC_REGISTER_LOOPER => {
                    let valid = self.process.register_thread();
                    self.inner.lock().looper_register(valid);
                }
                BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
                BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
                BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
                BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
                BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,

                // Fail if given an unknown error code.
                // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
                _ => return Err(EINVAL),
            }
            // Update the number of write bytes consumed.
            req.write_consumed += (before - reader.len()) as u64;
        }

        Ok(())
    }
1402
    /// Executes the read half of a `BINDER_WRITE_READ` ioctl.
    ///
    /// Pops work items (from the process queue or this thread's local queue, depending on
    /// the looper state) and serializes them into the caller's read buffer, blocking for the
    /// first item when `wait` is true. `read_consumed` is advanced to reflect the bytes
    /// written.
    fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
        let read_start = req.read_buffer.wrapping_add(req.read_consumed);
        let read_len = req.read_size.saturating_sub(req.read_consumed);
        let mut writer = BinderReturnWriter::new(
            UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
            self,
        );
        let (in_pool, use_proc_queue) = {
            let inner = self.inner.lock();
            (inner.is_looper(), inner.should_use_process_work_queue())
        };

        // Choose where work items come from based on the looper state.
        let getter = if use_proc_queue {
            Self::get_work
        } else {
            Self::get_work_local
        };

        // Reserve some room at the beginning of the read buffer so that we can send a
        // BR_SPAWN_LOOPER if we need to.
        let mut has_noop_placeholder = false;
        if req.read_consumed == 0 {
            if let Err(err) = writer.write_code(BR_NOOP) {
                pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
                return Err(err);
            }
            has_noop_placeholder = true;
        }

        // Loop doing work while there is room in the buffer.
        let initial_len = writer.len();
        // Only continue while the largest possible return item (a secctx transaction plus
        // its 4-byte code) still fits.
        while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
            // Only block for work if nothing has been written yet in this call.
            match getter(self, wait && initial_len == writer.len()) {
                Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
                    Ok(true) => {}
                    Ok(false) => break,
                    Err(err) => {
                        return Err(err);
                    }
                },
                Ok(None) => {
                    break;
                }
                Err(err) => {
                    // Propagate the error if we haven't written anything else.
                    if err != EINTR && err != EAGAIN {
                        pr_warn!("Failure in work getter: {:?}", err);
                    }
                    if initial_len == writer.len() {
                        return Err(err);
                    } else {
                        break;
                    }
                }
            }
        }

        req.read_consumed += read_len - writer.len() as u64;

        // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
        // This overwrites the BR_NOOP placeholder written at the start of the buffer above.
        if has_noop_placeholder && in_pool && self.process.needs_thread() {
            let mut writer =
                UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
                    .writer();
            writer.write(&BR_SPAWN_LOOPER)?;
        }
        Ok(())
    }
1471
write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result1472 pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
1473 let (mut reader, mut writer) = data.reader_writer();
1474 let mut req = reader.read::<BinderWriteRead>()?;
1475
1476 // Go through the write buffer.
1477 let mut ret = Ok(());
1478 if req.write_size > 0 {
1479 ret = self.write(&mut req);
1480 if let Err(err) = ret {
1481 pr_warn!(
1482 "Write failure {:?} in pid:{}",
1483 err,
1484 self.process.pid_in_current_ns()
1485 );
1486 req.read_consumed = 0;
1487 writer.write(&req)?;
1488 self.inner.lock().looper_need_return = false;
1489 return ret;
1490 }
1491 }
1492
1493 // Go through the work queue.
1494 if req.read_size > 0 {
1495 ret = self.read(&mut req, wait);
1496 if ret.is_err() && ret != Err(EINTR) {
1497 pr_warn!(
1498 "Read failure {:?} in pid:{}",
1499 ret,
1500 self.process.pid_in_current_ns()
1501 );
1502 }
1503 }
1504
1505 // Write the request back so that the consumed fields are visible to the caller.
1506 writer.write(&req)?;
1507
1508 self.inner.lock().looper_need_return = false;
1509
1510 ret
1511 }
1512
poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32)1513 pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
1514 table.register_wait(file, &self.work_condvar);
1515 let mut inner = self.inner.lock();
1516 (inner.should_use_process_work_queue(), inner.poll())
1517 }
1518
1519 /// Make the call to `get_work` or `get_work_local` return immediately, if any.
exit_looper(&self)1520 pub(crate) fn exit_looper(&self) {
1521 let mut inner = self.inner.lock();
1522 let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
1523 if should_notify {
1524 inner.looper_need_return = true;
1525 }
1526 drop(inner);
1527
1528 if should_notify {
1529 self.work_condvar.notify_one();
1530 }
1531 }
1532
notify_if_poll_ready(&self, sync: bool)1533 pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
1534 // Determine if we need to notify. This requires the lock.
1535 let inner = self.inner.lock();
1536 let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
1537 drop(inner);
1538
1539 // Now that the lock is no longer held, notify the waiters if we have to.
1540 if notify {
1541 if sync {
1542 self.work_condvar.notify_sync();
1543 } else {
1544 self.work_condvar.notify_one();
1545 }
1546 }
1547 }
1548
release(self: &Arc<Self>)1549 pub(crate) fn release(self: &Arc<Self>) {
1550 self.inner.lock().is_dead = true;
1551
1552 //self.work_condvar.clear();
1553 self.unwind_transaction_stack();
1554
1555 // Cancel all pending work items.
1556 while let Ok(Some(work)) = self.get_work_local(false) {
1557 work.into_arc().cancel();
1558 }
1559 }
1560 }
1561
/// A reusable work item that delivers a single pending error code to a thread the next
/// time it reads from binder.
#[pin_data]
struct ThreadError {
    /// The pending error code; `BR_OK` means the slot is unused.
    error_code: Atomic<u32>,
    /// Tracks whether this item is currently inserted in a work list.
    #[pin]
    links_track: AtomicTracker,
}
1568
impl ThreadError {
    /// Allocates a new `ThreadError` with no error pending (`BR_OK`).
    fn try_new() -> Result<DArc<Self>> {
        DTRWrap::arc_pin_init(pin_init!(Self {
            error_code: Atomic::new(BR_OK),
            links_track <- AtomicTracker::new(),
        }))
        .map(ListArc::into_arc)
    }

    /// Records `code` as the pending error to deliver.
    fn set_error_code(&self, code: u32) {
        self.error_code.store(code, Relaxed);
    }

    /// Returns `true` if no error is pending (the stored code is still `BR_OK`).
    fn is_unused(&self) -> bool {
        self.error_code.load(Relaxed) == BR_OK
    }
}
1586
impl DeliverToRead for ThreadError {
    /// Writes the pending error code to the reader and resets the slot to `BR_OK`
    /// so the item can be reused for a future error.
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let code = self.error_code.load(Relaxed);
        self.error_code.store(BR_OK, Relaxed);
        writer.write_code(code)?;
        Ok(true)
    }

    /// Nothing to clean up when this work item is cancelled.
    fn cancel(self: DArc<Self>) {}

    /// Error deliveries never request a synchronous wakeup.
    fn should_sync_wakeup(&self) -> bool {
        false
    }

    /// Prints the pending error code for debugfs-style dumps.
    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        seq_print!(
            m,
            "{}transaction error: {}\n",
            prefix,
            self.error_code.load(Relaxed)
        );
        Ok(())
    }
}
1615
// Allow `ThreadError` to be stored in a `List`: its list-membership state is tracked
// by the `links_track` atomic.
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for ThreadError {
        tracked_by links_track: AtomicTracker;
    }
}
1621