// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! This module defines the `Thread` type, which represents a userspace thread that is using
//! binder.
//!
//! The `Process` object stores all of the threads in an rb tree.

use kernel::{
    bindings,
    fs::{File, LocalFile},
    list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
    prelude::*,
    security,
    seq_file::SeqFile,
    seq_print,
    sync::atomic::{ordering::Relaxed, Atomic},
    sync::poll::{PollCondVar, PollTable},
    sync::{aref::ARef, Arc, SpinLock},
    task::Task,
    uaccess::{UserPtr, UserSlice},
    uapi,
};

use crate::{
    allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
    defs::*,
    error::BinderResult,
    process::{GetWorkOrRegister, Process},
    ptr_align,
    stats::GLOBAL_STATS,
    transaction::Transaction,
    BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
};

use core::mem::size_of;

fn is_aligned(value: usize, to: usize) -> bool {
    value % to == 0
}

/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
/// call and is discarded when it returns.
struct ScatterGatherState {
    /// A struct that tracks the amount of unused buffer space.
    unused_buffer_space: UnusedBufferSpace,
    /// Scatter-gather entries to copy.
    sg_entries: KVec<ScatterGatherEntry>,
    /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
    /// was processed and all of its ancestors. The array is in sorted order.
    ancestors: KVec<usize>,
}

/// This entry specifies an additional buffer that should be copied using the scatter-gather
/// mechanism.
struct ScatterGatherEntry {
    /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
    obj_index: usize,
    /// Offset in target buffer.
    offset: usize,
    /// User address in source buffer.
    sender_uaddr: usize,
    /// Number of bytes to copy.
    length: usize,
    /// The minimum offset of the next fixup in this buffer.
    fixup_min_offset: usize,
    /// The offsets within this buffer that contain pointers which should be translated.
    pointer_fixups: KVec<PointerFixupEntry>,
}

/// This entry specifies that a fixup should happen at `target_offset` of the
/// buffer.
enum PointerFixupEntry {
    /// A fixup for a `binder_buffer_object`.
    Fixup {
        /// The translated pointer to write.
        pointer_value: u64,
        /// The offset at which the value should be written. The offset is relative
        /// to the original buffer.
        target_offset: usize,
    },
    /// A skip for a `binder_fd_array_object`.
    Skip {
        /// The number of bytes to skip.
        skip: usize,
        /// The offset at which the skip should happen. The offset is relative
        /// to the original buffer.
        target_offset: usize,
    },
}

/// Return type of `apply_and_validate_fixup_in_parent`.
struct ParentFixupInfo {
    /// The index of the parent buffer in `sg_entries`.
    parent_sg_index: usize,
    /// The number of ancestors of the buffer.
    ///
    /// The buffer is considered an ancestor of itself, so this is always at
    /// least one.
    num_ancestors: usize,
    /// New value of `fixup_min_offset` if this fixup is applied.
    new_min_offset: usize,
    /// The offset of the fixup in the target buffer.
    target_offset: usize,
}

impl ScatterGatherState {
    /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
    /// to access a region in its parent buffer. These accesses have various
    /// restrictions, which this method verifies.
    ///
    /// The `parent_offset` and `length` arguments describe the offset and
    /// length of the access in the parent buffer.
    ///
    /// # Detailed restrictions
    ///
    /// Obviously the fixup must be in-bounds for the parent buffer.
    ///
    /// For safety reasons, we only allow fixups inside a buffer to happen
    /// at increasing offsets; additionally, we only allow fixup on the last
    /// buffer object that was verified, or one of its parents.
    ///
    /// Example of what is allowed:
    ///
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = C, offset = 0)
    ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
    ///
    /// Examples of what is not allowed:
    ///
    /// Decreasing offsets within the same parent:
    /// A
    ///   C (parent = A, offset = 16)
    ///   B (parent = A, offset = 0) // decreasing offset within A
    ///
    /// Referring to a parent that wasn't the last object or any of its parents:
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = B, offset = 0) // B is not A or any of A's parents
    fn validate_parent_fixup(
        &self,
        parent: usize,
        parent_offset: usize,
        length: usize,
    ) -> Result<ParentFixupInfo> {
        // Using `position` would also be correct, but `rposition` avoids
        // quadratic running times.
        let ancestors_i = self
            .ancestors
            .iter()
            .copied()
            .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
            .ok_or(EINVAL)?;
        let sg_idx = self.ancestors[ancestors_i];
        let sg_entry = match self.sg_entries.get(sg_idx) {
            Some(sg_entry) => sg_entry,
            None => {
                pr_err!(
                    "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
                    ancestors_i,
                    sg_idx,
                    self.sg_entries.len()
                );
                return Err(EINVAL);
            }
        };
        if sg_entry.fixup_min_offset > parent_offset {
            pr_warn!(
                "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
                sg_entry.fixup_min_offset,
                parent_offset
            );
            return Err(EINVAL);
        }
        let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
        if new_min_offset > sg_entry.length {
            pr_warn!(
                "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
                new_min_offset,
                sg_entry.length
            );
            return Err(EINVAL);
        }
        let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
        // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
        // most `self.ancestors.len()`, which also fits in a usize.
        Ok(ParentFixupInfo {
            parent_sg_index: sg_idx,
            num_ancestors: ancestors_i + 1,
            new_min_offset,
            target_offset,
        })
    }
}

/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
struct UnusedBufferSpace {
    /// The start of the remaining space.
    offset: usize,
    /// The end of the remaining space.
    limit: usize,
}
impl UnusedBufferSpace {
    /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
    /// into the buffer is returned.
    fn claim_next(&mut self, size: usize) -> Result<usize> {
        // We require every chunk to be aligned.
        let size = ptr_align(size).ok_or(EINVAL)?;
        let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;

        if new_offset <= self.limit {
            let offset = self.offset;
            self.offset = new_offset;
            Ok(offset)
        } else {
            Err(EINVAL)
        }
    }
}

pub(crate) enum PushWorkRes {
    Ok,
    FailedDead(DLArc<dyn DeliverToRead>),
}

impl PushWorkRes {
    fn is_ok(&self) -> bool {
        match self {
            PushWorkRes::Ok => true,
            PushWorkRes::FailedDead(_) => false,
        }
    }
}

/// The fields of `Thread` protected by the spinlock.
struct InnerThread {
    /// Determines the looper state of the thread. It is a bit-wise combination of the constants
    /// prefixed with `LOOPER_`.
    looper_flags: u32,

    /// Determines whether the looper should return.
    looper_need_return: bool,

    /// Determines if thread is dead.
    is_dead: bool,

    /// Work item used to deliver error codes to the thread that started a transaction. Stored here
    /// so that it can be reused.
    reply_work: DArc<ThreadError>,

    /// Work item used to deliver error codes to the current thread. Stored here so that it can be
    /// reused.
    return_work: DArc<ThreadError>,

    /// Determines whether the work list below should be processed. When set to false, `work_list`
    /// is treated as if it were empty.
    process_work_list: bool,
    /// List of work items to deliver to userspace.
    work_list: List<DTRWrap<dyn DeliverToRead>>,
    current_transaction: Option<DArc<Transaction>>,

    /// Extended error information for this thread.
    extended_error: ExtendedError,
}
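
// Looper state bits stored in `InnerThread::looper_flags`. `LOOPER_ENTERED` and
// `LOOPER_REGISTERED` are set in response to the `BC_ENTER_LOOPER` and `BC_REGISTER_LOOPER`
// commands; using both on the same thread, or registering when `Process::register_thread`
// refuses the registration, marks the thread as `LOOPER_INVALID`. `LOOPER_EXITED` is set by
// `BC_EXIT_LOOPER`. `LOOPER_WAITING` (plus `LOOPER_WAITING_PROC` when waiting on the process-wide
// queue) is set while the thread sleeps in `get_work`/`get_work_local`, and `LOOPER_POLL` records
// that the thread has used the poll interface.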
const LOOPER_REGISTERED: u32 = 0x01;
const LOOPER_ENTERED: u32 = 0x02;
const LOOPER_EXITED: u32 = 0x04;
const LOOPER_INVALID: u32 = 0x08;
const LOOPER_WAITING: u32 = 0x10;
const LOOPER_WAITING_PROC: u32 = 0x20;
const LOOPER_POLL: u32 = 0x40;

impl InnerThread {
    fn new() -> Result<Self> {
        fn next_err_id() -> u32 {
            static EE_ID: Atomic<u32> = Atomic::new(0);
            EE_ID.fetch_add(1, Relaxed)
        }

        Ok(Self {
            looper_flags: 0,
            looper_need_return: false,
            is_dead: false,
            process_work_list: false,
            reply_work: ThreadError::try_new()?,
            return_work: ThreadError::try_new()?,
            work_list: List::new(),
            current_transaction: None,
            extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
        })
    }

    fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
        if !self.process_work_list {
            return None;
        }

        let ret = self.work_list.pop_front();
        self.process_work_list = !self.work_list.is_empty();
        ret
    }

    fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        if self.is_dead {
            PushWorkRes::FailedDead(work)
        } else {
            self.work_list.push_back(work);
            self.process_work_list = true;
            PushWorkRes::Ok
        }
    }

    fn push_reply_work(&mut self, code: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
            work.set_error_code(code);
            self.push_work(work);
        } else {
            pr_warn!("Thread reply work is already in use.");
        }
    }

    fn push_return_work(&mut self, reply: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
            work.set_error_code(reply);
            self.push_work(work);
        } else {
            pr_warn!("Thread return work is already in use.");
        }
    }

    /// Used to push work items that do not need to be processed immediately and can wait until the
    /// thread gets another work item.
    fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
        self.work_list.push_back(work);
    }

    /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
    /// (that it could respond to) but it has also issued a transaction, it must first wait for the
    /// previously-issued transaction to complete.
    ///
    /// The `thread` parameter should be the thread containing this `InnerThread`.
    fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
        let transaction = self.current_transaction.take().ok_or(EINVAL)?;
        if core::ptr::eq(thread, transaction.from.as_ref()) {
            self.current_transaction = Some(transaction);
            return Err(EINVAL);
        }
        // Find a new current transaction for this thread.
        self.current_transaction = transaction.find_from(thread).cloned();
        Ok(transaction)
    }
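
    /// Clears the current transaction if it matches `transaction`, replacing it with the next
    /// transaction in the stack (if any). Returns `false` without changing anything if the
    /// current transaction is a different one.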
    fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
        match self.current_transaction.take() {
            None => false,
            Some(old) => {
                if !Arc::ptr_eq(transaction, &old) {
                    self.current_transaction = Some(old);
                    return false;
                }
                self.current_transaction = old.clone_next();
                true
            }
        }
    }

    fn looper_enter(&mut self) {
        self.looper_flags |= LOOPER_ENTERED;
        if self.looper_flags & LOOPER_REGISTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    fn looper_register(&mut self, valid: bool) {
        self.looper_flags |= LOOPER_REGISTERED;
        if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    fn looper_exit(&mut self) {
        self.looper_flags |= LOOPER_EXITED;
    }

    /// Determines whether the thread is part of a pool, i.e., if it is a looper.
    fn is_looper(&self) -> bool {
        self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
    }

    /// Determines whether the thread should attempt to fetch work items from the process queue.
    /// This is generally the case when the thread is registered as a looper and not part of a
    /// transaction stack. But if there is local work, we want to return to userspace before we
    /// deliver any remote work.
    fn should_use_process_work_queue(&self) -> bool {
        self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
    }

    fn poll(&mut self) -> u32 {
        self.looper_flags |= LOOPER_POLL;
        if self.process_work_list || self.looper_need_return {
            bindings::POLLIN
        } else {
            0
        }
    }
}

/// This represents a thread that's used with binder.
#[pin_data]
pub(crate) struct Thread {
    pub(crate) id: i32,
    pub(crate) process: Arc<Process>,
    pub(crate) task: ARef<Task>,
    #[pin]
    inner: SpinLock<InnerThread>,
    #[pin]
    work_condvar: PollCondVar,
    /// Used to insert this thread into the process' `ready_threads` list.
    ///
    /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
    #[pin]
    links: ListLinks,
    #[pin]
    links_track: AtomicTracker,
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Thread {
        tracked_by links_track: AtomicTracker;
    }
}
kernel::list::impl_list_item! {
    impl ListItem<0> for Thread {
        using ListLinks { self.links };
    }
}
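
// The `ListArcSafe`/`ListItem` impls above are what allow a `Thread` to be pushed onto the
// process' `ready_threads` list; `links_track` records whether a list currently holds a
// reference to this thread.
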
impl Thread {
    pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
        let inner = InnerThread::new()?;

        Arc::pin_init(
            try_pin_init!(Thread {
                id,
                process,
                task: ARef::from(&**kernel::current!()),
                inner <- kernel::new_spinlock!(inner, "Thread::inner"),
                work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
                links <- ListLinks::new(),
                links_track <- AtomicTracker::new(),
            }),
            GFP_KERNEL,
        )
    }

    #[inline(never)]
    pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
        let inner = self.inner.lock();

        if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
            seq_print!(
                m,
                " thread {}: l {:02x} need_return {}\n",
                self.id,
                inner.looper_flags,
                inner.looper_need_return,
            );
        }

        let mut t_opt = inner.current_transaction.as_ref();
        while let Some(t) = t_opt {
            if Arc::ptr_eq(&t.from, self) {
                t.debug_print_inner(m, " outgoing transaction ");
                t_opt = t.from_parent.as_ref();
            } else if Arc::ptr_eq(&t.to, &self.process) {
                t.debug_print_inner(m, " incoming transaction ");
                t_opt = t.find_from(self);
            } else {
                t.debug_print_inner(m, " bad transaction ");
                t_opt = None;
            }
        }

        for work in &inner.work_list {
            work.debug_print(m, " ", " pending transaction ")?;
        }
        Ok(())
    }

    pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
        let mut writer = data.writer();
        let ee = self.inner.lock().extended_error;
        writer.write(&ee)?;
        Ok(())
    }

    pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
        self.inner.lock().current_transaction = Some(transaction);
    }

    pub(crate) fn has_current_transaction(&self) -> bool {
        self.inner.lock().current_transaction.is_some()
    }

    /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
    /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
    /// signal); otherwise it returns indicating that none is available.
    fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        {
            let mut inner = self.inner.lock();
            if inner.looper_need_return {
                return Ok(inner.pop_work());
            }
        }

        // Try once if the caller does not want to wait.
        if !wait {
            return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
        }

        // Loop waiting only on the local queue (i.e., not registering with the process queue).
        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            inner.looper_flags |= LOOPER_WAITING;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !LOOPER_WAITING;

            if signal_pending {
                return Err(EINTR);
            }
            if inner.looper_need_return {
                return Ok(None);
            }
        }
    }

    /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
    /// queue if none is available locally.
    ///
    /// This must only be called when the thread is not participating in a transaction chain. If it
    /// is, the local version (`get_work_local`) should be used instead.
    fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        // Try to get work from the thread's work queue, using only a local lock.
        {
            let mut inner = self.inner.lock();
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }
            if inner.looper_need_return {
                drop(inner);
                return Ok(self.process.get_work());
            }
        }

        // If the caller doesn't want to wait, try to grab work from the process queue.
        //
        // We know nothing will have been queued directly to the thread queue because it is not in
        // a transaction and it is not in the process' ready list.
        if !wait {
            return self.process.get_work().ok_or(EAGAIN).map(Some);
        }

        // Get work from the process queue. If none is available, atomically register as ready.
        let reg = match self.process.get_work_or_register(self) {
            GetWorkOrRegister::Work(work) => return Ok(Some(work)),
            GetWorkOrRegister::Register(reg) => reg,
        };

        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);

            if signal_pending || inner.looper_need_return {
                // We need to return now. We need to pull the thread off the list of ready threads
                // (by dropping `reg`), then check the state again after it's off the list to
                // ensure that something was not queued in the meantime. If something has been
                // queued, we just return it (instead of the error).
                drop(inner);
                drop(reg);

                let res = match self.inner.lock().pop_work() {
                    Some(work) => Ok(Some(work)),
                    None if signal_pending => Err(EINTR),
                    None => Ok(None),
                };
                return res;
            }
        }
    }

    /// Push the provided work item to be delivered to user space via this thread.
    ///
    /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
    pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        let sync = work.should_sync_wakeup();

        let res = self.inner.lock().push_work(work);

        if res.is_ok() {
            if sync {
                self.work_condvar.notify_sync();
            } else {
                self.work_condvar.notify_one();
            }
        }

        res
    }

    /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
    /// part of a thread pool) and is alive. Otherwise, pushes the work item to the process instead.
    pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
        let mut inner = self.inner.lock();
        if inner.is_looper() && !inner.is_dead {
            inner.push_work(work);
            Ok(())
        } else {
            drop(inner);
            self.process.push_work(work)
        }
    }

    pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
        self.inner.lock().push_work_deferred(work);
    }

    pub(crate) fn push_return_work(&self, reply: u32) {
        self.inner.lock().push_return_work(reply);
    }

    fn translate_object(
        &self,
        obj_index: usize,
        offset: usize,
        object: BinderObjectRef<'_>,
        view: &mut AllocationView<'_>,
        allow_fds: bool,
        sg_state: &mut ScatterGatherState,
    ) -> BinderResult {
        match object {
            BinderObjectRef::Binder(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
                // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
                // representation.
                let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
                let cookie = obj.cookie as _;
                let flags = obj.flags as _;
                let node = self
                    .process
                    .as_arc_borrow()
                    .get_node(ptr, cookie, flags, strong, self)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Handle(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                let node = self.process.get_node_from_handle(handle, strong)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Fd(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }

                // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
                let fd = unsafe { obj.__bindgen_anon_1.fd };
                let file = LocalFile::fget(fd)?;
                // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                // ioctl, so there are no active calls to `fdget_pos` on this thread.
                let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                security::binder_transfer_file(
                    &self.process.cred,
                    &view.alloc.process.cred,
                    &file,
                )?;

                let mut obj_write = BinderFdObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FD;
                // This will be overwritten with the actual fd when the transaction is received.
                obj_write.__bindgen_anon_1.fd = u32::MAX;
                obj_write.cookie = obj.cookie;
                view.write::<BinderFdObject>(offset, &obj_write)?;

                const FD_FIELD_OFFSET: usize =
                    core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);

                let field_offset = offset + FD_FIELD_OFFSET;

                view.alloc.info_add_fd(file, field_offset, false)?;
            }
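            // A `BINDER_TYPE_PTR` object describes an extra user buffer that must be copied into
            // the target allocation. The copy itself is deferred: here we only claim space for it,
            // record a `ScatterGatherEntry`, and, if the buffer has a parent, register a pointer
            // fixup so that the parent's embedded pointer is rewritten by `apply_sg`.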
            BinderObjectRef::Ptr(obj) => {
                let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
                let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
                    Ok(alloc_offset) => alloc_offset,
                    Err(err) => {
                        pr_warn!(
                            "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
                            sg_state.unused_buffer_space.offset,
                            sg_state.unused_buffer_space.limit,
                            obj_length,
                        );
                        return Err(err.into());
                    }
                };

                let sg_state_idx = sg_state.sg_entries.len();
                sg_state.sg_entries.push(
                    ScatterGatherEntry {
                        obj_index,
                        offset: alloc_offset,
                        sender_uaddr: obj.buffer as _,
                        length: obj_length,
                        pointer_fixups: KVec::new(),
                        fixup_min_offset: 0,
                    },
                    GFP_KERNEL,
                )?;

                let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;

                if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
                    sg_state.ancestors.clear();
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
                } else {
                    // Another buffer also has a pointer to this buffer, and we need to fixup that
                    // pointer too.

                    let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                    let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;

                    let info = sg_state.validate_parent_fixup(
                        parent_index,
                        parent_offset,
                        size_of::<u64>(),
                    )?;

                    sg_state.ancestors.truncate(info.num_ancestors);
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;

                    let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                        Some(parent_entry) => parent_entry,
                        None => {
                            pr_err!(
                                "validate_parent_fixup returned index out of bounds for sg.entries"
                            );
                            return Err(EINVAL.into());
                        }
                    };

                    parent_entry.fixup_min_offset = info.new_min_offset;
                    parent_entry.pointer_fixups.push(
                        PointerFixupEntry::Fixup {
                            pointer_value: buffer_ptr_in_user_space,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )?;
                }

                let mut obj_write = BinderBufferObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_PTR;
                obj_write.flags = obj.flags;
                obj_write.buffer = buffer_ptr_in_user_space;
                obj_write.length = obj.length;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderBufferObject>(offset, &obj_write)?;
            }
            BinderObjectRef::Fda(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }
                let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
                let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
                let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;

                if !is_aligned(parent_offset, size_of::<u32>()) {
                    return Err(EINVAL.into());
                }

                let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
                view.alloc.info_add_fd_reserve(num_fds)?;

                sg_state.ancestors.truncate(info.num_ancestors);
                let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                    Some(parent_entry) => parent_entry,
                    None => {
                        pr_err!(
                            "validate_parent_fixup returned index out of bounds for sg.entries"
                        );
                        return Err(EINVAL.into());
                    }
                };

                if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
                    return Err(EINVAL.into());
                }

                parent_entry.fixup_min_offset = info.new_min_offset;
                parent_entry
                    .pointer_fixups
                    .push(
                        PointerFixupEntry::Skip {
                            skip: fds_len,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )
                    .map_err(|_| ENOMEM)?;
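
                // Read the fd array out of the sender's parent buffer and translate each fd into
                // a file reference attached to the allocation; the receiver-side fds are installed
                // only when the transaction is actually delivered.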
                let fda_uaddr = parent_entry
                    .sender_uaddr
                    .checked_add(parent_offset)
                    .ok_or(EINVAL)?;

                let mut fda_bytes = KVec::new();
                UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
                    .read_all(&mut fda_bytes, GFP_KERNEL)?;

                if fds_len != fda_bytes.len() {
                    pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
                    return Err(EINVAL.into());
                }

                for i in (0..fds_len).step_by(size_of::<u32>()) {
                    let fd = {
                        let mut fd_bytes = [0u8; size_of::<u32>()];
                        fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
                        u32::from_ne_bytes(fd_bytes)
                    };

                    let file = LocalFile::fget(fd)?;
                    // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                    // ioctl, so there are no active calls to `fdget_pos` on this thread.
                    let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                    security::binder_transfer_file(
                        &self.process.cred,
                        &view.alloc.process.cred,
                        &file,
                    )?;

                    // The `validate_parent_fixup` call ensures that this addition will not
                    // overflow.
                    view.alloc.info_add_fd(file, info.target_offset + i, true)?;
                }
                drop(fda_bytes);

                let mut obj_write = BinderFdArrayObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FDA;
                obj_write.num_fds = obj.num_fds;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderFdArrayObject>(offset, &obj_write)?;
            }
        }
        Ok(())
    }

    fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
        for sg_entry in &mut sg_state.sg_entries {
            let mut end_of_previous_fixup = sg_entry.offset;
            let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;

            let mut reader =
                UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
            for fixup in &mut sg_entry.pointer_fixups {
                let (fixup_len, fixup_offset) = match fixup {
                    PointerFixupEntry::Fixup { target_offset, .. } => {
                        (size_of::<u64>(), *target_offset)
                    }
                    PointerFixupEntry::Skip {
                        skip,
                        target_offset,
                    } => (*skip, *target_offset),
                };

                let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
                if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
                    pr_warn!(
                        "Fixups oob {} {} {} {}",
                        fixup_offset,
                        end_of_previous_fixup,
                        offset_end,
                        target_offset_end
                    );
                    return Err(EINVAL.into());
                }

                let copy_off = end_of_previous_fixup;
                let copy_len = fixup_offset - end_of_previous_fixup;
                if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                    pr_warn!("Failed copying into alloc: {:?}", err);
                    return Err(err.into());
                }
                if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
                    let res = alloc.write::<u64>(fixup_offset, pointer_value);
                    if let Err(err) = res {
                        pr_warn!("Failed copying ptr into alloc: {:?}", err);
                        return Err(err.into());
                    }
                }
                if let Err(err) = reader.skip(fixup_len) {
                    pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
                    return Err(err.into());
                }
                end_of_previous_fixup = target_offset_end;
            }
            let copy_off = end_of_previous_fixup;
            let copy_len = offset_end - end_of_previous_fixup;
            if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                pr_warn!("Failed copying remainder into alloc: {:?}", err);
                return Err(err.into());
            }
        }
        Ok(())
    }

    /// This method copies the payload of a transaction into the target process.
    ///
    /// The resulting payload will have several different components, which will be stored next to
    /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
    /// and those objects have to be translated so that they make sense to the target transaction.
    pub(crate) fn copy_transaction_data(
        &self,
        to_process: Arc<Process>,
        tr: &BinderTransactionDataSg,
        debug_id: usize,
        allow_fds: bool,
        txn_security_ctx_offset: Option<&mut usize>,
    ) -> BinderResult<NewAllocation> {
        let trd = &tr.transaction_data;
        let is_oneway = trd.flags & TF_ONE_WAY != 0;
        let mut secctx = if let Some(offset) = txn_security_ctx_offset {
            let secid = self.process.cred.get_secid();
            let ctx = match security::SecurityCtx::from_secid(secid) {
                Ok(ctx) => ctx,
                Err(err) => {
                    pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
                    return Err(err.into());
                }
            };
            Some((offset, ctx))
        } else {
            None
        };

        let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
        let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
        let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
        let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
        let aligned_secctx_size = match secctx.as_ref() {
            Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
            None => 0,
        };

        if !is_aligned(offsets_size, size_of::<u64>()) {
            return Err(EINVAL.into());
        }
        if !is_aligned(buffers_size, size_of::<u64>()) {
            return Err(EINVAL.into());
        }

        // This guarantees that at least `sizeof(usize)` bytes will be allocated.
        let len = usize::max(
            aligned_data_size
                .checked_add(offsets_size)
                .and_then(|sum| sum.checked_add(buffers_size))
                .and_then(|sum| sum.checked_add(aligned_secctx_size))
                .ok_or(ENOMEM)?,
            size_of::<u64>(),
        );
        let secctx_off = aligned_data_size + offsets_size + buffers_size;
        let mut alloc =
            match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
                Ok(alloc) => alloc,
                Err(err) => {
                    pr_warn!(
                        "Failed to allocate buffer. len:{}, is_oneway:{}",
                        len,
                        is_oneway
                    );
                    return Err(err);
                }
            };

        // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
        // all bit-patterns.
        let trd_data_ptr = unsafe { &trd.data.ptr };
        let mut buffer_reader =
            UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
        let mut end_of_previous_object = 0;
        let mut sg_state = None;

        // Copy offsets if there are any.
        if offsets_size > 0 {
            {
                let mut reader =
                    UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
                        .reader();
                alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
            }

            let offsets_start = aligned_data_size;
            let offsets_end = aligned_data_size + offsets_size;

            // This state is used for BINDER_TYPE_PTR objects.
            let sg_state = sg_state.insert(ScatterGatherState {
                unused_buffer_space: UnusedBufferSpace {
                    offset: offsets_end,
                    limit: offsets_end + buffers_size,
                },
                sg_entries: KVec::new(),
                ancestors: KVec::new(),
            });

            // Traverse the objects specified.
            let mut view = AllocationView::new(&mut alloc, data_size);
            for (index, index_offset) in (offsets_start..offsets_end)
                .step_by(size_of::<u64>())
                .enumerate()
            {
                let offset: usize = view
                    .alloc
                    .read::<u64>(index_offset)?
                    .try_into()
                    .map_err(|_| EINVAL)?;

                if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
                    pr_warn!("Got transaction with invalid offset.");
                    return Err(EINVAL.into());
                }

                // Copy data between two objects.
                if end_of_previous_object < offset {
                    view.copy_into(
                        &mut buffer_reader,
                        end_of_previous_object,
                        offset - end_of_previous_object,
                    )?;
                }

                let mut object = BinderObject::read_from(&mut buffer_reader)?;

                match self.translate_object(
                    index,
                    offset,
                    object.as_ref(),
                    &mut view,
                    allow_fds,
                    sg_state,
                ) {
                    Ok(()) => end_of_previous_object = offset + object.size(),
                    Err(err) => {
                        pr_warn!("Error while translating object.");
                        return Err(err);
                    }
                }

                // Update the indexes containing objects to clean up.
                let offset_after_object = index_offset + size_of::<u64>();
                view.alloc
                    .set_info_offsets(offsets_start..offset_after_object);
            }
        }

        // Copy remaining raw data.
        alloc.copy_into(
            &mut buffer_reader,
            end_of_previous_object,
            data_size - end_of_previous_object,
        )?;

        if let Some(sg_state) = sg_state.as_mut() {
            if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
                pr_warn!("Failure in apply_sg: {:?}", err);
                return Err(err);
            }
        }

        if let Some((off_out, secctx)) = secctx.as_mut() {
            if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
                pr_warn!("Failed to write security context: {:?}", err);
                return Err(err.into());
            }
            **off_out = secctx_off;
        }
        Ok(alloc)
    }

    fn unwind_transaction_stack(self: &Arc<Self>) {
        let mut thread = self.clone();
        while let Ok(transaction) = {
            let mut inner = thread.inner.lock();
            inner.pop_transaction_to_reply(thread.as_ref())
        } {
            let reply = Err(BR_DEAD_REPLY);
            if !transaction.from.deliver_single_reply(reply, &transaction) {
                break;
            }

            thread = transaction.from.clone();
        }
    }

    pub(crate) fn deliver_reply(
        &self,
        reply: Result<DLArc<Transaction>, u32>,
        transaction: &DArc<Transaction>,
    ) {
        if self.deliver_single_reply(reply, transaction) {
            transaction.from.unwind_transaction_stack();
        }
    }

    /// Delivers a reply to the thread that started a transaction. The reply can either be a
    /// reply-transaction or an error code to be delivered instead.
    ///
    /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
    /// transaction stack by completing transactions for threads that are dead.
    fn deliver_single_reply(
        &self,
        reply: Result<DLArc<Transaction>, u32>,
        transaction: &DArc<Transaction>,
    ) -> bool {
        if let Ok(transaction) = &reply {
            crate::trace::trace_transaction(true, transaction, Some(&self.task));
            transaction.set_outstanding(&mut self.process.inner.lock());
        }

        {
            let mut inner = self.inner.lock();
            if !inner.pop_transaction_replied(transaction) {
                return false;
            }

            if inner.is_dead {
                return true;
            }

            match reply {
                Ok(work) => {
                    inner.push_work(work);
                }
                Err(code) => inner.push_reply_work(code),
            }
        }

        // Notify the thread now that we've released the inner lock.
        self.work_condvar.notify_sync();
        false
    }

    /// Determines if the given transaction is the current transaction for this thread.
    fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
        let inner = self.inner.lock();
        match &inner.current_transaction {
            None => false,
            Some(current) => Arc::ptr_eq(current, transaction),
        }
    }

    /// Determines the current top of the transaction stack. It fails if the top is in another
    /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
    /// [`None`] if the thread is not currently participating in a transaction stack.
    fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
        let inner = self.inner.lock();
        if let Some(cur) = &inner.current_transaction {
            if core::ptr::eq(self, cur.from.as_ref()) {
                pr_warn!("got new transaction with bad transaction stack");
                return Err(EINVAL);
            }
            Ok(Some(cur.clone()))
        } else {
            Ok(None)
        }
    }

    fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
    where
        T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
    {
        if let Err(err) = inner(self, tr) {
            if err.should_pr_warn() {
                let mut ee = self.inner.lock().extended_error;
                ee.command = err.reply;
                ee.param = err.as_errno();
                pr_warn!(
                    "Transaction failed: {:?} my_pid:{}",
                    err,
                    self.process.pid_in_current_ns()
                );
            }

            self.push_return_work(err.reply);
        }
    }

    fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        // SAFETY: Handle's type has no invalid bit patterns.
        let handle = unsafe { tr.transaction_data.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
        // could this happen?
        let top = self.top_of_transaction_stack()?;
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
        let completion = list_completion.clone_arc();
        let transaction = Transaction::new(node_ref, top, self, tr)?;

        // Check that the transaction stack hasn't changed while the lock was released, then update
        // it with the new transaction.
        {
            let mut inner = self.inner.lock();
            if !transaction.is_stacked_on(&inner.current_transaction) {
                pr_warn!("Transaction stack changed during transaction!");
                return Err(EINVAL.into());
            }
            inner.current_transaction = Some(transaction.clone_arc());
            // We push the completion as a deferred work so that we wait for the reply before
            // returning to userland.
            inner.push_work_deferred(list_completion);
        }

        if let Err(e) = transaction.submit() {
            completion.skip();
            // Define `transaction` first to drop it after `inner`.
            let transaction;
            let mut inner = self.inner.lock();
            transaction = inner.current_transaction.take().unwrap();
            inner.current_transaction = transaction.clone_next();
            Err(e)
        } else {
            Ok(())
        }
    }

    fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        let orig = self.inner.lock().pop_transaction_to_reply(self)?;
        if !orig.from.is_current_transaction(&orig) {
            return Err(EINVAL.into());
        }

        // We need to complete the transaction even if we cannot complete building the reply.
        let out = (|| -> BinderResult<_> {
            let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
            let process = orig.from.process.clone();
            let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
            let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
            self.inner.lock().push_work(completion);
            orig.from.deliver_reply(Ok(reply), &orig);
            Ok(())
        })()
        .map_err(|mut err| {
            // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
            // the sender know that the transaction has completed (with an error in this case).
            pr_warn!(
                "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
                err
            );
            let reply = Err(BR_FAILED_REPLY);
            orig.from.deliver_reply(reply, &orig);
            err.reply = BR_TRANSACTION_COMPLETE;
            err
        });

        out
    }
    fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
        // union is okay.
        let handle = unsafe { tr.transaction_data.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        let transaction = Transaction::new(node_ref, None, self, tr)?;
        let code = if self.process.is_oneway_spam_detection_enabled()
            && transaction.oneway_spam_detected
        {
            BR_ONEWAY_SPAM_SUSPECT
        } else {
            BR_TRANSACTION_COMPLETE
        };
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
        let completion = list_completion.clone_arc();
        self.inner.lock().push_work(list_completion);
        match transaction.submit() {
            Ok(()) => Ok(()),
            Err(err) => {
                completion.skip();
                Err(err)
            }
        }
    }

    fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
        let write_start = req.write_buffer.wrapping_add(req.write_consumed);
        let write_len = req.write_size.saturating_sub(req.write_consumed);
        let mut reader =
            UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();

        while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
            let before = reader.len();
            let cmd = reader.read::<u32>()?;
            GLOBAL_STATS.inc_bc(cmd);
            self.process.stats.inc_bc(cmd);
            match cmd {
                BC_TRANSACTION => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_TRANSACTION_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_REPLY => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_REPLY_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_FREE_BUFFER => {
                    let buffer = self.process.buffer_get(reader.read()?);
                    if let Some(buffer) = buffer {
                        if buffer.looper_need_return_on_free() {
                            self.inner.lock().looper_need_return = true;
                        }
                        drop(buffer);
                    }
                }
                BC_INCREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, false)?
                }
                BC_ACQUIRE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, true)?
                }
                BC_RELEASE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, true)?
                }
                BC_DECREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, false)?
                }
                BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
                BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
                BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
                BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
                BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
                BC_REGISTER_LOOPER => {
                    let valid = self.process.register_thread();
                    self.inner.lock().looper_register(valid);
                }
                BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
                BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
                BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
                BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
                BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,

                // Fail if given an unknown command code.
                // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
                _ => return Err(EINVAL),
            }
            // Update the number of write bytes consumed.
            req.write_consumed += (before - reader.len()) as u64;
        }

        Ok(())
    }

    fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
        let read_start = req.read_buffer.wrapping_add(req.read_consumed);
        let read_len = req.read_size.saturating_sub(req.read_consumed);
        let mut writer = BinderReturnWriter::new(
            UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
            self,
        );
        let (in_pool, use_proc_queue) = {
            let inner = self.inner.lock();
            (inner.is_looper(), inner.should_use_process_work_queue())
        };

        let getter = if use_proc_queue {
            Self::get_work
        } else {
            Self::get_work_local
        };

        // Reserve some room at the beginning of the read buffer so that we can send a
        // BR_SPAWN_LOOPER if we need to.
        let mut has_noop_placeholder = false;
        if req.read_consumed == 0 {
            if let Err(err) = writer.write_code(BR_NOOP) {
                pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
                return Err(err);
            }
            has_noop_placeholder = true;
        }

        // Loop doing work while there is room in the buffer.
        let initial_len = writer.len();
        while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
            match getter(self, wait && initial_len == writer.len()) {
                Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
                    Ok(true) => {}
                    Ok(false) => break,
                    Err(err) => {
                        return Err(err);
                    }
                },
                Ok(None) => {
                    break;
                }
                Err(err) => {
                    // Propagate the error if we haven't written anything else.
                    if err != EINTR && err != EAGAIN {
                        pr_warn!("Failure in work getter: {:?}", err);
                    }
                    if initial_len == writer.len() {
                        return Err(err);
                    } else {
                        break;
                    }
                }
            }
        }

        req.read_consumed += read_len - writer.len() as u64;

        // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
        if has_noop_placeholder && in_pool && self.process.needs_thread() {
            let mut writer =
                UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
                    .writer();
            writer.write(&BR_SPAWN_LOOPER)?;
        }
        Ok(())
    }

    pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
        let (mut reader, mut writer) = data.reader_writer();
        let mut req = reader.read::<BinderWriteRead>()?;

        // Go through the write buffer.
        let mut ret = Ok(());
        if req.write_size > 0 {
            ret = self.write(&mut req);
            if let Err(err) = ret {
                pr_warn!(
                    "Write failure {:?} in pid:{}",
                    err,
                    self.process.pid_in_current_ns()
                );
                req.read_consumed = 0;
                writer.write(&req)?;
                self.inner.lock().looper_need_return = false;
                return ret;
            }
        }

        // Go through the work queue.
        if req.read_size > 0 {
            ret = self.read(&mut req, wait);
            if ret.is_err() && ret != Err(EINTR) {
                pr_warn!(
                    "Read failure {:?} in pid:{}",
                    ret,
                    self.process.pid_in_current_ns()
                );
            }
        }

        // Write the request back so that the consumed fields are visible to the caller.
        writer.write(&req)?;

        self.inner.lock().looper_need_return = false;

        ret
    }

    pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
        table.register_wait(file, &self.work_condvar);
        let mut inner = self.inner.lock();
        (inner.should_use_process_work_queue(), inner.poll())
    }

    /// Make any in-progress call to `get_work` or `get_work_local` return immediately.
    pub(crate) fn exit_looper(&self) {
        let mut inner = self.inner.lock();
        let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
        if should_notify {
            inner.looper_need_return = true;
        }
        drop(inner);

        if should_notify {
            self.work_condvar.notify_one();
        }
    }

    pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
        // Determine if we need to notify. This requires the lock.
        let inner = self.inner.lock();
        let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
        drop(inner);

        // Now that the lock is no longer held, notify the waiters if we have to.
        if notify {
            if sync {
                self.work_condvar.notify_sync();
            } else {
                self.work_condvar.notify_one();
            }
        }
    }

    pub(crate) fn release(self: &Arc<Self>) {
        self.inner.lock().is_dead = true;

        //self.work_condvar.clear();
        self.unwind_transaction_stack();

        // Cancel all pending work items.
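        // `get_work_local(false)` pops without sleeping, so this loop simply drains the
        // thread-local queue and lets every remaining item run its `cancel` hook.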
        while let Ok(Some(work)) = self.get_work_local(false) {
            work.into_arc().cancel();
        }
    }
}

#[pin_data]
struct ThreadError {
    error_code: Atomic<u32>,
    #[pin]
    links_track: AtomicTracker,
}

impl ThreadError {
    fn try_new() -> Result<DArc<Self>> {
        DTRWrap::arc_pin_init(pin_init!(Self {
            error_code: Atomic::new(BR_OK),
            links_track <- AtomicTracker::new(),
        }))
        .map(ListArc::into_arc)
    }

    fn set_error_code(&self, code: u32) {
        self.error_code.store(code, Relaxed);
    }

    fn is_unused(&self) -> bool {
        self.error_code.load(Relaxed) == BR_OK
    }
}

impl DeliverToRead for ThreadError {
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let code = self.error_code.load(Relaxed);
        self.error_code.store(BR_OK, Relaxed);
        writer.write_code(code)?;
        Ok(true)
    }

    fn cancel(self: DArc<Self>) {}

    fn should_sync_wakeup(&self) -> bool {
        false
    }

    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        seq_print!(
            m,
            "{}transaction error: {}\n",
            prefix,
            self.error_code.load(Relaxed)
        );
        Ok(())
    }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for ThreadError {
        tracked_by links_track: AtomicTracker;
    }
}