// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! This module defines the `Thread` type, which represents a userspace thread that is using
//! binder.
//!
//! The `Process` object stores all of the threads in an rb tree.

use kernel::{
    bindings,
    fs::{File, LocalFile},
    list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
    prelude::*,
    security,
    seq_file::SeqFile,
    seq_print,
    sync::poll::{PollCondVar, PollTable},
    sync::{Arc, SpinLock},
    task::Task,
    types::ARef,
    uaccess::{UserPtr, UserSlice},
    uapi,
};

use crate::{
    allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
    defs::*,
    error::BinderResult,
    process::{GetWorkOrRegister, Process},
    ptr_align,
    stats::GLOBAL_STATS,
    transaction::Transaction,
    BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
};

use core::{
    mem::size_of,
    sync::atomic::{AtomicU32, Ordering},
};

/// Stores the layout of the scatter-gather entries. This is used during the `translate_object`
/// calls and is discarded when they return.
struct ScatterGatherState {
    /// A struct that tracks the amount of unused buffer space.
    unused_buffer_space: UnusedBufferSpace,
    /// Scatter-gather entries to copy.
    sg_entries: KVec<ScatterGatherEntry>,
    /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
    /// was processed and all of its ancestors. The array is in sorted order.
    ancestors: KVec<usize>,
}
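
// An illustrative walk-through (comment only) of how `ancestors` evolves as
// `binder_buffer_object`s are processed, using the nesting from the
// `validate_parent_fixup` docs below; the letters stand for the `sg_entries`
// indexes of the corresponding buffers:
//
//     process A (no parent)      ancestors = [A]
//     process B (parent = A)     ancestors = [A, B]
//     process C (parent = A)     ancestors = [A, C]    // B truncated away
//     process D (parent = C)     ancestors = [A, C, D]
//     process E (parent = A)     ancestors = [A, E]    // C and D truncated away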

/// This entry specifies an additional buffer that should be copied using the scatter-gather
/// mechanism.
struct ScatterGatherEntry {
    /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
    obj_index: usize,
    /// Offset in target buffer.
    offset: usize,
    /// User address in source buffer.
    sender_uaddr: usize,
    /// Number of bytes to copy.
    length: usize,
    /// The minimum offset of the next fixup in this buffer.
    fixup_min_offset: usize,
    /// The offsets within this buffer that contain pointers which should be translated.
    pointer_fixups: KVec<PointerFixupEntry>,
}

/// This entry specifies that a fixup should happen at `target_offset` of the
/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
/// and is applied later. Otherwise if `skip` is zero, then the size of the
/// fixup is `size_of::<u64>()` and `pointer_value` is written to the buffer.
struct PointerFixupEntry {
    /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
    skip: usize,
    /// The translated pointer to write when `skip` is zero.
    pointer_value: u64,
    /// The offset at which the value should be written. The offset is relative
    /// to the original buffer.
    target_offset: usize,
}
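
// Two illustrative fixup shapes (the field values are made up):
//
//     // `skip == 0`: when the buffer is copied, the 8 bytes at `target_offset`
//     // are overwritten with the translated pointer.
//     PointerFixupEntry { skip: 0, pointer_value: 0x7f12_3456_7000, target_offset: 16 }
//
//     // `skip == 16`: a `binder_fd_array_object` with four fds; the 16 bytes at
//     // `target_offset` are skipped during the copy and patched later, once the
//     // fds have been translated into the target process.
//     PointerFixupEntry { skip: 16, pointer_value: 0, target_offset: 64 }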

/// Return type of `validate_parent_fixup`.
struct ParentFixupInfo {
    /// The index of the parent buffer in `sg_entries`.
    parent_sg_index: usize,
    /// The number of ancestors of the buffer.
    ///
    /// The buffer is considered an ancestor of itself, so this is always at
    /// least one.
    num_ancestors: usize,
    /// New value of `fixup_min_offset` if this fixup is applied.
    new_min_offset: usize,
    /// The offset of the fixup in the target buffer.
    target_offset: usize,
}

impl ScatterGatherState {
    /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
    /// to access a region in its parent buffer. These accesses have various
    /// restrictions, which this method verifies.
    ///
    /// The `parent_offset` and `length` arguments describe the offset and
    /// length of the access in the parent buffer.
    ///
    /// # Detailed restrictions
    ///
    /// Obviously the fixup must be in-bounds for the parent buffer.
    ///
    /// For safety reasons, we only allow fixups inside a buffer to happen
    /// at increasing offsets; additionally, we only allow fixup on the last
    /// buffer object that was verified, or one of its parents.
    ///
    /// Example of what is allowed:
    ///
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///     D (parent = C, offset = 0)
    ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
    ///
    /// Examples of what is not allowed:
    ///
    /// Decreasing offsets within the same parent:
    /// A
    ///   C (parent = A, offset = 16)
    ///   B (parent = A, offset = 0) // decreasing offset within A
    ///
    /// Referring to a parent that wasn't the last object or any of its parents:
    /// A
    ///   B (parent = A, offset = 0)
    ///   C (parent = A, offset = 0)
    ///   C (parent = A, offset = 16)
    ///   D (parent = B, offset = 0) // B is not A or any of A's parents
    fn validate_parent_fixup(
        &self,
        parent: usize,
        parent_offset: usize,
        length: usize,
    ) -> Result<ParentFixupInfo> {
        // Using `position` would also be correct, but `rposition` avoids
        // quadratic running times.
        let ancestors_i = self
            .ancestors
            .iter()
            .copied()
            .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
            .ok_or(EINVAL)?;
        let sg_idx = self.ancestors[ancestors_i];
        let sg_entry = match self.sg_entries.get(sg_idx) {
            Some(sg_entry) => sg_entry,
            None => {
                pr_err!(
                    "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
                    ancestors_i,
                    sg_idx,
                    self.sg_entries.len()
                );
                return Err(EINVAL);
            }
        };
        if sg_entry.fixup_min_offset > parent_offset {
            pr_warn!(
                "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
                sg_entry.fixup_min_offset,
                parent_offset
            );
            return Err(EINVAL);
        }
        let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
        if new_min_offset > sg_entry.length {
            pr_warn!(
                "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
                new_min_offset,
                sg_entry.length
            );
            return Err(EINVAL);
        }
        let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
        // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
        // most `self.ancestors.len()`, which also fits in a usize.
        Ok(ParentFixupInfo {
            parent_sg_index: sg_idx,
            num_ancestors: ancestors_i + 1,
            new_min_offset,
            target_offset,
        })
    }
}

/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
struct UnusedBufferSpace {
    /// The start of the remaining space.
    offset: usize,
    /// The end of the remaining space.
    limit: usize,
}

impl UnusedBufferSpace {
    /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
    /// into the buffer is returned.
    fn claim_next(&mut self, size: usize) -> Result<usize> {
        // We require every chunk to be aligned.
        let size = ptr_align(size).ok_or(EINVAL)?;
        let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;

        if new_offset <= self.limit {
            let offset = self.offset;
            self.offset = new_offset;
            Ok(offset)
        } else {
            Err(EINVAL)
        }
    }
}
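
// A worked example (comment only; the numbers assume `ptr_align` rounds up to
// a multiple of `size_of::<usize>()`, i.e. 8 bytes on 64-bit):
//
//     let mut space = UnusedBufferSpace { offset: 0, limit: 24 };
//     space.claim_next(12); // Ok(0): 12 rounds up to 16, offset becomes 16
//     space.claim_next(8);  // Ok(16): exactly fills the rest, offset becomes 24
//     space.claim_next(1);  // Err(EINVAL): 1 rounds up to 8, but 24 + 8 > limit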

pub(crate) enum PushWorkRes {
    Ok,
    FailedDead(DLArc<dyn DeliverToRead>),
}

impl PushWorkRes {
    fn is_ok(&self) -> bool {
        match self {
            PushWorkRes::Ok => true,
            PushWorkRes::FailedDead(_) => false,
        }
    }
}

/// The fields of `Thread` protected by the spinlock.
struct InnerThread {
    /// Determines the looper state of the thread. It is a bit-wise combination of the constants
    /// prefixed with `LOOPER_`.
    looper_flags: u32,

    /// Determines whether the looper should return.
    looper_need_return: bool,

    /// Determines if the thread is dead.
    is_dead: bool,

    /// Work item used to deliver error codes to the thread that started a transaction. Stored here
    /// so that it can be reused.
    reply_work: DArc<ThreadError>,

    /// Work item used to deliver error codes to the current thread. Stored here so that it can be
    /// reused.
    return_work: DArc<ThreadError>,

    /// Determines whether the work list below should be processed. When set to false, `work_list`
    /// is treated as if it were empty.
    process_work_list: bool,
    /// List of work items to deliver to userspace.
    work_list: List<DTRWrap<dyn DeliverToRead>>,
    current_transaction: Option<DArc<Transaction>>,

    /// Extended error information for this thread.
    extended_error: ExtendedError,
}

const LOOPER_REGISTERED: u32 = 0x01;
const LOOPER_ENTERED: u32 = 0x02;
const LOOPER_EXITED: u32 = 0x04;
const LOOPER_INVALID: u32 = 0x08;
const LOOPER_WAITING: u32 = 0x10;
const LOOPER_WAITING_PROC: u32 = 0x20;
const LOOPER_POLL: u32 = 0x40;
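
// For example, a thread that first registers as a looper and then also enters
// the loop ends up with flags 0x01 | 0x02 | 0x08 = 0x0b: `looper_enter` sets
// LOOPER_INVALID when LOOPER_REGISTERED is already set, because the two ways
// of joining the thread pool are mutually exclusive (see below).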

impl InnerThread {
    fn new() -> Result<Self> {
        fn next_err_id() -> u32 {
            static EE_ID: AtomicU32 = AtomicU32::new(0);
            EE_ID.fetch_add(1, Ordering::Relaxed)
        }

        Ok(Self {
            looper_flags: 0,
            looper_need_return: false,
            is_dead: false,
            process_work_list: false,
            reply_work: ThreadError::try_new()?,
            return_work: ThreadError::try_new()?,
            work_list: List::new(),
            current_transaction: None,
            extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
        })
    }

    fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
        if !self.process_work_list {
            return None;
        }

        let ret = self.work_list.pop_front();
        self.process_work_list = !self.work_list.is_empty();
        ret
    }

    fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        if self.is_dead {
            PushWorkRes::FailedDead(work)
        } else {
            self.work_list.push_back(work);
            self.process_work_list = true;
            PushWorkRes::Ok
        }
    }

    fn push_reply_work(&mut self, code: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
            work.set_error_code(code);
            self.push_work(work);
        } else {
            pr_warn!("Thread reply work is already in use.");
        }
    }

    fn push_return_work(&mut self, reply: u32) {
        if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
            work.set_error_code(reply);
            self.push_work(work);
        } else {
            pr_warn!("Thread return work is already in use.");
        }
    }

    /// Used to push work items that do not need to be processed immediately and can wait until the
    /// thread gets another work item.
    fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
        self.work_list.push_back(work);
    }

    /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
    /// (that it could respond to) but it has also issued a transaction, it must first wait for the
    /// previously-issued transaction to complete.
    ///
    /// The `thread` parameter should be the thread containing this `InnerThread`.
    fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
        let transaction = self.current_transaction.take().ok_or(EINVAL)?;
        if core::ptr::eq(thread, transaction.from.as_ref()) {
            self.current_transaction = Some(transaction);
            return Err(EINVAL);
        }
        // Find a new current transaction for this thread.
        self.current_transaction = transaction.find_from(thread).cloned();
        Ok(transaction)
    }

    fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
        match self.current_transaction.take() {
            None => false,
            Some(old) => {
                if !Arc::ptr_eq(transaction, &old) {
                    self.current_transaction = Some(old);
                    return false;
                }
                self.current_transaction = old.clone_next();
                true
            }
        }
    }

    fn looper_enter(&mut self) {
        self.looper_flags |= LOOPER_ENTERED;
        if self.looper_flags & LOOPER_REGISTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    fn looper_register(&mut self, valid: bool) {
        self.looper_flags |= LOOPER_REGISTERED;
        if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    fn looper_exit(&mut self) {
        self.looper_flags |= LOOPER_EXITED;
    }

    /// Determines whether the thread is part of a pool, i.e., if it is a looper.
    fn is_looper(&self) -> bool {
        self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
    }

    /// Determines whether the thread should attempt to fetch work items from the process queue.
    /// This is generally the case when the thread is registered as a looper and not part of a
    /// transaction stack. But if there is local work, we want to return to userspace before we
    /// deliver any remote work.
    fn should_use_process_work_queue(&self) -> bool {
        self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
    }

    fn poll(&mut self) -> u32 {
        self.looper_flags |= LOOPER_POLL;
        if self.process_work_list || self.looper_need_return {
            bindings::POLLIN
        } else {
            0
        }
    }
}

/// This represents a thread that's used with binder.
#[pin_data]
pub(crate) struct Thread {
    pub(crate) id: i32,
    pub(crate) process: Arc<Process>,
    pub(crate) task: ARef<Task>,
    #[pin]
    inner: SpinLock<InnerThread>,
    #[pin]
    work_condvar: PollCondVar,
    /// Used to insert this thread into the process' `ready_threads` list.
    ///
    /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
    #[pin]
    links: ListLinks,
    #[pin]
    links_track: AtomicTracker,
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Thread {
        tracked_by links_track: AtomicTracker;
    }
}
kernel::list::impl_list_item! {
    impl ListItem<0> for Thread {
        using ListLinks { self.links };
    }
}

impl Thread {
    pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
        let inner = InnerThread::new()?;

        Arc::pin_init(
            try_pin_init!(Thread {
                id,
                process,
                task: ARef::from(&**kernel::current!()),
                inner <- kernel::new_spinlock!(inner, "Thread::inner"),
                work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
                links <- ListLinks::new(),
                links_track <- AtomicTracker::new(),
            }),
            GFP_KERNEL,
        )
    }

    #[inline(never)]
    pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
        let inner = self.inner.lock();

        if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
            seq_print!(
                m,
                "  thread {}: l {:02x} need_return {}\n",
                self.id,
                inner.looper_flags,
                inner.looper_need_return,
            );
        }

        let mut t_opt = inner.current_transaction.as_ref();
        while let Some(t) = t_opt {
            if Arc::ptr_eq(&t.from, self) {
                t.debug_print_inner(m, "    outgoing transaction ");
                t_opt = t.from_parent.as_ref();
            } else if Arc::ptr_eq(&t.to, &self.process) {
                t.debug_print_inner(m, "    incoming transaction ");
                t_opt = t.find_from(self);
            } else {
                t.debug_print_inner(m, "    bad transaction ");
                t_opt = None;
            }
        }

        for work in &inner.work_list {
            work.debug_print(m, "    ", "    pending transaction ")?;
        }
        Ok(())
    }

    pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
        let mut writer = data.writer();
        let ee = self.inner.lock().extended_error;
        writer.write(&ee)?;
        Ok(())
    }

    pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
        self.inner.lock().current_transaction = Some(transaction);
    }

    pub(crate) fn has_current_transaction(&self) -> bool {
        self.inner.lock().current_transaction.is_some()
    }

    /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
    /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
    /// signal); otherwise it returns indicating that none is available.
    fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        {
            let mut inner = self.inner.lock();
            if inner.looper_need_return {
                return Ok(inner.pop_work());
            }
        }

        // Try once if the caller does not want to wait.
        if !wait {
            return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
        }

        // Loop waiting only on the local queue (i.e., not registering with the process queue).
        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            inner.looper_flags |= LOOPER_WAITING;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !LOOPER_WAITING;

            if signal_pending {
                return Err(EINTR);
            }
            if inner.looper_need_return {
                return Ok(None);
            }
        }
    }
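
    // A hypothetical caller sketch (not driver code) of how the `wait` flag
    // maps onto results for the local queue:
    //
    //     match thread.get_work_local(false) {
    //         Ok(Some(work)) => { /* deliver `work` to userspace */ }
    //         Err(e) if e == EAGAIN => { /* queue empty, nonblocking return */ }
    //         Err(e) => { /* interrupted by a signal (EINTR, wait == true only) */ }
    //         Ok(None) => { /* looper was asked to return to userspace */ }
    //     }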

    /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
    /// queue if none is available locally.
    ///
    /// This must only be called when the thread is not participating in a transaction chain. If it
    /// is, the local version (`get_work_local`) should be used instead.
    fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
        // Try to get work from the thread's work queue, using only a local lock.
        {
            let mut inner = self.inner.lock();
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }
            if inner.looper_need_return {
                drop(inner);
                return Ok(self.process.get_work());
            }
        }

        // If the caller doesn't want to wait, try to grab work from the process queue.
        //
        // We know nothing will have been queued directly to the thread queue because it is not in
        // a transaction and it is not in the process' ready list.
        if !wait {
            return self.process.get_work().ok_or(EAGAIN).map(Some);
        }

        // Get work from the process queue. If none is available, atomically register as ready.
        let reg = match self.process.get_work_or_register(self) {
            GetWorkOrRegister::Work(work) => return Ok(Some(work)),
            GetWorkOrRegister::Register(reg) => reg,
        };

        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(Some(work));
            }

            inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
            let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
            inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);

            if signal_pending || inner.looper_need_return {
                // We need to return now. We need to pull the thread off the list of ready threads
                // (by dropping `reg`), then check the state again after it's off the list to
                // ensure that something was not queued in the meantime. If something has been
                // queued, we just return it (instead of the error).
                drop(inner);
                drop(reg);

                let res = match self.inner.lock().pop_work() {
                    Some(work) => Ok(Some(work)),
                    None if signal_pending => Err(EINTR),
                    None => Ok(None),
                };
                return res;
            }
        }
    }

    /// Push the provided work item to be delivered to user space via this thread.
    ///
    /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
    pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
        let sync = work.should_sync_wakeup();

        let res = self.inner.lock().push_work(work);

        if res.is_ok() {
            if sync {
                self.work_condvar.notify_sync();
            } else {
                self.work_condvar.notify_one();
            }
        }

        res
    }

    /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
    /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
    pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
        let mut inner = self.inner.lock();
        if inner.is_looper() && !inner.is_dead {
            inner.push_work(work);
            Ok(())
        } else {
            drop(inner);
            self.process.push_work(work)
        }
    }

    pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
        self.inner.lock().push_work_deferred(work);
    }

    pub(crate) fn push_return_work(&self, reply: u32) {
        self.inner.lock().push_return_work(reply);
    }

    fn translate_object(
        &self,
        obj_index: usize,
        offset: usize,
        object: BinderObjectRef<'_>,
        view: &mut AllocationView<'_>,
        allow_fds: bool,
        sg_state: &mut ScatterGatherState,
    ) -> BinderResult {
        match object {
            BinderObjectRef::Binder(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
                // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
                // representation.
                let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
                let cookie = obj.cookie as _;
                let flags = obj.flags as _;
                let node = self
                    .process
                    .as_arc_borrow()
                    .get_node(ptr, cookie, flags, strong, self)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Handle(obj) => {
                let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                let node = self.process.get_node_from_handle(handle, strong)?;
                security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                view.transfer_binder_object(offset, obj, strong, node)?;
            }
            BinderObjectRef::Fd(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }

                // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
                let fd = unsafe { obj.__bindgen_anon_1.fd };
                let file = LocalFile::fget(fd)?;
                // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                // ioctl, so there are no active calls to `fdget_pos` on this thread.
                let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                security::binder_transfer_file(
                    &self.process.cred,
                    &view.alloc.process.cred,
                    &file,
                )?;

                let mut obj_write = BinderFdObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FD;
                // This will be overwritten with the actual fd when the transaction is received.
                obj_write.__bindgen_anon_1.fd = u32::MAX;
                obj_write.cookie = obj.cookie;
                view.write::<BinderFdObject>(offset, &obj_write)?;

                const FD_FIELD_OFFSET: usize =
                    core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);

                let field_offset = offset + FD_FIELD_OFFSET;

                view.alloc.info_add_fd(file, field_offset, false)?;
            }
            BinderObjectRef::Ptr(obj) => {
                let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
                let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
                    Ok(alloc_offset) => alloc_offset,
                    Err(err) => {
                        pr_warn!(
                            "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
                            sg_state.unused_buffer_space.offset,
                            sg_state.unused_buffer_space.limit,
                            obj_length,
                        );
                        return Err(err.into());
                    }
                };

                let sg_state_idx = sg_state.sg_entries.len();
                sg_state.sg_entries.push(
                    ScatterGatherEntry {
                        obj_index,
                        offset: alloc_offset,
                        sender_uaddr: obj.buffer as _,
                        length: obj_length,
                        pointer_fixups: KVec::new(),
                        fixup_min_offset: 0,
                    },
                    GFP_KERNEL,
                )?;

                let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;

                if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
                    sg_state.ancestors.clear();
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
                } else {
                    // Another buffer also has a pointer to this buffer, and we need to fixup that
                    // pointer too.

                    let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                    let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;

                    let info = sg_state.validate_parent_fixup(
                        parent_index,
                        parent_offset,
                        size_of::<u64>(),
                    )?;

                    sg_state.ancestors.truncate(info.num_ancestors);
                    sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;

                    let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                        Some(parent_entry) => parent_entry,
                        None => {
                            pr_err!(
                                "validate_parent_fixup returned index out of bounds for sg.entries"
                            );
                            return Err(EINVAL.into());
                        }
                    };

                    parent_entry.fixup_min_offset = info.new_min_offset;
                    parent_entry.pointer_fixups.push(
                        PointerFixupEntry {
                            skip: 0,
                            pointer_value: buffer_ptr_in_user_space,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )?;
                }

                let mut obj_write = BinderBufferObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_PTR;
                obj_write.flags = obj.flags;
                obj_write.buffer = buffer_ptr_in_user_space;
                obj_write.length = obj.length;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderBufferObject>(offset, &obj_write)?;
            }
            BinderObjectRef::Fda(obj) => {
                if !allow_fds {
                    return Err(EPERM.into());
                }
                let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
                let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
                let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
                let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;

                let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
                view.alloc.info_add_fd_reserve(num_fds)?;

                sg_state.ancestors.truncate(info.num_ancestors);
                let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
                    Some(parent_entry) => parent_entry,
                    None => {
                        pr_err!(
                            "validate_parent_fixup returned index out of bounds for sg.entries"
                        );
                        return Err(EINVAL.into());
                    }
                };

                parent_entry.fixup_min_offset = info.new_min_offset;
                parent_entry
                    .pointer_fixups
                    .push(
                        PointerFixupEntry {
                            skip: fds_len,
                            pointer_value: 0,
                            target_offset: info.target_offset,
                        },
                        GFP_KERNEL,
                    )
                    .map_err(|_| ENOMEM)?;

                let fda_uaddr = parent_entry
                    .sender_uaddr
                    .checked_add(parent_offset)
                    .ok_or(EINVAL)?;
                let mut fda_bytes = KVec::new();
                UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
                    .read_all(&mut fda_bytes, GFP_KERNEL)?;

                if fds_len != fda_bytes.len() {
                    pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
                    return Err(EINVAL.into());
                }

                for i in (0..fds_len).step_by(size_of::<u32>()) {
                    let fd = {
                        let mut fd_bytes = [0u8; size_of::<u32>()];
                        fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
                        u32::from_ne_bytes(fd_bytes)
                    };

                    let file = LocalFile::fget(fd)?;
                    // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
                    // ioctl, so there are no active calls to `fdget_pos` on this thread.
                    let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
                    security::binder_transfer_file(
                        &self.process.cred,
                        &view.alloc.process.cred,
                        &file,
                    )?;

                    // The `validate_parent_fixup` call ensures that this addition will not
                    // overflow.
                    view.alloc.info_add_fd(file, info.target_offset + i, true)?;
                }
                drop(fda_bytes);

                let mut obj_write = BinderFdArrayObject::default();
                obj_write.hdr.type_ = BINDER_TYPE_FDA;
                obj_write.num_fds = obj.num_fds;
                obj_write.parent = obj.parent;
                obj_write.parent_offset = obj.parent_offset;
                view.write::<BinderFdArrayObject>(offset, &obj_write)?;
            }
        }
        Ok(())
    }
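
    /// Copies each scatter-gather buffer recorded in `sg_state` into the target allocation,
    /// applying the recorded pointer fixups along the way.
    ///
    /// An illustrative layout for a single entry (offsets are made up): bytes between fixups
    /// are copied verbatim from the sender, a `skip == 0` fixup overwrites 8 bytes with the
    /// translated pointer, and a `skip > 0` range is left for the fd-array translation:
    ///
    /// ```text
    /// offset:  0        16        24            40        56
    ///          | copied | ptr fix | copied      | fd fix  | copied ... |
    /// ```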
    fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
        for sg_entry in &mut sg_state.sg_entries {
            let mut end_of_previous_fixup = sg_entry.offset;
            let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;

            let mut reader =
                UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
            for fixup in &mut sg_entry.pointer_fixups {
                let fixup_len = if fixup.skip == 0 {
                    size_of::<u64>()
                } else {
                    fixup.skip
                };

                let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
                if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
                    pr_warn!(
                        "Fixups oob {} {} {} {}",
                        fixup.target_offset,
                        end_of_previous_fixup,
                        offset_end,
                        target_offset_end
                    );
                    return Err(EINVAL.into());
                }

                let copy_off = end_of_previous_fixup;
                let copy_len = fixup.target_offset - end_of_previous_fixup;
                if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                    pr_warn!("Failed copying into alloc: {:?}", err);
                    return Err(err.into());
                }
                if fixup.skip == 0 {
                    let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
                    if let Err(err) = res {
                        pr_warn!("Failed copying ptr into alloc: {:?}", err);
                        return Err(err.into());
                    }
                }
                if let Err(err) = reader.skip(fixup_len) {
                    pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
                    return Err(err.into());
                }
                end_of_previous_fixup = target_offset_end;
            }
            let copy_off = end_of_previous_fixup;
            let copy_len = offset_end - end_of_previous_fixup;
            if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
                pr_warn!("Failed copying remainder into alloc: {:?}", err);
                return Err(err.into());
            }
        }
        Ok(())
    }

    /// This method copies the payload of a transaction into the target process.
    ///
    /// The resulting payload will have several different components, which will be stored next to
    /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
    /// and those objects have to be translated so that they make sense to the target transaction.
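    ///
    /// The components land in the allocation in this order (a sketch; each component is
    /// `ptr_align`ed, and the security context is only present when one was requested):
    ///
    /// ```text
    /// | data | offsets | scatter-gather buffers | security context |
    /// ```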
    pub(crate) fn copy_transaction_data(
        &self,
        to_process: Arc<Process>,
        tr: &BinderTransactionDataSg,
        debug_id: usize,
        allow_fds: bool,
        txn_security_ctx_offset: Option<&mut usize>,
    ) -> BinderResult<NewAllocation> {
        let trd = &tr.transaction_data;
        let is_oneway = trd.flags & TF_ONE_WAY != 0;
        let mut secctx = if let Some(offset) = txn_security_ctx_offset {
            let secid = self.process.cred.get_secid();
            let ctx = match security::SecurityCtx::from_secid(secid) {
                Ok(ctx) => ctx,
                Err(err) => {
                    pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
                    return Err(err.into());
                }
            };
            Some((offset, ctx))
        } else {
            None
        };

        let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
        let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
        let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
        let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
        let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
        let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
        let aligned_secctx_size = match secctx.as_ref() {
            Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
            None => 0,
        };

        // This guarantees that at least `sizeof(usize)` bytes will be allocated.
        let len = usize::max(
            aligned_data_size
                .checked_add(aligned_offsets_size)
                .and_then(|sum| sum.checked_add(aligned_buffers_size))
                .and_then(|sum| sum.checked_add(aligned_secctx_size))
                .ok_or(ENOMEM)?,
            size_of::<usize>(),
        );
        let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
        let mut alloc =
            match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
                Ok(alloc) => alloc,
                Err(err) => {
                    pr_warn!(
                        "Failed to allocate buffer. len:{}, is_oneway:{}",
                        len,
                        is_oneway
                    );
                    return Err(err);
                }
            };

        // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
        // all bit-patterns.
        let trd_data_ptr = unsafe { &trd.data.ptr };
        let mut buffer_reader =
            UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
        let mut end_of_previous_object = 0;
        let mut sg_state = None;

        // Copy offsets if there are any.
        if offsets_size > 0 {
            {
                let mut reader =
                    UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
                        .reader();
                alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
            }

            let offsets_start = aligned_data_size;
            let offsets_end = aligned_data_size + aligned_offsets_size;

            // This state is used for BINDER_TYPE_PTR objects.
            let sg_state = sg_state.insert(ScatterGatherState {
                unused_buffer_space: UnusedBufferSpace {
                    offset: offsets_end,
                    limit: len,
                },
                sg_entries: KVec::new(),
                ancestors: KVec::new(),
            });

            // Traverse the objects specified.
            let mut view = AllocationView::new(&mut alloc, data_size);
            for (index, index_offset) in (offsets_start..offsets_end)
                .step_by(size_of::<usize>())
                .enumerate()
            {
                let offset = view.alloc.read(index_offset)?;

                if offset < end_of_previous_object {
                    pr_warn!("Got transaction with invalid offset.");
                    return Err(EINVAL.into());
                }

                // Copy data between two objects.
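                // Illustration (offsets made up): with objects at offsets 0 and 40 and an
                // object size of 24, bytes 24..40 are raw data copied here, while 0..24 and
                // 40..64 are handled by `translate_object` below.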
                if end_of_previous_object < offset {
                    view.copy_into(
                        &mut buffer_reader,
                        end_of_previous_object,
                        offset - end_of_previous_object,
                    )?;
                }

                let mut object = BinderObject::read_from(&mut buffer_reader)?;

                match self.translate_object(
                    index,
                    offset,
                    object.as_ref(),
                    &mut view,
                    allow_fds,
                    sg_state,
                ) {
                    Ok(()) => end_of_previous_object = offset + object.size(),
                    Err(err) => {
                        pr_warn!("Error while translating object.");
                        return Err(err);
                    }
                }

                // Update the indexes containing objects to clean up.
                let offset_after_object = index_offset + size_of::<usize>();
                view.alloc
                    .set_info_offsets(offsets_start..offset_after_object);
            }
        }

        // Copy remaining raw data.
        alloc.copy_into(
            &mut buffer_reader,
            end_of_previous_object,
            data_size - end_of_previous_object,
        )?;

        if let Some(sg_state) = sg_state.as_mut() {
            if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
                pr_warn!("Failure in apply_sg: {:?}", err);
                return Err(err);
            }
        }

        if let Some((off_out, secctx)) = secctx.as_mut() {
            if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
                pr_warn!("Failed to write security context: {:?}", err);
                return Err(err.into());
            }
            **off_out = secctx_off;
        }
        Ok(alloc)
    }

    fn unwind_transaction_stack(self: &Arc<Self>) {
        let mut thread = self.clone();
        while let Ok(transaction) = {
            let mut inner = thread.inner.lock();
            inner.pop_transaction_to_reply(thread.as_ref())
        } {
            let reply = Err(BR_DEAD_REPLY);
            if !transaction.from.deliver_single_reply(reply, &transaction) {
                break;
            }

            thread = transaction.from.clone();
        }
    }
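
    // An illustrative unwind (comment only): if T1 called into T2, which called
    // into T3, and T3 dies, the loop above pops the transaction T2 sent to T3
    // and delivers BR_DEAD_REPLY to T2; if T2 is itself already dead, the loop
    // repeats with T2 so that T1 gets BR_DEAD_REPLY too, continuing until a
    // live thread accepts the error.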
    pub(crate) fn deliver_reply(
        &self,
        reply: Result<DLArc<Transaction>, u32>,
        transaction: &DArc<Transaction>,
    ) {
        if self.deliver_single_reply(reply, transaction) {
            transaction.from.unwind_transaction_stack();
        }
    }

    /// Delivers a reply to the thread that started a transaction. The reply can either be a
    /// reply-transaction or an error code to be delivered instead.
    ///
    /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
    /// transaction stack by completing transactions for threads that are dead.
    fn deliver_single_reply(
        &self,
        reply: Result<DLArc<Transaction>, u32>,
        transaction: &DArc<Transaction>,
    ) -> bool {
        if let Ok(transaction) = &reply {
            transaction.set_outstanding(&mut self.process.inner.lock());
        }

        {
            let mut inner = self.inner.lock();
            if !inner.pop_transaction_replied(transaction) {
                return false;
            }

            if inner.is_dead {
                return true;
            }

            match reply {
                Ok(work) => {
                    inner.push_work(work);
                }
                Err(code) => inner.push_reply_work(code),
            }
        }

        // Notify the thread now that we've released the inner lock.
        self.work_condvar.notify_sync();
        false
    }

    /// Determines if the given transaction is the current transaction for this thread.
    fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
        let inner = self.inner.lock();
        match &inner.current_transaction {
            None => false,
            Some(current) => Arc::ptr_eq(current, transaction),
        }
    }
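    // A transaction stack arises when synchronous transactions nest across processes, for
    // example (a sketch, not the only possible shape):
    //
    //   thread A: BC_TRANSACTION -> thread B   (A blocks waiting for the reply)
    //   thread B: BC_TRANSACTION -> thread A   (B calls back into A while serving the call)
    //   thread A: BC_REPLY       -> thread B
    //   thread B: BC_REPLY       -> thread A
    //
    // The helper below determines which transaction a new outgoing call should stack onto.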
    /// Determines the current top of the transaction stack. It fails if the top is in another
    /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
    /// [`None`] if the thread is not currently participating in a transaction stack.
    fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
        let inner = self.inner.lock();
        if let Some(cur) = &inner.current_transaction {
            if core::ptr::eq(self, cur.from.as_ref()) {
                pr_warn!("got new transaction with bad transaction stack");
                return Err(EINVAL);
            }
            Ok(Some(cur.clone()))
        } else {
            Ok(None)
        }
    }

    fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
    where
        T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
    {
        if let Err(err) = inner(self, tr) {
            if err.should_pr_warn() {
                let mut ee = self.inner.lock().extended_error;
                ee.command = err.reply;
                ee.param = err.as_errno();
                pr_warn!(
                    "Transaction failed: {:?} my_pid:{}",
                    err,
                    self.process.pid_in_current_ns()
                );
            }

            self.push_return_work(err.reply);
        }
    }

    fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        // SAFETY: Handle's type has no invalid bit patterns.
        let handle = unsafe { tr.transaction_data.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
        // could this happen?
        let top = self.top_of_transaction_stack()?;
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
        let completion = list_completion.clone_arc();
        let transaction = Transaction::new(node_ref, top, self, tr)?;

        // Check that the transaction stack hasn't changed while the lock was released, then update
        // it with the new transaction.
        {
            let mut inner = self.inner.lock();
            if !transaction.is_stacked_on(&inner.current_transaction) {
                pr_warn!("Transaction stack changed during transaction!");
                return Err(EINVAL.into());
            }
            inner.current_transaction = Some(transaction.clone_arc());
            // We push the completion as a deferred work so that we wait for the reply before
            // returning to userland.
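            // Should the submission below fail, the completion is skipped and this stack
            // entry is popped again before the error is returned.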
            inner.push_work_deferred(list_completion);
        }

        if let Err(e) = transaction.submit() {
            completion.skip();
            // Define `transaction` first to drop it after `inner`.
            let transaction;
            let mut inner = self.inner.lock();
            transaction = inner.current_transaction.take().unwrap();
            inner.current_transaction = transaction.clone_next();
            Err(e)
        } else {
            Ok(())
        }
    }

    fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        let orig = self.inner.lock().pop_transaction_to_reply(self)?;
        if !orig.from.is_current_transaction(&orig) {
            return Err(EINVAL.into());
        }

        // We need to complete the transaction even if we cannot complete building the reply.
        let out = (|| -> BinderResult<_> {
            let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
            let process = orig.from.process.clone();
            let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
            let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
            self.inner.lock().push_work(completion);
            orig.from.deliver_reply(Ok(reply), &orig);
            Ok(())
        })()
        .map_err(|mut err| {
            // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
            // the sender know that the transaction has completed (with an error in this case).
            pr_warn!(
                "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
                err
            );
            let reply = Err(BR_FAILED_REPLY);
            orig.from.deliver_reply(reply, &orig);
            err.reply = BR_TRANSACTION_COMPLETE;
            err
        });

        out
    }
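    /// Handles a oneway (asynchronous) transaction. No reply is expected, so nothing is pushed
    /// onto the transaction stack; the only feedback to the caller is a completion code, which
    /// becomes `BR_ONEWAY_SPAM_SUSPECT` instead of `BR_TRANSACTION_COMPLETE` when oneway spam
    /// detection is enabled and triggered.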
    fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
        // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
        // union is okay.
        let handle = unsafe { tr.transaction_data.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        let transaction = Transaction::new(node_ref, None, self, tr)?;
        let code = if self.process.is_oneway_spam_detection_enabled()
            && transaction.oneway_spam_detected
        {
            BR_ONEWAY_SPAM_SUSPECT
        } else {
            BR_TRANSACTION_COMPLETE
        };
        let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
        let completion = list_completion.clone_arc();
        self.inner.lock().push_work(list_completion);
        match transaction.submit() {
            Ok(()) => Ok(()),
            Err(err) => {
                completion.skip();
                Err(err)
            }
        }
    }

    fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
        let write_start = req.write_buffer.wrapping_add(req.write_consumed);
        let write_len = req.write_size.saturating_sub(req.write_consumed);
        let mut reader =
            UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();

        while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
            let before = reader.len();
            let cmd = reader.read::<u32>()?;
            GLOBAL_STATS.inc_bc(cmd);
            self.process.stats.inc_bc(cmd);
            match cmd {
                BC_TRANSACTION => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_TRANSACTION_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    if tr.transaction_data.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner);
                    } else {
                        self.transaction(&tr, Self::transaction_inner);
                    }
                }
                BC_REPLY => {
                    let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
                    self.transaction(&tr, Self::reply_inner)
                }
                BC_REPLY_SG => {
                    let tr = reader.read::<BinderTransactionDataSg>()?;
                    self.transaction(&tr, Self::reply_inner)
                }
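                // Userspace hands back each buffer it received with a transaction once it is
                // done with it; dropping the returned `Allocation` releases the range in the
                // process's buffer region.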
                BC_FREE_BUFFER => {
                    let buffer = self.process.buffer_get(reader.read()?);
                    if let Some(buffer) = &buffer {
                        if buffer.looper_need_return_on_free() {
                            self.inner.lock().looper_need_return = true;
                        }
                    }
                    drop(buffer);
                }
                BC_INCREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, false)?
                }
                BC_ACQUIRE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, true, true)?
                }
                BC_RELEASE => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, true)?
                }
                BC_DECREFS => {
                    self.process
                        .as_arc_borrow()
                        .update_ref(reader.read()?, false, false)?
                }
                BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
                BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
                BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
                BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
                BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
                BC_REGISTER_LOOPER => {
                    let valid = self.process.register_thread();
                    self.inner.lock().looper_register(valid);
                }
                BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
                BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
                BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
                BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
                BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,

                // Fail if given an unknown command code.
                // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
                _ => return Err(EINVAL),
            }
            // Update the number of write bytes consumed.
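            // Errors above return early via `?` before this point, so a command only counts
            // as consumed once it has been processed in full; the caller can see how far we
            // got through `write_consumed`.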
            req.write_consumed += (before - reader.len()) as u64;
        }

        Ok(())
    }

    fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
        let read_start = req.read_buffer.wrapping_add(req.read_consumed);
        let read_len = req.read_size.saturating_sub(req.read_consumed);
        let mut writer = BinderReturnWriter::new(
            UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
            self,
        );
        let (in_pool, use_proc_queue) = {
            let inner = self.inner.lock();
            (inner.is_looper(), inner.should_use_process_work_queue())
        };

        let getter = if use_proc_queue {
            Self::get_work
        } else {
            Self::get_work_local
        };

        // Reserve some room at the beginning of the read buffer so that we can send a
        // BR_SPAWN_LOOPER if we need to.
        let mut has_noop_placeholder = false;
        if req.read_consumed == 0 {
            if let Err(err) = writer.write_code(BR_NOOP) {
                pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
                return Err(err);
            }
            has_noop_placeholder = true;
        }

        // Loop doing work while there is room in the buffer.
        let initial_len = writer.len();
        while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
            match getter(self, wait && initial_len == writer.len()) {
                Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
                    Ok(true) => {}
                    Ok(false) => break,
                    Err(err) => {
                        return Err(err);
                    }
                },
                Ok(None) => {
                    break;
                }
                Err(err) => {
                    // Propagate the error if we haven't written anything else.
                    if err != EINTR && err != EAGAIN {
                        pr_warn!("Failure in work getter: {:?}", err);
                    }
                    if initial_len == writer.len() {
                        return Err(err);
                    } else {
                        break;
                    }
                }
            }
        }

        req.read_consumed += read_len - writer.len() as u64;

        // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
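        // This overwrites the BR_NOOP placeholder written at the start of the buffer above,
        // which is why the placeholder was reserved in the first place.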
        if has_noop_placeholder && in_pool && self.process.needs_thread() {
            let mut writer =
                UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
                    .writer();
            writer.write(&BR_SPAWN_LOOPER)?;
        }
        Ok(())
    }

    pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
        let (mut reader, mut writer) = data.reader_writer();
        let mut req = reader.read::<BinderWriteRead>()?;

        // Go through the write buffer.
        let mut ret = Ok(());
        if req.write_size > 0 {
            ret = self.write(&mut req);
            if let Err(err) = ret {
                pr_warn!(
                    "Write failure {:?} in pid:{}",
                    err,
                    self.process.pid_in_current_ns()
                );
                req.read_consumed = 0;
                writer.write(&req)?;
                self.inner.lock().looper_need_return = false;
                return ret;
            }
        }

        // Go through the work queue.
        if req.read_size > 0 {
            ret = self.read(&mut req, wait);
            if ret.is_err() && ret != Err(EINTR) {
                pr_warn!(
                    "Read failure {:?} in pid:{}",
                    ret,
                    self.process.pid_in_current_ns()
                );
            }
        }

        // Write the request back so that the consumed fields are visible to the caller.
        writer.write(&req)?;

        self.inner.lock().looper_need_return = false;

        ret
    }

    pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
        table.register_wait(file, &self.work_condvar);
        let mut inner = self.inner.lock();
        (inner.should_use_process_work_queue(), inner.poll())
    }

    /// Makes any in-progress call to `get_work` or `get_work_local` return immediately.
    pub(crate) fn exit_looper(&self) {
        let mut inner = self.inner.lock();
        let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
        if should_notify {
            inner.looper_need_return = true;
        }
        drop(inner);

        if should_notify {
            self.work_condvar.notify_one();
        }
    }
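    /// Wakes this thread if it is sleeping in `poll` and process-wide work could now be
    /// delivered to it. The `sync` flag selects a synchronous wakeup (`notify_sync`) over a
    /// regular one (`notify_one`).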
    pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
        // Determine if we need to notify. This requires the lock.
        let inner = self.inner.lock();
        let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
        drop(inner);

        // Now that the lock is no longer held, notify the waiters if we have to.
        if notify {
            if sync {
                self.work_condvar.notify_sync();
            } else {
                self.work_condvar.notify_one();
            }
        }
    }

    pub(crate) fn release(self: &Arc<Self>) {
        self.inner.lock().is_dead = true;

        //self.work_condvar.clear();
        self.unwind_transaction_stack();

        // Cancel all pending work items.
        while let Ok(Some(work)) = self.get_work_local(false) {
            work.into_arc().cancel();
        }
    }
}

/// A preallocated work item that delivers a single error code to the thread that owns it. The
/// code `BR_OK` marks the item as unused; delivering the error resets it to that state.
#[pin_data]
struct ThreadError {
    error_code: AtomicU32,
    #[pin]
    links_track: AtomicTracker,
}

impl ThreadError {
    fn try_new() -> Result<DArc<Self>> {
        DTRWrap::arc_pin_init(pin_init!(Self {
            error_code: AtomicU32::new(BR_OK),
            links_track <- AtomicTracker::new(),
        }))
        .map(ListArc::into_arc)
    }

    fn set_error_code(&self, code: u32) {
        self.error_code.store(code, Ordering::Relaxed);
    }

    fn is_unused(&self) -> bool {
        self.error_code.load(Ordering::Relaxed) == BR_OK
    }
}

impl DeliverToRead for ThreadError {
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let code = self.error_code.load(Ordering::Relaxed);
        self.error_code.store(BR_OK, Ordering::Relaxed);
        writer.write_code(code)?;
        Ok(true)
    }

    fn cancel(self: DArc<Self>) {}

    fn should_sync_wakeup(&self) -> bool {
        false
    }

    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        seq_print!(
            m,
            "{}transaction error: {}\n",
            prefix,
            self.error_code.load(Ordering::Relaxed)
        );
        Ok(())
    }
}
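// `ThreadError` items are kept on a thread's work list; this implements the `ListArcSafe`
// plumbing for them, with list membership tracked by the `links_track` field.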
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for ThreadError {
        tracked_by links_track: AtomicTracker;
    }
}