Lines matching refs:inner
All references to the inner field (SpinLock<ProcessInner>) and its lock guards in the Rust Binder Process implementation, one entry per source line, tagged with the enclosing function.
437 pub(crate) inner: SpinLock<ProcessInner>, field
482 let mut inner = me.inner.lock(); in run() localVariable
483 defer = inner.defer_work; in run()
484 inner.defer_work = 0; in run()
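
The run() lines above show the defer-work handoff: the pending bits are snapshotted and cleared inside one critical section, and the actual flush/release work runs after the guard is gone. A minimal userspace sketch of that shape, with std::sync::Mutex standing in for the kernel SpinLock; only the constant names (PROC_DEFER_FLUSH, PROC_DEFER_RELEASE) come from the listing, the rest is illustrative:

    use std::sync::Mutex;

    const PROC_DEFER_FLUSH: u32 = 1;
    const PROC_DEFER_RELEASE: u32 = 2;

    struct DeferState {
        defer_work: u32,
    }

    fn run(inner: &Mutex<DeferState>) {
        let defer;
        {
            let mut guard = inner.lock().unwrap();
            defer = guard.defer_work; // snapshot the pending bits
            guard.defer_work = 0;     // and clear them under the lock
        } // guard dropped: the deferred work below runs unlocked
        if defer & PROC_DEFER_FLUSH != 0 { /* flush work would go here */ }
        if defer & PROC_DEFER_RELEASE != 0 { /* release work would go here */ }
    }

    fn main() {
        run(&Mutex::new(DeferState { defer_work: PROC_DEFER_FLUSH }));
    }
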
503 inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"), in new()
530 let inner = self.inner.lock(); in debug_print_stats() localVariable
531 seq_print!(m, " threads: {}\n", inner.threads.iter().count()); in debug_print_stats()
535 inner.requested_thread_count, in debug_print_stats()
536 inner.started_thread_count, in debug_print_stats()
537 inner.max_threads, in debug_print_stats()
539 if let Some(mapping) = &inner.mapping { in debug_print_stats()
550 inner.outstanding_txns in debug_print_stats()
552 seq_print!(m, " nodes: {}\n", inner.nodes.iter().count()); in debug_print_stats()
553 drop(inner); in debug_print_stats()
581 let inner = self.inner.lock(); in debug_print() localVariable
582 let num_threads = inner.threads.iter().count(); in debug_print()
583 let num_nodes = inner.nodes.iter().count(); in debug_print()
586 drop(inner); in debug_print()
592 for thread in inner.threads.values() { in debug_print()
597 for node in inner.nodes.values() { in debug_print()
609 let mut inner = self.inner.lock(); in debug_print() localVariable
611 if print_all || node.has_oneway_transaction(&mut inner) { in debug_print()
612 node.full_debug_print(m, &mut inner)?; in debug_print()
615 drop(inner); in debug_print()
621 let dead = node_ref.node.owner.inner.lock().is_dead; in debug_print()
635 let inner = self.inner.lock(); in debug_print() localVariable
636 for work in &inner.work { in debug_print()
639 for _death in &inner.delivered_deaths { in debug_print()
642 if let Some(mapping) = &inner.mapping { in debug_print()
645 drop(inner); in debug_print()
652 self.inner.lock().work.pop_front() in get_work()
665 let mut inner = self.inner.lock(); in get_work_or_register() localVariable
667 if let Some(work) = inner.work.pop_front() { in get_work_or_register()
672 GetWorkOrRegister::Register(Registration::new(thread, &mut inner)) in get_work_or_register()
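
get_work_or_register() makes its pop-or-register decision inside a single critical section, so no work item can slip in between the empty check and the registration. A sketch of that shape, assuming a simple string queue in place of the driver's work list and eliding the Registration guard carried by the real return value:

    use std::collections::VecDeque;
    use std::sync::Mutex;

    enum GetWorkOrRegister {
        Work(String),
        Register, // the real code carries a Registration guard here
    }

    fn get_work_or_register(inner: &Mutex<VecDeque<String>>) -> GetWorkOrRegister {
        let mut queue = inner.lock().unwrap();
        if let Some(work) = queue.pop_front() {
            return GetWorkOrRegister::Work(work);
        }
        // Still holding the lock: register the caller as a ready thread.
        GetWorkOrRegister::Register
    }

    fn main() {
        let inner = Mutex::new(VecDeque::from([String::from("transaction")]));
        match get_work_or_register(&inner) {
            GetWorkOrRegister::Work(w) => println!("got {w}"),
            GetWorkOrRegister::Register => println!("registered"),
        }
    }
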
686 let inner = self.inner.lock(); in get_current_thread() localVariable
687 if let Some(thread) = inner.threads.get(&id) { in get_current_thread()
696 let mut inner = self.inner.lock(); in get_current_thread() localVariable
697 match inner.threads.entry(id) { in get_current_thread()
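
The two lock() calls in get_current_thread() are the familiar double-checked insert: look up under the lock, allocate with the lock released (keeping allocation out of the spinlock's critical section), then re-lock and insert unless another thread won the race. get_node_inner() below follows the same shape. A HashMap-based sketch, with the map standing in for the driver's red-black tree:

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    struct Thread {
        id: u64,
    }

    fn get_current_thread(inner: &Mutex<HashMap<u64, Arc<Thread>>>, id: u64) -> Arc<Thread> {
        // Fast path: the thread may already be registered.
        if let Some(thread) = inner.lock().unwrap().get(&id) {
            return thread.clone();
        }
        // Slow path: allocate outside the lock, then re-check on insert.
        let new = Arc::new(Thread { id });
        inner.lock().unwrap().entry(id).or_insert(new).clone()
    }

    fn main() {
        let inner = Mutex::new(HashMap::new());
        assert_eq!(get_current_thread(&inner, 42).id, 42);
    }
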
711 let res = self.inner.lock().push_work(work); in push_work()
740 self.inner.lock().is_manager = true; in set_as_manager()
743 let mut owner_inner = node.owner.inner.lock(); in set_as_manager()
759 let mut inner = self.inner.lock(); in get_node_inner() localVariable
760 if let Some(node) = inner.get_existing_node(ptr, cookie)? { in get_node_inner()
761 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper)); in get_node_inner()
768 let mut inner = self.inner.lock(); in get_node_inner() localVariable
769 if let Some(node) = inner.get_existing_node(ptr, cookie)? { in get_node_inner()
770 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper)); in get_node_inner()
773 inner.nodes.insert(rbnode); in get_node_inner()
776 let node_ref = inner in get_node_inner()
879 if self.inner.lock().is_dead { in insert_or_update_handle()
913 let mut inner = self.inner.lock(); in remove_from_delivered_deaths() localVariable
915 let removed = unsafe { inner.delivered_deaths.remove(death) }; in remove_from_delivered_deaths()
916 drop(inner); in remove_from_delivered_deaths()
968 if !self.inner.lock().is_dead { in update_ref()
977 let mut inner = self.inner.lock(); in update_node() localVariable
978 if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) { in update_node()
979 inner.update_node_refcount(&node, false, strong, 1, None); in update_node()
986 let mut inner = self.inner.lock(); in inc_ref_done() localVariable
987 if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) { in inc_ref_done()
988 if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) { in inc_ref_done()
990 let _ = inner.push_work(node); in inc_ref_done()
1014 let mut inner = self.inner.lock(); in buffer_alloc() localVariable
1015 let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?; in buffer_alloc()
1020 drop(inner); in buffer_alloc()
1059 let mut inner = self.inner.lock(); in buffer_get() localVariable
1060 let mapping = inner.mapping.as_mut()?; in buffer_get()
1071 let mut inner = self.inner.lock(); in buffer_raw_free() localVariable
1072 if let Some(ref mut mapping) = &mut inner.mapping { in buffer_raw_free()
1100 let mut inner = self.inner.lock(); in buffer_make_freeable() localVariable
1101 if let Some(ref mut mapping) = &mut inner.mapping { in buffer_make_freeable()
1118 self.inner.lock().mapping = Some(mapping); in create_mapping()
1128 self.inner.lock().register_thread() in register_thread()
1132 self.inner.lock().threads.remove(&thread.id); in remove_thread()
1137 self.inner.lock().max_threads = max; in set_max_threads()
1141 self.inner.lock().oneway_spam_detection_enabled = enabled != 0; in set_oneway_spam_detection_enabled()
1145 self.inner.lock().oneway_spam_detection_enabled in is_oneway_spam_detection_enabled()
1156 let inner = self.inner.lock(); in get_node_debug_info() localVariable
1157 for (node_ptr, node) in &inner.nodes { in get_node_debug_info()
1159 node.populate_debug_info(&mut out, &inner); in get_node_debug_info()
1182 if !self.inner.lock().is_manager { in get_node_info_from_ref()
1190 let owner_inner = node_ref.node.owner.inner.lock(); in get_node_info_from_ref()
1199 let mut inner = self.inner.lock(); in needs_thread() localVariable
1200 let ret = inner.requested_thread_count == 0 in needs_thread()
1201 && inner.ready_threads.is_empty() in needs_thread()
1202 && inner.started_thread_count < inner.max_threads; in needs_thread()
1204 inner.requested_thread_count += 1 in needs_thread()
1245 let mut owner_inner = owner.inner.lock(); in request_death()
1292 if let Some(death) = self.inner.lock().pull_delivered_death(cookie) { in dead_binder_done()
1302 let mut inner = self.inner.lock(); in lock_with_nodes() localVariable
1304 nodes: take(&mut inner.nodes), in lock_with_nodes()
1305 inner, in lock_with_nodes()
1310 let inner = self.inner.lock(); in deferred_flush() localVariable
1311 for thread in inner.threads.values() { in deferred_flush()
1318 let mut inner = self.inner.lock(); in deferred_release() localVariable
1319 inner.is_dead = true; in deferred_release()
1320 inner.is_frozen = IsFrozen::No; in deferred_release()
1321 inner.sync_recv = false; in deferred_release()
1322 inner.async_recv = false; in deferred_release()
1323 inner.is_manager in deferred_release()
1332 let binderfs_file = self.inner.lock().binderfs_file.take(); in deferred_release()
1337 let mut inner = self.inner.lock(); in deferred_release() localVariable
1338 let threads = take(&mut inner.threads); in deferred_release()
1339 let ready = take(&mut inner.ready_threads); in deferred_release()
1340 drop(inner); in deferred_release()
1352 let mut lock = self.inner.lock(); in deferred_release()
1398 while let Some(delivered_death) = { self.inner.lock().delivered_deaths.pop_front() } { in deferred_release()
1403 let omapping = self.inner.lock().mapping.take(); in deferred_release()
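
Two details in the deferred_release() lines deserve a note. Teardown first moves everything it can out of the locked state (threads, ready list, binderfs file, mapping) so destruction happens with the lock released, and the delivered-death drain at line 1398 wraps the lock in its own braces so the guard is a temporary that drops before each popped item is processed. A sketch of that drain idiom:

    use std::collections::VecDeque;
    use std::sync::Mutex;

    fn drain(inner: &Mutex<VecDeque<u64>>) {
        // The braces make the guard a temporary of the inner block, so the
        // lock is released before the loop body runs for each item.
        while let Some(cookie) = { inner.lock().unwrap().pop_front() } {
            println!("processing cookie {cookie:#x}"); // lock not held here
        }
    }

    fn main() {
        drain(&Mutex::new(VecDeque::from([0x10u64, 0x20])));
    }
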
1425 let mut inner = self.inner.lock(); in drop_outstanding_txn() localVariable
1426 if inner.outstanding_txns == 0 { in drop_outstanding_txn()
1430 inner.outstanding_txns -= 1; in drop_outstanding_txn()
1431 inner.is_frozen.is_frozen() && inner.outstanding_txns == 0 in drop_outstanding_txn()
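
drop_outstanding_txn() pairs a guarded decrement with the wake-up condition for a pending freeze: it reports true exactly when the process is freezing and the last transaction has just drained. A sketch with assumed IsFrozen semantics (anything other than No counts as frozen):

    use std::sync::Mutex;

    enum IsFrozen {
        No,
        InProgress,
        Yes,
    }

    impl IsFrozen {
        // Assumed semantics of the listing's is_frozen() helper.
        fn is_frozen(&self) -> bool {
            !matches!(self, IsFrozen::No)
        }
    }

    struct TxnState {
        outstanding_txns: u32,
        is_frozen: IsFrozen,
    }

    // Returns true when a waiter blocked in ioctl_freeze() should be woken.
    fn drop_outstanding_txn(inner: &Mutex<TxnState>) -> bool {
        let mut state = inner.lock().unwrap();
        if state.outstanding_txns == 0 {
            return false; // the real driver reports this underflow as a bug
        }
        state.outstanding_txns -= 1;
        state.is_frozen.is_frozen() && state.outstanding_txns == 0
    }

    fn main() {
        let inner = Mutex::new(TxnState {
            outstanding_txns: 1,
            is_frozen: IsFrozen::InProgress,
        });
        assert!(drop_outstanding_txn(&inner));
    }
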
1442 let mut inner = self.inner.lock(); in ioctl_freeze() localVariable
1443 inner.sync_recv = false; in ioctl_freeze()
1444 inner.async_recv = false; in ioctl_freeze()
1445 inner.is_frozen = IsFrozen::No; in ioctl_freeze()
1446 drop(inner); in ioctl_freeze()
1451 let mut inner = self.inner.lock(); in ioctl_freeze() localVariable
1452 inner.sync_recv = false; in ioctl_freeze()
1453 inner.async_recv = false; in ioctl_freeze()
1454 inner.is_frozen = IsFrozen::InProgress; in ioctl_freeze()
1459 if inner.outstanding_txns == 0 { in ioctl_freeze()
1465 .wait_interruptible_timeout(&mut inner, jiffies) in ioctl_freeze()
1468 inner.is_frozen = IsFrozen::No; in ioctl_freeze()
1481 if inner.txns_pending_locked() { in ioctl_freeze()
1482 inner.is_frozen = IsFrozen::No; in ioctl_freeze()
1485 drop(inner); in ioctl_freeze()
1488 self.inner.lock().is_frozen = IsFrozen::Yes; in ioctl_freeze()
1493 self.inner.lock().is_frozen = IsFrozen::No; in ioctl_freeze()
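
ioctl_freeze() shows the full freeze lifecycle: clear the receive flags, mark the freeze in progress, wait interruptibly with a timeout for outstanding transactions to drain, roll back to No on failure or pending work, and only then commit Yes. A loose userspace analogue using a Condvar in place of the kernel's interruptible wait, with the pending-transaction re-check omitted and all names illustrative:

    use std::sync::{Condvar, Mutex};
    use std::time::Duration;

    struct FreezeState {
        outstanding_txns: u32,
        frozen: bool,
    }

    fn freeze(inner: &Mutex<FreezeState>, cv: &Condvar, timeout: Duration) -> Result<(), ()> {
        let guard = inner.lock().unwrap();
        // Block until outstanding transactions drain or the timeout expires.
        let (mut guard, result) = cv
            .wait_timeout_while(guard, timeout, |s| s.outstanding_txns > 0)
            .unwrap();
        if result.timed_out() {
            guard.frozen = false; // roll back, mirroring is_frozen = IsFrozen::No
            return Err(());
        }
        guard.frozen = true;
        Ok(())
    }

    fn main() {
        let inner = Mutex::new(FreezeState { outstanding_txns: 0, frozen: false });
        let cv = Condvar::new();
        assert!(freeze(&inner, &cv, Duration::from_millis(10)).is_ok());
    }
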
1513 let inner = proc.inner.lock(); in get_frozen_status() localVariable
1514 let txns_pending = inner.txns_pending_locked(); in get_frozen_status()
1515 info.async_recv |= inner.async_recv as u32; in get_frozen_status()
1516 info.sync_recv |= inner.sync_recv as u32; in get_frozen_status()
1612 let mut inner = this.inner.lock(); in release() localVariable
1613 should_schedule = inner.defer_work == 0; in release()
1614 inner.defer_work |= PROC_DEFER_RELEASE; in release()
1615 binderfs_file = inner.binderfs_file.take(); in release()
1630 let mut inner = this.inner.lock(); in flush() localVariable
1631 should_schedule = inner.defer_work == 0; in flush()
1632 inner.defer_work |= PROC_DEFER_FLUSH; in flush()
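
release() and flush() share one scheduling rule: the deferred-work item is queued only when defer_work transitions from zero to nonzero, so racing callers OR in their bits but the worker runs once per batch. In sketch form:

    use std::sync::Mutex;

    const PROC_DEFER_FLUSH: u32 = 1;

    // Returns true only for the caller that made defer_work nonzero; the
    // caller then queues the work item exactly once per batch of bits.
    fn defer_flush(inner: &Mutex<u32>) -> bool {
        let mut defer_work = inner.lock().unwrap();
        let should_schedule = *defer_work == 0;
        *defer_work |= PROC_DEFER_FLUSH;
        should_schedule
    }

    fn main() {
        let inner = Mutex::new(0);
        assert!(defer_flush(&inner));  // first caller schedules the worker
        assert!(!defer_flush(&inner)); // second caller only adds its bit
    }
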
1688 if mask == 0 && from_proc && !this.inner.lock().work.is_empty() { in poll()
1704 assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref())); in new()
1720 let mut inner = self.thread.process.inner.lock(); in drop() localVariable
1724 unsafe { inner.ready_threads.remove(self.thread) }; in drop()
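
Registration is an RAII guard: construction asserts it was built under the process lock (line 1704) and adds the thread to ready_threads, while Drop re-takes the lock to remove it, so a waiter can never remain on the ready list after it stops waiting. A simplified analogue that re-locks in both directions and uses a HashSet in place of the intrusive list whose unsafe remove appears at line 1724:

    use std::collections::HashSet;
    use std::sync::Mutex;

    struct Registration<'a> {
        ready_threads: &'a Mutex<HashSet<u64>>,
        id: u64,
    }

    impl<'a> Registration<'a> {
        fn new(ready_threads: &'a Mutex<HashSet<u64>>, id: u64) -> Self {
            ready_threads.lock().unwrap().insert(id);
            Registration { ready_threads, id }
        }
    }

    impl Drop for Registration<'_> {
        fn drop(&mut self) {
            // Unregister on every exit path, panics included.
            self.ready_threads.lock().unwrap().remove(&self.id);
        }
    }

    fn main() {
        let ready = Mutex::new(HashSet::new());
        {
            let _reg = Registration::new(&ready, 7);
            assert!(ready.lock().unwrap().contains(&7));
        }
        assert!(ready.lock().unwrap().is_empty()); // removed on drop
    }
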
1729 pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>, field
1735 core::mem::swap(&mut self.nodes, &mut self.inner.nodes); in drop()
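
Finally, the WithNodes guard ties together lock_with_nodes() (lines 1302-1305) and the Drop at line 1735: the node tree is taken out of ProcessInner while the guard holds the lock, handed to the caller by value, and swapped back in when the guard drops. A self-contained sketch of the same shape, with BTreeMap standing in for the driver's node tree:

    use std::collections::BTreeMap;
    use std::mem::take;
    use std::sync::{Mutex, MutexGuard};

    struct ProcessInner {
        nodes: BTreeMap<u64, String>,
    }

    struct WithNodes<'a> {
        inner: MutexGuard<'a, ProcessInner>,
        nodes: BTreeMap<u64, String>,
    }

    impl Drop for WithNodes<'_> {
        fn drop(&mut self) {
            // Return the nodes to ProcessInner before the lock is released.
            core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
        }
    }

    fn lock_with_nodes(inner: &Mutex<ProcessInner>) -> WithNodes<'_> {
        let mut inner = inner.lock().unwrap();
        WithNodes {
            nodes: take(&mut inner.nodes),
            inner,
        }
    }

    fn main() {
        let inner = Mutex::new(ProcessInner {
            nodes: BTreeMap::from([(1u64, String::from("node"))]),
        });
        {
            let guard = lock_with_nodes(&inner);
            assert_eq!(guard.nodes.len(), 1); // walk the nodes while locked
        } // Drop swaps the nodes back into ProcessInner
        assert_eq!(inner.lock().unwrap().nodes.len(), 1);
    }
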