/linux/rust/kernel/sync/
atomic.rs
    269: pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, _: Ordering) -> T {  // in load()
    271: match Ordering::TYPE {  // in load()
    299: pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {  // in store()
    303: match Ordering::TYPE {  // in store()
    340: pub fn xchg<Ordering: ordering::Ordering>(&self, v: T, _: Ordering) -> T {  // in xchg()
    346: match Ordering::TYPE {  // in xchg()
    411: pub fn cmpxchg<Ordering: ordering::Ordering>(  // in cmpxchg()
    415: o: Ordering,  // in cmpxchg()
    458: fn try_cmpxchg<Ordering: ordering::Ordering>(&self, old: &mut T, new: T, _: Ordering) -> bool {  // in try_cmpxchg()
    465: match Ordering::TYPE {  // in try_cmpxchg()
    [all …]
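
These signatures encode the permitted memory orderings at the type level: load accepts only acquire-or-relaxed orderings and store only release-or-relaxed, so an invalid combination such as a release load is a compile error rather than a runtime check. A minimal sketch of the same pattern built on core::sync::atomic (the TYPE dispatch mirrors the hits above; this is an illustration, not the kernel's implementation):

    use core::sync::atomic::{self, AtomicI32};

    // Marker types standing in for the kernel's ordering types.
    pub struct Relaxed;
    pub struct Acquire;

    pub enum OrderingType {
        Relaxed,
        Acquire,
    }

    // Only orderings valid for a load implement this trait.
    pub trait AcquireOrRelaxed {
        const TYPE: OrderingType;
    }
    impl AcquireOrRelaxed for Relaxed {
        const TYPE: OrderingType = OrderingType::Relaxed;
    }
    impl AcquireOrRelaxed for Acquire {
        const TYPE: OrderingType = OrderingType::Acquire;
    }

    pub struct Atomic(AtomicI32);

    impl Atomic {
        // `load(Release)` would fail to compile, since `Release` would not
        // implement `AcquireOrRelaxed`; the `match` on a constant compiles
        // down to the single chosen ordering.
        pub fn load<O: AcquireOrRelaxed>(&self, _: O) -> i32 {
            match O::TYPE {
                OrderingType::Relaxed => self.0.load(atomic::Ordering::Relaxed),
                OrderingType::Acquire => self.0.load(atomic::Ordering::Acquire),
            }
        }
    }

The otherwise-unused `_: O` value argument lets callers write `a.load(Acquire)` and have the ordering inferred, instead of spelling out `a.load::<Acquire>()`.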
|
/linux/rust/kernel/sync/atomic/
ordering.rs
     73: pub trait Ordering: internal::Sealed {
     78: impl Ordering for Relaxed {
     82: impl Ordering for Acquire {
     86: impl Ordering for Release {
     90: impl Ordering for Full {
     95: pub trait AcquireOrRelaxed: Ordering {}
    101: pub trait ReleaseOrRelaxed: Ordering {}
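
The internal::Sealed supertrait is the standard sealing pattern: code outside the module can name and bound on Ordering, but cannot implement it for new types, which keeps the match Ordering::TYPE arms in atomic.rs exhaustive. A condensed sketch of how the sealing works (two orderings only; not the file's exact contents):

    mod ordering {
        mod internal {
            // Private module: unreachable from outside `ordering`,
            // so no foreign impls of `Sealed` are possible.
            pub trait Sealed {}
            impl Sealed for super::Relaxed {}
            impl Sealed for super::Full {}
        }

        pub struct Relaxed;
        pub struct Full;

        pub trait Ordering: internal::Sealed {}
        impl Ordering for Relaxed {}
        impl Ordering for Full {}
    }

    // Downstream code can still use the trait as a bound...
    fn takes_ordering<O: ordering::Ordering>(_: O) {}

    fn main() {
        takes_ordering(ordering::Relaxed);
        // ...but `impl ordering::Ordering for MyType` would fail to compile:
        // the required supertrait `internal::Sealed` cannot be named here.
    }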
|
/linux/rust/proc-macro2/
detection.rs
      3: use core::sync::atomic::{AtomicUsize, Ordering};
     10: match WORKS.load(Ordering::Relaxed) {  // in inside_proc_macro()
     21: WORKS.store(1, Ordering::Relaxed);  // in force_fallback()
     31: WORKS.store(available as usize + 1, Ordering::Relaxed);  // in initialize()
     70: WORKS.store(works as usize + 1, Ordering::Relaxed);  // in initialize()
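
The hits show proc-macro2 caching a three-state answer in a single AtomicUsize: 0 for "not probed yet", and `available as usize + 1` (1 or 2) once known. Relaxed suffices because the stored value is self-contained; no other memory is published through it. A sketch of that encoding (the probe function is a hypothetical stand-in for the crate's actual availability test):

    use core::sync::atomic::{AtomicUsize, Ordering};

    // 0 = unknown, 1 = proc_macro unavailable, 2 = proc_macro works.
    static WORKS: AtomicUsize = AtomicUsize::new(0);

    fn probe() -> bool {
        // Hypothetical stand-in for the real check.
        true
    }

    fn inside_proc_macro() -> bool {
        match WORKS.load(Ordering::Relaxed) {
            0 => {
                let available = probe();
                // Racy but benign: concurrent probes all store the same answer.
                WORKS.store(available as usize + 1, Ordering::Relaxed);
                available
            }
            1 => false,
            _ => true,
        }
    }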
|
location.rs
      3: use core::cmp::Ordering;
     20: fn cmp(&self, other: &Self) -> Ordering {  // in cmp()
     28: fn partial_cmp(&self, other: &Self) -> Option<Ordering> {  // in partial_cmp()
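
Both hits are the manual ordering implementations for proc-macro2's LineColumn. The conventional shape for such a type is to compare the major field first and break ties with the minor one, delegating partial_cmp to cmp so the two can never disagree (a sketch of the pattern, not necessarily the crate's exact body):

    use core::cmp::Ordering;

    #[derive(PartialEq, Eq)]
    pub struct LineColumn {
        pub line: usize,
        pub column: usize,
    }

    impl Ord for LineColumn {
        fn cmp(&self, other: &Self) -> Ordering {
            // Compare lines first; fall back to columns on a tie.
            self.line.cmp(&other.line).then(self.column.cmp(&other.column))
        }
    }

    impl PartialOrd for LineColumn {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other)) // delegate so the two impls agree
        }
    }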
|
lib.rs
    170: use core::cmp::Ordering;
   1021: fn partial_cmp(&self, other: &Ident) -> Option<Ordering> {  // in partial_cmp()
   1027: fn cmp(&self, other: &Ident) -> Ordering {  // in cmp()
|
fallback.rs
     17: use core::cmp::Ordering;
    465: Ordering::Less  // in find()
    467: Ordering::Greater  // in find()
    470: Ordering::Equal  // in find()
|
/linux/rust/kernel/
revocable.rs
     15: sync::atomic::{AtomicBool, Ordering},
    101: if self.is_available.load(Ordering::Relaxed) {  // in try_access()
    119: if self.is_available.load(Ordering::Relaxed) {  // in try_access_with_guard()
    160: let revoke = self.is_available.swap(false, Ordering::Relaxed);  // in revoke_internal()
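
The interesting hit is revoke_internal: swap(false, ...) both clears the flag and reports its previous value in one atomic step, so exactly one revoker observes the true-to-false transition and performs the cleanup. A stripped-down sketch of just that flag protocol (the real Revocable also wraps the protected value and synchronizes readers via RCU, omitted here):

    use core::sync::atomic::{AtomicBool, Ordering};

    pub struct Revocable {
        is_available: AtomicBool,
    }

    impl Revocable {
        pub fn try_access(&self) -> bool {
            // Readers just check the flag; the real type then hands out a guard.
            self.is_available.load(Ordering::Relaxed)
        }

        pub fn revoke(&self) {
            // `swap` returns the prior value, so concurrent revokers cannot
            // both win: only one sees `true` and runs the teardown.
            if self.is_available.swap(false, Ordering::Relaxed) {
                // drop the protected value exactly once (elided)
            }
        }
    }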
|
rbtree.rs
     11: cmp::{Ord, Ordering},
    373: Ordering::Less => child_field_of_parent = unsafe { &mut (*curr).rb_left },  // in raw_entry()
    375: Ordering::Greater => child_field_of_parent = unsafe { &mut (*curr).rb_right },  // in raw_entry()
    376: Ordering::Equal => {  // in raw_entry()
    420: Ordering::Less => unsafe { (*node).rb_left },  // in get()
    422: Ordering::Greater => unsafe { (*node).rb_right },  // in get()
    424: Ordering::Equal => return Some(unsafe { &(*this).value }),  // in get()
    506: Ordering::Equal => {  // in find_best_match()
    511: Ordering::Greater => {  // in find_best_match()
    514: Ordering::Less => {  // in find_best_match()
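
All three functions descend the tree with the same three-way match on Ord::cmp: go left on Less, right on Greater, stop on Equal. The kernel version walks raw C rb_node pointers under unsafe; the same descent in safe Rust over an owned binary tree looks like this (a sketch of the lookup logic only, not the kernel's rebalancing):

    use core::cmp::Ordering;

    struct Node<K, V> {
        key: K,
        value: V,
        left: Option<Box<Node<K, V>>>,
        right: Option<Box<Node<K, V>>>,
    }

    fn get<'a, K: Ord, V>(mut node: Option<&'a Node<K, V>>, key: &K) -> Option<&'a V> {
        while let Some(n) = node {
            // Mirrors the Less/Greater/Equal arms in the hits above.
            node = match key.cmp(&n.key) {
                Ordering::Less => n.left.as_deref(),
                Ordering::Greater => n.right.as_deref(),
                Ordering::Equal => return Some(&n.value),
            };
        }
        None
    }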
|
/linux/rust/pin-init/examples/
mutex.rs
     13: sync::atomic::{AtomicBool, Ordering},
     37: .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)  // in acquire()
     41: while self.inner.load(Ordering::Relaxed) {  // in acquire()
     62: self.0.inner.store(false, Ordering::Release);  // in drop()
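
Together these three hits form a test-and-test-and-set spinlock: a compare_exchange with Acquire on success to take the lock, a cheap Relaxed read loop while it is contended, and a Release store to drop it. Assembled into a self-contained sketch (the example file also pins the lock and parks waiting threads; this keeps only the atomics):

    use core::hint;
    use core::sync::atomic::{AtomicBool, Ordering};

    pub struct SpinLock {
        inner: AtomicBool,
    }

    impl SpinLock {
        pub const fn new() -> Self {
            Self { inner: AtomicBool::new(false) }
        }

        pub fn acquire(&self) {
            // Acquire on success pairs with the Release in `release`,
            // making the previous owner's writes visible to us.
            while self
                .inner
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_err()
            {
                // Spin on a plain load; retry the expensive RMW only
                // once the lock looks free.
                while self.inner.load(Ordering::Relaxed) {
                    hint::spin_loop();
                }
            }
        }

        pub fn release(&self) {
            self.inner.store(false, Ordering::Release);
        }
    }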
|
/linux/rust/syn/
lifetime.rs
      6: use std::cmp::Ordering;
     99: fn partial_cmp(&self, other: &Lifetime) -> Option<Ordering> {  // in partial_cmp()
    105: fn cmp(&self, other: &Lifetime) -> Ordering {  // in cmp()
|
verbatim.rs
      5: use std::cmp::Ordering;
     17: if crate::buffer::cmp_assuming_same_buffer(end, next) == Ordering::Less {  // in between()
|
buffer.rs
     13: use std::cmp::Ordering;
    401: fn partial_cmp(&self, other: &Self) -> Option<Ordering> {  // in partial_cmp()
    427: pub(crate) fn cmp_assuming_same_buffer(a: Cursor, b: Cursor) -> Ordering {  // in cmp_assuming_same_buffer()
|
precedence.rs
     17: use std::cmp::Ordering;
    207: fn partial_cmp(&self, other: &Self) -> Option<Ordering> {  // in partial_cmp()
|
/linux/drivers/android/binder/
transaction.rs
      5: use core::sync::atomic::{AtomicBool, Ordering};
    218: if !self.is_outstanding.load(Ordering::Relaxed) {  // in set_outstanding()
    219: self.is_outstanding.store(true, Ordering::Relaxed);  // in set_outstanding()
    230: if self.is_outstanding.load(Ordering::Relaxed) {  // in drop_outstanding_txn()
    231: self.is_outstanding.store(false, Ordering::Relaxed);  // in drop_outstanding_txn()
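
Note that set_outstanding is a separate load followed by a store, not one atomic read-modify-write; that is only sound if callers are already serialized by some outer lock, which these hits do not show. Where no such serialization exists, the idiomatic lock-free form collapses the two steps into a swap (a sketch of the alternative, not a claim about binder's locking):

    use core::sync::atomic::{AtomicBool, Ordering};

    struct Transaction {
        is_outstanding: AtomicBool,
    }

    impl Transaction {
        /// Returns true for exactly one caller: `swap` makes the
        /// check-then-set a single atomic step.
        fn set_outstanding(&self) -> bool {
            !self.is_outstanding.swap(true, Ordering::Relaxed)
        }

        /// Likewise, only one caller observes the true -> false edge.
        fn drop_outstanding(&self) -> bool {
            self.is_outstanding.swap(false, Ordering::Relaxed)
        }
    }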
|
rust_binder_main.rs
     33: sync::atomic::{AtomicBool, AtomicUsize, Ordering},
     95: NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)  // in next_debug_id()
    238: self.skip.store(true, Ordering::Relaxed);  // in skip()
    248: if !self.skip.load(Ordering::Relaxed) {  // in do_work()
    262: if self.skip.load(Ordering::Relaxed) {  // in debug_print()
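
next_debug_id is the classic atomic ID allocator: fetch_add guarantees every caller a distinct value, and Relaxed is sufficient because uniqueness needs only the atomicity of the increment, not ordering against other memory. In isolation:

    use core::sync::atomic::{AtomicUsize, Ordering};

    static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);

    // Each call returns a distinct ID, even when called
    // concurrently from many threads.
    fn next_debug_id() -> usize {
        NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
    }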
|
thread.rs
     39: sync::atomic::{AtomicU32, Ordering},
    277: EE_ID.fetch_add(1, Ordering::Relaxed)  // in new()
   1555: self.error_code.store(code, Ordering::Relaxed);  // in set_error_code()
   1559: self.error_code.load(Ordering::Relaxed) == BR_OK  // in is_unused()
   1569: let code = self.error_code.load(Ordering::Relaxed);  // in do_work()
   1570: self.error_code.store(BR_OK, Ordering::Relaxed);  // in do_work()
   1586: self.error_code.load(Ordering::Relaxed)  // in debug_print()
|
stats.rs
      8: use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
|
/linux/rust/kernel/list/
arc.rs
     13: use core::sync::atomic::{AtomicBool, Ordering};
    503: self.inner.store(false, Ordering::Release);  // in on_drop_list_arc()
    518: .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)  // in try_new_list_arc()
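
This is the claim-token pattern: try_new_list_arc may succeed for only one caller at a time, so it uses compare_exchange(false, true, Acquire, Relaxed), and dropping the ListArc returns the token with a Release store that pairs with that Acquire. Reduced to the flag protocol alone (a sketch; the real code hands out a ListArc on success):

    use core::sync::atomic::{AtomicBool, Ordering};

    struct Tracked {
        in_list: AtomicBool,
    }

    impl Tracked {
        /// Claim the unique token; fails if someone already holds it.
        fn try_claim(&self) -> bool {
            self.in_list
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        }

        /// Release pairs with the Acquire in `try_claim`, so the next
        /// claimant sees everything done while the token was held.
        fn release(&self) {
            self.in_list.store(false, Ordering::Release);
        }
    }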
|
/linux/tools/memory-model/Documentation/
cheatsheet.txt
     25: C: Ordering is cumulative
     26: P: Ordering propagates
|
references.txt
     16: Itanium Processor Family Memory Ordering". Intel Corporation.
    104: Ordering" (backup material for the LWN articles)
|
locking.txt
    153: Ordering Provided by a Lock to CPUs Not Holding That Lock
    189: Ordering can be extended to CPUs not holding the lock by careful use
|
/linux/fs/jffs2/
README.Locking
     37: Ordering constraints: See f->sem.
     62: Ordering constraints:
    115: Ordering constraints:
    147: Ordering constraints:
    168: Ordering constraints:
|
/linux/rust/kernel/num/
bounded.rs
    547: fn partial_cmp(&self, other: &Bounded<U, M>) -> Option<cmp::Ordering> {  // in partial_cmp()
    557: fn cmp(&self, other: &Self) -> cmp::Ordering {  // in cmp()
    579: fn partial_cmp(&self, other: &T) -> Option<cmp::Ordering> {  // in partial_cmp()
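
The third hit is the interesting one: Bounded is comparable not just with itself but directly against the underlying primitive (other: &T). Cross-type ordering like that requires a matching PartialEq<T> impl; a minimal sketch with a concrete u16 payload (the kernel type is generic over its bound and marker, elided here):

    use core::cmp::Ordering;

    // Stand-in for the kernel's `Bounded<U, M>`: a value known to be in range.
    struct Bounded(u16);

    impl PartialEq<u16> for Bounded {
        fn eq(&self, other: &u16) -> bool {
            self.0 == *other
        }
    }

    // Lets callers compare a `Bounded` against a plain integer directly,
    // as in `if bounded < 100 { ... }`.
    impl PartialOrd<u16> for Bounded {
        fn partial_cmp(&self, other: &u16) -> Option<Ordering> {
            self.0.partial_cmp(other)
        }
    }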
|
/linux/Documentation/driver-api/
io_ordering.rst
      2: Ordering I/O writes to memory-mapped addresses
|
/linux/tools/memory-model/
linux-kernel.cat
     97: (* Propagation: Ordering from release operations and strong fences. *)
    106:  * Happens Before: Ordering from the passage of time.
|