/linux/tools/memory-model/litmus-tests/
    Z6.0+pooncelock+poonceLock+pombonce.litmus
        6: * This litmus test demonstrates how smp_mb__after_spinlock() may be
        27: smp_mb__after_spinlock();
    MP+polockmbonce+poacquiresilsil.litmus
        6: * Do spinlocks combined with smp_mb__after_spinlock() provide order
        18: smp_mb__after_spinlock();
    README
        74: Protect the access with a lock and an smp_mb__after_spinlock()
        153: As above, but with smp_mb__after_spinlock() immediately
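The two litmus tests above ask whether a lock acquisition followed immediately by smp_mb__after_spinlock() behaves as a full barrier even for CPUs that never take the lock. Below is a C-style sketch of the message-passing shape the MP test probes; the function and variable names and the exact accesses are an illustrative paraphrase, not the contents of the .litmus files.

	#include <linux/spinlock.h>
	#include <asm/barrier.h>

	static DEFINE_SPINLOCK(lo);
	static int x;

	/* Writer: publishes x from inside the critical section. */
	static void writer(void)
	{
		spin_lock(&lo);
		smp_mb__after_spinlock();	/* make the acquisition a full barrier */
		WRITE_ONCE(x, 1);
		spin_unlock(&lo);
	}

	/* Observer: never takes the lock, only senses it via spin_is_locked(). */
	static void observer(void)
	{
		int r1 = smp_load_acquire(&x);	/* ordered before the samples below */
		int r2 = spin_is_locked(&lo);
		int r3 = spin_is_locked(&lo);

		/*
		 * If r1 == 1 while r2 == 0 and r3 == 1, the acquire load must
		 * have executed before the lock was taken, yet observed a store
		 * made inside the critical section; with the
		 * smp_mb__after_spinlock() in writer(), that outcome is forbidden.
		 */
		(void)r1; (void)r2; (void)r3;
	}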
/linux/kernel/kcsan/
    selftest.c
        148: KCSAN_CHECK_READ_BARRIER(smp_mb__after_spinlock());  (in test_barrier())
        177: KCSAN_CHECK_WRITE_BARRIER(smp_mb__after_spinlock());  (in test_barrier())
        209: KCSAN_CHECK_RW_BARRIER(smp_mb__after_spinlock());  (in test_barrier())
    kcsan_test.c
        581: KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true);  (in test_barrier_nothreads())
        626: KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true);  (in test_barrier_nothreads())
        671: KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true);  (in test_barrier_nothreads())
/linux/arch/csky/include/asm/
    spinlock.h
        10: #define smp_mb__after_spinlock()	smp_mb()
/linux/arch/xtensa/include/asm/
    spinlock.h
        18: #define smp_mb__after_spinlock()	smp_mb()
/linux/arch/powerpc/include/asm/
    spinlock.h
        14: #define smp_mb__after_spinlock()	smp_mb()
/linux/arch/arm64/include/asm/
    spinlock.h
        12: #define smp_mb__after_spinlock()	smp_mb()
/linux/arch/riscv/include/asm/
    barrier.h
        57: #define smp_mb__after_spinlock()	RISCV_FENCE(iorw, iorw)
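On the five architectures above, taking a spinlock provides acquire ordering but not a full barrier, so smp_mb__after_spinlock() has to expand to a real fence (smp_mb(), or a full iorw fence on RISC-V); architectures that do not override it fall back to the generic definition in include/linux/spinlock.h listed further down. A minimal sketch of the reordering the fence forbids, with illustrative names, follows.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(s);
	static int a, b;

	static int example(void)
	{
		int r;

		WRITE_ONCE(a, 1);		/* store preceding the lock acquisition */

		spin_lock(&s);			/* acquire-only on the architectures above */
		smp_mb__after_spinlock();	/*
						 * Full barrier: orders the store to a
						 * against the load of b below, which
						 * the acquire alone would not do.
						 */
		r = READ_ONCE(b);
		spin_unlock(&s);

		return r;
	}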
/linux/tools/memory-model/Documentation/
    locking.txt
        185: of smp_mb__after_spinlock():
        199: smp_mb__after_spinlock();
        212: This addition of smp_mb__after_spinlock() strengthens the lock
        214: In other words, the addition of the smp_mb__after_spinlock() prohibits
    herd-representation.txt
        47: | smp_mb__after_spinlock | F[after-spinlock] |
    recipes.txt
        160: of smp_mb__after_spinlock():
        174: smp_mb__after_spinlock();
        187: This addition of smp_mb__after_spinlock() strengthens the lock acquisition
    ordering.txt
        160: o smp_mb__after_spinlock(), which provides full ordering subsequent
    explanation.txt
        2752: smp_mb__after_spinlock(). The LKMM uses fence events with special
        2764: smp_mb__after_spinlock() orders po-earlier lock acquisition
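The locking.txt and recipes.txt hits quote the same kind of recipe: accesses made in two critical sections for the same lock are not necessarily seen in order by a CPU that never takes that lock, and adding smp_mb__after_spinlock() right after the second acquisition strengthens it enough that an observer using smp_mb() does see them in order. The three-CPU sketch below is a paraphrase of that style of recipe, not the documents' verbatim example.

	#include <linux/spinlock.h>
	#include <asm/barrier.h>

	static DEFINE_SPINLOCK(mylock);
	static int x, y, z;

	static void cpu0(void)			/* first critical section */
	{
		spin_lock(&mylock);
		WRITE_ONCE(x, 1);
		WRITE_ONCE(y, 1);
		spin_unlock(&mylock);
	}

	static void cpu1(void)			/* second critical section */
	{
		int r0;

		spin_lock(&mylock);
		smp_mb__after_spinlock();	/* strengthen the acquisition to a full barrier */
		r0 = READ_ONCE(y);
		WRITE_ONCE(z, 1);
		spin_unlock(&mylock);
		(void)r0;
	}

	static void cpu2(void)			/* observer that never takes the lock */
	{
		int r1;

		WRITE_ONCE(z, 2);
		smp_mb();
		r1 = READ_ONCE(x);

		/*
		 * If cpu1() read y == 1 (its critical section came after cpu0()'s)
		 * and z is finally 2 (cpu2()'s store came after cpu1()'s), then
		 * r1 == 0 is prohibited: the unlocked observer sees the two
		 * critical sections in order.
		 */
		(void)r1;
	}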
/linux/Documentation/scheduler/
    membarrier.rst
        16: rq_lock(); smp_mb__after_spinlock() in __schedule(). The barrier matches a full
/linux/include/linux/
    spinlock.h
        175: #ifndef smp_mb__after_spinlock
        176: #define smp_mb__after_spinlock()	kcsan_mb()
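The generic fallback above is only a KCSAN annotation (kcsan_mb()) because, on the architectures that do not override it, the operation that acquires the lock is already fully ordered; the architectures listed earlier supply a real fence instead. The guarantee documented in this header is, roughly, a full memory barrier between the lock acquisition (together with program-order earlier accesses) and program-order later accesses. A paraphrased two-CPU sketch, with names of my choosing:

	#include <linux/spinlock.h>
	#include <asm/barrier.h>

	static DEFINE_SPINLOCK(s);
	static int X, Y;

	static void cpu0(void)
	{
		int r0;

		WRITE_ONCE(X, 1);
		spin_lock(&s);
		smp_mb__after_spinlock();	/* full barrier after the acquisition */
		r0 = READ_ONCE(Y);
		spin_unlock(&s);
		(void)r0;
	}

	static void cpu1(void)
	{
		int r1;

		WRITE_ONCE(Y, 1);
		smp_mb();
		r1 = READ_ONCE(X);
		(void)r1;
	}

	/*
	 * With both sides fully ordered, the store-buffering outcome
	 * r0 == 0 && r1 == 0 is forbidden: at least one CPU observes
	 * the other's store.
	 */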
/linux/tools/memory-model/
    linux-kernel.bell
        33: 'after-spinlock (*smp_mb__after_spinlock*) ||
    linux-kernel.def
        25: smp_mb__after_spinlock() { __fence{after-spinlock}; }
/linux/kernel/
    kthread.c
        1500: smp_mb__after_spinlock();  (in kthread_unuse_mm())
    exit.c
        561: smp_mb__after_spinlock();  (in exit_mm())
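The kthread.c and exit.c hits follow the same pattern: a task is about to stop using an mm, and membarrier() requires a full barrier between the task's last user-space accesses and the point where tsk->mm stops being observed, so the task_lock() acquisition is strengthened on the spot. A paraphrased sketch of that shape, not the kernel's exact code:

	#include <linux/sched.h>
	#include <linux/sched/task.h>
	#include <linux/spinlock.h>

	static void stop_using_mm(struct task_struct *tsk)
	{
		task_lock(tsk);			/* tsk->alloc_lock, a plain spinlock */
		smp_mb__after_spinlock();	/*
						 * Full barrier before tsk->mm goes away,
						 * as required by the expedited membarrier
						 * commands that scan tsk->mm.
						 */
		tsk->mm = NULL;			/* illustrative: the real functions do more work here */
		task_unlock(tsk);
	}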
/linux/kernel/rcu/
    tree_nocb.h
        1000: smp_mb__after_spinlock(); /* Timer expire before wakeup. */  (in do_nocb_deferred_wakeup_timer())
/linux/kernel/sched/
    core.c
        1889: smp_mb__after_spinlock();  (in uclamp_sync_util_min_rt_default())
        4206: smp_mb__after_spinlock();  (in try_to_wake_up())
        4433: smp_mb__after_spinlock(); /* Pairing determined by caller's synchronization design. */  (in cpu_curr_snapshot())
        6681: smp_mb__after_spinlock();  (in __schedule())
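The scheduler is the heaviest user: try_to_wake_up() and __schedule() both need the acquisition of p->pi_lock or the runqueue lock to act as a full barrier so that the task-state and rq->curr accesses that follow cannot be reordered with the stores made before the barrier on the other side (the membarrier.rst hit above documents one such pairing). A much-simplified, paraphrased sketch of the try_to_wake_up() shape, under an invented name:

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static bool wake_if_sleeping(struct task_struct *p, unsigned int state)
	{
		unsigned long flags;
		bool woken = false;

		/*
		 * The caller sets its wake-up condition before calling here; the
		 * full barrier keeps that store from being reordered with the
		 * task-state check below, pairing with the smp_store_mb() in
		 * set_current_state() on the sleeping side.
		 */
		raw_spin_lock_irqsave(&p->pi_lock, flags);
		smp_mb__after_spinlock();
		if (READ_ONCE(p->__state) & state) {
			/* actually queueing the wakeup is omitted */
			woken = true;
		}
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);

		return woken;
	}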
/linux/Documentation/RCU/
    whatisRCU.rst
        678: smp_mb__after_spinlock();
        704: been able to write-acquire the lock otherwise. The smp_mb__after_spinlock()
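The whatisRCU.rst hit is from the "toy" RCU implementation built on a reader-writer lock: waiting for a grace period is just write-acquiring the lock, which cannot succeed while any pre-existing reader still holds a read lock, and the smp_mb__after_spinlock() upgrades that acquisition so the caller's later accesses are fully ordered too. The sketch below is reconstructed from memory rather than quoted from the document, with renamed helpers.

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(rcu_gp_mutex);

	static inline void toy_rcu_read_lock(void)
	{
		read_lock(&rcu_gp_mutex);
	}

	static inline void toy_rcu_read_unlock(void)
	{
		read_unlock(&rcu_gp_mutex);
	}

	/*
	 * Write-acquiring the lock waits out all pre-existing readers; the
	 * smp_mb__after_spinlock() makes the wait a full memory barrier for
	 * whatever the caller does after it returns.
	 */
	static inline void toy_synchronize_rcu(void)
	{
		write_lock(&rcu_gp_mutex);
		smp_mb__after_spinlock();
		write_unlock(&rcu_gp_mutex);
	}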
/linux/fs/
    inode.c
        822: smp_mb__after_spinlock();  (in evict())