xref: /linux/tools/memory-model/litmus-tests/MP+polocks.litmus (revision d0034a7a4ac7fae708146ac0059b9c47a1543f0d)
C MP+polocks

(*
 * Result: Never
 *
 * This litmus test demonstrates how lock acquisitions and releases can
 * stand in for smp_load_acquire() and smp_store_release(), respectively.
 * In other words, when holding a given lock (or indeed after releasing a
 * given lock), a CPU is not only guaranteed to see the accesses that other
 * CPUs made while previously holding that lock, it is also guaranteed
 * to see all prior accesses by those other CPUs.
 *)

(* Empty initial state: buf and flag both start at zero. *)
{}

P0(int *buf, int *flag, spinlock_t *mylock) // Producer
{
	WRITE_ONCE(*buf, 1);	// Data write, before the critical section.
	spin_lock(mylock);
	WRITE_ONCE(*flag, 1);	// Publish under the lock; unlock acts as release.
	spin_unlock(mylock);
}

P1(int *buf, int *flag, spinlock_t *mylock) // Consumer
{
	int r0;
	int r1;

	spin_lock(mylock);	// Lock acquisition acts as acquire.
	r0 = READ_ONCE(*flag);
	spin_unlock(mylock);
	r1 = READ_ONCE(*buf);	// Must see P0's buf write if r0 == 1.
}

exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *)