C MP+polocks

(*
 * Result: Never
 *
 * This litmus test demonstrates how lock acquisitions and releases can
 * stand in for smp_load_acquire() and smp_store_release(), respectively.
 * In other words, when holding a given lock (or indeed after releasing a
 * given lock), a CPU is not only guaranteed to see the accesses that other
 * CPUs made while previously holding that lock, it is also guaranteed
 * to see all prior accesses by those other CPUs.
 *)
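
(*
 * For comparison, a sketch (not part of this test) of the same pattern
 * written with the explicit operations, as in the companion test
 * MP+pooncerelease+poacquireonce.litmus:
 *
 *	P0: WRITE_ONCE(*buf, 1);
 *	    smp_store_release(flag, 1);
 *	P1: r0 = smp_load_acquire(flag);
 *	    r1 = READ_ONCE(*buf);
 *
 * That variant likewise forbids the outcome r0==1 && r1==0.
 *)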

{
	spinlock_t mylock;
	int buf;
	int flag;
}
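(* Both buf and flag are implicitly initialized to zero. *)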

P0(int *buf, int *flag, spinlock_t *mylock) // Producer
{
	WRITE_ONCE(*buf, 1);	// Store to buf before acquiring the lock.
	spin_lock(mylock);
	WRITE_ONCE(*flag, 1);	// Store to flag while holding the lock.
	spin_unlock(mylock);	// Release acts like smp_store_release().
}

P1(int *buf, int *flag, spinlock_t *mylock) // Consumer
{
	int r0;
	int r1;

	spin_lock(mylock);	// Acquisition acts like smp_load_acquire().
	r0 = READ_ONCE(*flag);
	spin_unlock(mylock);
	r1 = READ_ONCE(*buf);	// Read buf after releasing the lock.
}

exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *)
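
(*
 * The exists clause asks whether P1 can see P0's store to flag (r0=1)
 * while missing P0's earlier store to buf (r1=0).  "Result: Never"
 * above records that the memory model forbids this outcome.
 *)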