xref: /linux/arch/arc/include/asm/cmpxchg.h (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory"); /* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

#else

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;	/* same width as the return type, no truncation */
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}
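
/*
 * Note: atomic_ops_lock()/atomic_ops_unlock() come from asm/smp.h (included
 * above); on SMP builds they take a global irq-safe spinlock, while on UP
 * they reduce to local_irq_save()/local_irq_restore().
 */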

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
				(unsigned long)(o), (unsigned long)(n)))
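
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * typical caller-side pattern for cmpxchg() is a read/compute/retry loop.
 * cmpxchg() returns the value it found at @p, so the update succeeded iff
 * that return value equals the "expected" old value.
 */
static inline unsigned long example_store_max(unsigned long *p,
					      unsigned long val)
{
	unsigned long old = *p;

	/* retry until we either published @val or saw a larger value */
	while (old < val) {
		unsigned long seen = cmpxchg(p, old, val);

		if (seen == old)	/* our update won the race */
			break;
		old = seen;		/* lost the race: re-evaluate */
	}
	return old;
}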

/*
 * When not supported natively (no LLSC), ARC cmpxchg() uses atomic_ops_lock
 * (UP/SMP) just to guarantee the semantics.
 * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
 * which also happen to be atomic_ops_lock.
 *
 * Thus, despite being semantically different, the implementation of
 * atomic_cmpxchg() is the same as cmpxchg().
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
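
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * canonical atomic_cmpxchg() consumer is an "update unless" retry loop, in
 * the spirit of the classic atomic_add_unless() pattern.
 */
static inline int example_add_unless(atomic_t *v, int a, int u)
{
	int c = v->counter;	/* plain read; a real version uses atomic_read() */

	/* keep retrying until the add sticks, or the forbidden value shows up */
	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)		/* exchange happened */
			return 1;
		c = old;		/* somebody else changed v: retry */
	}
	return 0;
}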


/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	/* never defined anywhere: a non word-sized @ptr fails at link time */
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		/* EX doesn't provide barrier semantics either */
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * xchg() maps directly to ARC EX instruction which guarantees atomicity.
 * However in !LLSC config, it also needs to use the @atomic_ops_lock spinlock
 * due to a subtle reason:
 *  - For !LLSC, cmpxchg() needs to use that lock (see above) and there is
 *    a lot of kernel code which calls xchg()/cmpxchg() on the same data
 *    (see llist.h). Hence xchg() needs to follow the same locking rules.
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore),
 * but we can cheat a bit: since cmpxchg()'s atomic_ops_lock() disables irqs,
 * it can't possibly be interrupted/preempted/clobbered by xchg().
 * The other way around, xchg() is a single instruction anyway, so it can't
 * be interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with)  _xchg(ptr, with)

#endif
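
/*
 * Illustrative sketch (hypothetical, not in the original header): a typical
 * xchg() consumer, in the spirit of the llist.h usage cited above, atomically
 * detaches an entire lock-free list by swapping the head with NULL.
 */
struct example_node {
	struct example_node *next;
};

static inline struct example_node *
example_del_all(struct example_node **head)
{
	/* the EX-backed swap returns the old head in one atomic step */
	return xchg(head, NULL);
}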

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even on SMP, since LLSC
 *         is natively "SMP safe", no serialization is required).
 *   UP  : other atomics disable IRQs, so there is no way an atomic_xchg()
 *         from a different context could clobber them. atomic_xchg() itself
 *         is a single insn, so it can't be clobbered by others. Thus no
 *         serialization is required when atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
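
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * atomic_xchg() is commonly used to consume a flag exactly once, e.g. to
 * claim pending work so that only one caller acts on it.
 */
static inline int example_claim_pending(atomic_t *pending)
{
	/* returns the old value: nonzero means this caller claimed the work */
	return atomic_xchg(pending, 0);
}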

#endif /* __ASM_ARC_CMPXCHG_H */