/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}

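/*
 * Illustrative sketch (not in the original source): roughly what the LLSC
 * fast path above does, in C-like pseudo-code. LLOCK/SCOND form a
 * load-locked/store-conditional pair: the conditional store only succeeds
 * if no other core has written the lock word in between.
 *
 *	do {
 *		val = lock->slock;			// llock
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__	// spin while held
 *		 || !scond(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__));
 *
 * (scond() is pseudo-code for the SCOND instruction, "true" on success.)
 * arch_spin_trylock() runs the same sequence but bails out, returning 0,
 * instead of spinning when the lock is already held.
 */
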
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

	/*
	 * rw->counter++;
	 */
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

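/*
 * Sketch of the counter encoding used by the rwlock routines above
 * (illustrative, not in the original source; __ARCH_RW_LOCK_UNLOCKED__ is
 * the positive "max readers" value defined in asm/spinlock_types.h):
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__		lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__		held by readers,
 *							each reader decrements
 *	counter == 0					held by one writer
 *
 * Hence arch_read_lock() spins while counter <= 0, and arch_write_lock()
 * spins until counter returns to __ARCH_RW_LOCK_UNLOCKED__, then sets it
 * to 0 to claim exclusive ownership.
 */
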
#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}

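/*
 * Illustrative sketch (not in the original source): the EX-based lock
 * above in C-like pseudo-code. EX atomically exchanges a register with a
 * memory location, so swapping LOCKED in and reading UNLOCKED back means
 * we were the ones who took the lock:
 *
 *	val = __ARCH_SPIN_LOCK_LOCKED__;
 *	do {
 *		atomic_exchange(&lock->slock, &val);	// ex %0, [%1]
 *	} while (val == __ARCH_SPIN_LOCK_LOCKED__);	// still held elsewhere
 *
 * (atomic_exchange() is pseudo-code for the EX instruction.) Unlock simply
 * exchanges __ARCH_SPIN_LOCK_UNLOCKED__ back into the lock word.
 */
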
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */