// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

/*
 * Generic per-CPU mmiowb tracking state, used by architectures that
 * enable CONFIG_MMIOWB but do not provide their own arch_mmiowb_state.
 */
#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock   : include/linux/rwlock_api_smp.h
 */
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here is only one user per function
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * Each trylock attempt runs with preemption disabled (and, for the irq
 * variants, interrupts off); on failure both are re-enabled before
 * spinning via arch_##op##_relax() so the owner can make progress.
 */
#define BUILD_LOCK_OPS(op, locktype, lock_ctx_op)			\
static void __lockfunc __raw_##op##_lock(locktype##_t *lock)		\
	lock_ctx_op(lock)						\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
}									\
									\
static unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
	lock_ctx_op(lock)						\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
									\
	return flags;							\
}									\
									\
static void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)	\
	lock_ctx_op(lock)						\
{									\
	/* The irq variant is the irqsave path with the flags dropped. */ \
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
	lock_ctx_op(lock)						\
{									\
	unsigned long flags;						\
									\
	/*								\
	 * Careful: we must exclude softirqs too, hence the		\
	 * irq-disabling. We use the generic preemption-aware		\
	 * function:							\
	 */								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock, __acquires);

#ifndef CONFIG_PREEMPT_RT
BUILD_LOCK_OPS(read, rwlock, __acquires_shared);
BUILD_LOCK_OPS(write, rwlock, __acquires);
#endif

#endif

/*
 * Out-of-line spinlock entry points. Each wrapper is emitted only when
 * the corresponding CONFIG_INLINE_* option is off, in which case callers
 * reach the __raw_* implementation through these exported symbols.
 */
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
noinline int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
noinline int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
noinline void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
noinline void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
noinline void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

/* Note: unlock is gated on UNINLINE (opt-out), unlike the others. */
#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
noinline void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
noinline void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
noinline void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

/*
 * rwlock entry points. On PREEMPT_RT rwlocks are sleeping locks with a
 * separate implementation, so this whole section is compiled out.
 */
#ifndef CONFIG_PREEMPT_RT

#ifndef CONFIG_INLINE_READ_TRYLOCK
noinline int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
noinline void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
noinline void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
noinline void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
noinline void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
noinline void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
noinline void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
noinline int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
noinline void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);

/*
 * Without lockdep the nesting subclass carries no information, so the
 * nested variant collapses onto plain __raw_write_lock(); the comma
 * expression just consumes the subclass argument.
 */
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#define __raw_write_lock_nested(lock, subclass)	__raw_write_lock(((void)(subclass), (lock)))
#endif

void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)
{
	__raw_write_lock_nested(lock, subclass);
}
EXPORT_SYMBOL(_raw_write_lock_nested);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
noinline unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
noinline void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
noinline void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
noinline void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
noinline void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
noinline void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
noinline void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#endif /* !CONFIG_PREEMPT_RT */

/*
 * Lockdep-annotated acquire variants: these take the lockdep subclass
 * (or nest_lock map) explicitly, so they exist only when lockdep's
 * allocation tracking is configured in.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif

/*
 * Return true if @addr lies within the .text range the linker reserved
 * for __lockfunc functions; used e.g. by profile_pc() to skip lock code.
 */
notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);

#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_PREEMPT_RT)
/* Out-of-line wrapper so modules can use the lockdep_assert_in_softirq() check. */
void notrace lockdep_assert_in_softirq_func(void)
{
	lockdep_assert_in_softirq();
}
EXPORT_SYMBOL(lockdep_assert_in_softirq_func);
#endif