1 /* 2 * Atomic operations that C can't guarantee us. Useful for 3 * resource counting etc.. 4 * 5 * But use these as seldom as possible since they are much more slower 6 * than regular operations. 7 * 8 * This file is subject to the terms and conditions of the GNU General Public 9 * License. See the file "COPYING" in the main directory of this archive 10 * for more details. 11 * 12 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle 13 */ 14 #ifndef _ASM_ATOMIC_H 15 #define _ASM_ATOMIC_H 16 17 #include <linux/irqflags.h> 18 #include <linux/types.h> 19 #include <asm/barrier.h> 20 #include <asm/compiler.h> 21 #include <asm/cpu-features.h> 22 #include <asm/cmpxchg.h> 23 #include <asm/war.h> 24 25 /* 26 * Using a branch-likely instruction to check the result of an sc instruction 27 * works around a bug present in R10000 CPUs prior to revision 3.0 that could 28 * cause ll-sc sequences to execute non-atomically. 29 */ 30 #if R10000_LLSC_WAR 31 # define __scbeqz "beqzl" 32 #else 33 # define __scbeqz "beqz" 34 #endif 35 36 #define ATOMIC_INIT(i) { (i) } 37 38 /* 39 * atomic_read - read atomic variable 40 * @v: pointer of type atomic_t 41 * 42 * Atomically reads the value of @v. 43 */ 44 #define atomic_read(v) READ_ONCE((v)->counter) 45 46 /* 47 * atomic_set - set atomic variable 48 * @v: pointer of type atomic_t 49 * @i: required value 50 * 51 * Atomically sets the value of @v to @i. 52 */ 53 #define atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) 54 55 #define ATOMIC_OP(op, c_op, asm_op) \ 56 static __inline__ void atomic_##op(int i, atomic_t * v) \ 57 { \ 58 if (kernel_uses_llsc) { \ 59 int temp; \ 60 \ 61 __asm__ __volatile__( \ 62 " .set "MIPS_ISA_LEVEL" \n" \ 63 "1: ll %0, %1 # atomic_" #op " \n" \ 64 " " #asm_op " %0, %2 \n" \ 65 " sc %0, %1 \n" \ 66 "\t" __scbeqz " %0, 1b \n" \ 67 " .set mips0 \n" \ 68 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 69 : "Ir" (i)); \ 70 } else { \ 71 unsigned long flags; \ 72 \ 73 raw_local_irq_save(flags); \ 74 v->counter c_op i; \ 75 raw_local_irq_restore(flags); \ 76 } \ 77 } 78 79 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 80 static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ 81 { \ 82 int result; \ 83 \ 84 if (kernel_uses_llsc) { \ 85 int temp; \ 86 \ 87 __asm__ __volatile__( \ 88 " .set "MIPS_ISA_LEVEL" \n" \ 89 "1: ll %1, %2 # atomic_" #op "_return \n" \ 90 " " #asm_op " %0, %1, %3 \n" \ 91 " sc %0, %2 \n" \ 92 "\t" __scbeqz " %0, 1b \n" \ 93 " " #asm_op " %0, %1, %3 \n" \ 94 " .set mips0 \n" \ 95 : "=&r" (result), "=&r" (temp), \ 96 "+" GCC_OFF_SMALL_ASM() (v->counter) \ 97 : "Ir" (i)); \ 98 } else { \ 99 unsigned long flags; \ 100 \ 101 raw_local_irq_save(flags); \ 102 result = v->counter; \ 103 result c_op i; \ 104 v->counter = result; \ 105 raw_local_irq_restore(flags); \ 106 } \ 107 \ 108 return result; \ 109 } 110 111 #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ 112 static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ 113 { \ 114 int result; \ 115 \ 116 if (kernel_uses_llsc) { \ 117 int temp; \ 118 \ 119 __asm__ __volatile__( \ 120 " .set "MIPS_ISA_LEVEL" \n" \ 121 "1: ll %1, %2 # atomic_fetch_" #op " \n" \ 122 " " #asm_op " %0, %1, %3 \n" \ 123 " sc %0, %2 \n" \ 124 "\t" __scbeqz " %0, 1b \n" \ 125 " move %0, %1 \n" \ 126 " .set mips0 \n" \ 127 : "=&r" (result), "=&r" (temp), \ 128 "+" GCC_OFF_SMALL_ASM() (v->counter) \ 129 : "Ir" (i)); \ 130 } else { \ 131 unsigned long flags; \ 132 \ 133 raw_local_irq_save(flags); \ 134 result = v->counter; \ 135 v->counter c_op i; \ 136 raw_local_irq_restore(flags); \ 137 } \ 138 \ 139 return result; \ 140 } 141 142 #define ATOMIC_OPS(op, c_op, asm_op) \ 143 ATOMIC_OP(op, c_op, asm_op) \ 144 ATOMIC_OP_RETURN(op, c_op, asm_op) \ 145 ATOMIC_FETCH_OP(op, c_op, asm_op) 146 147 ATOMIC_OPS(add, +=, addu) 148 ATOMIC_OPS(sub, -=, subu) 149 150 #define atomic_add_return_relaxed atomic_add_return_relaxed 151 #define atomic_sub_return_relaxed atomic_sub_return_relaxed 152 #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed 153 #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed 154 155 #undef ATOMIC_OPS 156 #define ATOMIC_OPS(op, c_op, asm_op) \ 157 ATOMIC_OP(op, c_op, asm_op) \ 158 ATOMIC_FETCH_OP(op, c_op, asm_op) 159 160 ATOMIC_OPS(and, &=, and) 161 ATOMIC_OPS(or, |=, or) 162 ATOMIC_OPS(xor, ^=, xor) 163 164 #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed 165 #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed 166 #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed 167 168 #undef ATOMIC_OPS 169 #undef ATOMIC_FETCH_OP 170 #undef ATOMIC_OP_RETURN 171 #undef ATOMIC_OP 172 173 /* 174 * atomic_sub_if_positive - conditionally subtract integer from atomic variable 175 * @i: integer value to subtract 176 * @v: pointer of type atomic_t 177 * 178 * Atomically test @v and subtract @i if @v is greater or equal than @i. 179 * The function returns the old value of @v minus @i. 180 */ 181 static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) 182 { 183 int result; 184 185 smp_mb__before_llsc(); 186 187 if (kernel_uses_llsc) { 188 int temp; 189 190 __asm__ __volatile__( 191 " .set "MIPS_ISA_LEVEL" \n" 192 "1: ll %1, %2 # atomic_sub_if_positive\n" 193 " subu %0, %1, %3 \n" 194 " move %1, %0 \n" 195 " bltz %0, 1f \n" 196 " sc %1, %2 \n" 197 "\t" __scbeqz " %1, 1b \n" 198 "1: \n" 199 " .set mips0 \n" 200 : "=&r" (result), "=&r" (temp), 201 "+" GCC_OFF_SMALL_ASM() (v->counter) 202 : "Ir" (i)); 203 } else { 204 unsigned long flags; 205 206 raw_local_irq_save(flags); 207 result = v->counter; 208 result -= i; 209 if (result >= 0) 210 v->counter = result; 211 raw_local_irq_restore(flags); 212 } 213 214 smp_llsc_mb(); 215 216 return result; 217 } 218 219 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) 220 #define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) 221 222 /** 223 * __atomic_add_unless - add unless the number is a given value 224 * @v: pointer of type atomic_t 225 * @a: the amount to add to v... 226 * @u: ...unless v is equal to u. 227 * 228 * Atomically adds @a to @v, so long as it was not @u. 229 * Returns the old value of @v. 230 */ 231 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 232 { 233 int c, old; 234 c = atomic_read(v); 235 for (;;) { 236 if (unlikely(c == (u))) 237 break; 238 old = atomic_cmpxchg((v), c, c + (a)); 239 if (likely(old == c)) 240 break; 241 c = old; 242 } 243 return c; 244 } 245 246 #define atomic_dec_return(v) atomic_sub_return(1, (v)) 247 #define atomic_inc_return(v) atomic_add_return(1, (v)) 248 249 /* 250 * atomic_sub_and_test - subtract value from variable and test result 251 * @i: integer value to subtract 252 * @v: pointer of type atomic_t 253 * 254 * Atomically subtracts @i from @v and returns 255 * true if the result is zero, or false for all 256 * other cases. 257 */ 258 #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) 259 260 /* 261 * atomic_inc_and_test - increment and test 262 * @v: pointer of type atomic_t 263 * 264 * Atomically increments @v by 1 265 * and returns true if the result is zero, or false for all 266 * other cases. 267 */ 268 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 269 270 /* 271 * atomic_dec_and_test - decrement by 1 and test 272 * @v: pointer of type atomic_t 273 * 274 * Atomically decrements @v by 1 and 275 * returns true if the result is 0, or false for all other 276 * cases. 277 */ 278 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) 279 280 /* 281 * atomic_dec_if_positive - decrement by 1 if old value positive 282 * @v: pointer of type atomic_t 283 */ 284 #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) 285 286 /* 287 * atomic_inc - increment atomic variable 288 * @v: pointer of type atomic_t 289 * 290 * Atomically increments @v by 1. 291 */ 292 #define atomic_inc(v) atomic_add(1, (v)) 293 294 /* 295 * atomic_dec - decrement and test 296 * @v: pointer of type atomic_t 297 * 298 * Atomically decrements @v by 1. 299 */ 300 #define atomic_dec(v) atomic_sub(1, (v)) 301 302 /* 303 * atomic_add_negative - add and test if negative 304 * @v: pointer of type atomic_t 305 * @i: integer value to add 306 * 307 * Atomically adds @i to @v and returns true 308 * if the result is negative, or false when 309 * result is greater than or equal to zero. 310 */ 311 #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0) 312 313 #ifdef CONFIG_64BIT 314 315 #define ATOMIC64_INIT(i) { (i) } 316 317 /* 318 * atomic64_read - read atomic variable 319 * @v: pointer of type atomic64_t 320 * 321 */ 322 #define atomic64_read(v) READ_ONCE((v)->counter) 323 324 /* 325 * atomic64_set - set atomic variable 326 * @v: pointer of type atomic64_t 327 * @i: required value 328 */ 329 #define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) 330 331 #define ATOMIC64_OP(op, c_op, asm_op) \ 332 static __inline__ void atomic64_##op(long i, atomic64_t * v) \ 333 { \ 334 if (kernel_uses_llsc) { \ 335 long temp; \ 336 \ 337 __asm__ __volatile__( \ 338 " .set "MIPS_ISA_LEVEL" \n" \ 339 "1: lld %0, %1 # atomic64_" #op " \n" \ 340 " " #asm_op " %0, %2 \n" \ 341 " scd %0, %1 \n" \ 342 "\t" __scbeqz " %0, 1b \n" \ 343 " .set mips0 \n" \ 344 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 345 : "Ir" (i)); \ 346 } else { \ 347 unsigned long flags; \ 348 \ 349 raw_local_irq_save(flags); \ 350 v->counter c_op i; \ 351 raw_local_irq_restore(flags); \ 352 } \ 353 } 354 355 #define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ 356 static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ 357 { \ 358 long result; \ 359 \ 360 if (kernel_uses_llsc) { \ 361 long temp; \ 362 \ 363 __asm__ __volatile__( \ 364 " .set "MIPS_ISA_LEVEL" \n" \ 365 "1: lld %1, %2 # atomic64_" #op "_return\n" \ 366 " " #asm_op " %0, %1, %3 \n" \ 367 " scd %0, %2 \n" \ 368 "\t" __scbeqz " %0, 1b \n" \ 369 " " #asm_op " %0, %1, %3 \n" \ 370 " .set mips0 \n" \ 371 : "=&r" (result), "=&r" (temp), \ 372 "+" GCC_OFF_SMALL_ASM() (v->counter) \ 373 : "Ir" (i)); \ 374 } else { \ 375 unsigned long flags; \ 376 \ 377 raw_local_irq_save(flags); \ 378 result = v->counter; \ 379 result c_op i; \ 380 v->counter = result; \ 381 raw_local_irq_restore(flags); \ 382 } \ 383 \ 384 return result; \ 385 } 386 387 #define ATOMIC64_FETCH_OP(op, c_op, asm_op) \ 388 static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ 389 { \ 390 long result; \ 391 \ 392 if (kernel_uses_llsc && R10000_LLSC_WAR) { \ 393 long temp; \ 394 \ 395 __asm__ __volatile__( \ 396 " .set "MIPS_ISA_LEVEL" \n" \ 397 "1: lld %1, %2 # atomic64_fetch_" #op "\n" \ 398 " " #asm_op " %0, %1, %3 \n" \ 399 " scd %0, %2 \n" \ 400 "\t" __scbeqz " %0, 1b \n" \ 401 " move %0, %1 \n" \ 402 " .set mips0 \n" \ 403 : "=&r" (result), "=&r" (temp), \ 404 "+" GCC_OFF_SMALL_ASM() (v->counter) \ 405 : "Ir" (i)); \ 406 } else { \ 407 unsigned long flags; \ 408 \ 409 raw_local_irq_save(flags); \ 410 result = v->counter; \ 411 v->counter c_op i; \ 412 raw_local_irq_restore(flags); \ 413 } \ 414 \ 415 return result; \ 416 } 417 418 #define ATOMIC64_OPS(op, c_op, asm_op) \ 419 ATOMIC64_OP(op, c_op, asm_op) \ 420 ATOMIC64_OP_RETURN(op, c_op, asm_op) \ 421 ATOMIC64_FETCH_OP(op, c_op, asm_op) 422 423 ATOMIC64_OPS(add, +=, daddu) 424 ATOMIC64_OPS(sub, -=, dsubu) 425 426 #define atomic64_add_return_relaxed atomic64_add_return_relaxed 427 #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed 428 #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed 429 #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed 430 431 #undef ATOMIC64_OPS 432 #define ATOMIC64_OPS(op, c_op, asm_op) \ 433 ATOMIC64_OP(op, c_op, asm_op) \ 434 ATOMIC64_FETCH_OP(op, c_op, asm_op) 435 436 ATOMIC64_OPS(and, &=, and) 437 ATOMIC64_OPS(or, |=, or) 438 ATOMIC64_OPS(xor, ^=, xor) 439 440 #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed 441 #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed 442 #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed 443 444 #undef ATOMIC64_OPS 445 #undef ATOMIC64_FETCH_OP 446 #undef ATOMIC64_OP_RETURN 447 #undef ATOMIC64_OP 448 449 /* 450 * atomic64_sub_if_positive - conditionally subtract integer from atomic 451 * variable 452 * @i: integer value to subtract 453 * @v: pointer of type atomic64_t 454 * 455 * Atomically test @v and subtract @i if @v is greater or equal than @i. 456 * The function returns the old value of @v minus @i. 457 */ 458 static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) 459 { 460 long result; 461 462 smp_mb__before_llsc(); 463 464 if (kernel_uses_llsc) { 465 long temp; 466 467 __asm__ __volatile__( 468 " .set "MIPS_ISA_LEVEL" \n" 469 "1: lld %1, %2 # atomic64_sub_if_positive\n" 470 " dsubu %0, %1, %3 \n" 471 " move %1, %0 \n" 472 " bltz %0, 1f \n" 473 " scd %1, %2 \n" 474 "\t" __scbeqz " %1, 1b \n" 475 "1: \n" 476 " .set mips0 \n" 477 : "=&r" (result), "=&r" (temp), 478 "+" GCC_OFF_SMALL_ASM() (v->counter) 479 : "Ir" (i)); 480 } else { 481 unsigned long flags; 482 483 raw_local_irq_save(flags); 484 result = v->counter; 485 result -= i; 486 if (result >= 0) 487 v->counter = result; 488 raw_local_irq_restore(flags); 489 } 490 491 smp_llsc_mb(); 492 493 return result; 494 } 495 496 #define atomic64_cmpxchg(v, o, n) \ 497 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) 498 #define atomic64_xchg(v, new) (xchg(&((v)->counter), (new))) 499 500 /** 501 * atomic64_add_unless - add unless the number is a given value 502 * @v: pointer of type atomic64_t 503 * @a: the amount to add to v... 504 * @u: ...unless v is equal to u. 505 * 506 * Atomically adds @a to @v, so long as it was not @u. 507 * Returns true iff @v was not @u. 508 */ 509 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 510 { 511 long c, old; 512 c = atomic64_read(v); 513 for (;;) { 514 if (unlikely(c == (u))) 515 break; 516 old = atomic64_cmpxchg((v), c, c + (a)); 517 if (likely(old == c)) 518 break; 519 c = old; 520 } 521 return c != (u); 522 } 523 524 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 525 526 #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) 527 #define atomic64_inc_return(v) atomic64_add_return(1, (v)) 528 529 /* 530 * atomic64_sub_and_test - subtract value from variable and test result 531 * @i: integer value to subtract 532 * @v: pointer of type atomic64_t 533 * 534 * Atomically subtracts @i from @v and returns 535 * true if the result is zero, or false for all 536 * other cases. 537 */ 538 #define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0) 539 540 /* 541 * atomic64_inc_and_test - increment and test 542 * @v: pointer of type atomic64_t 543 * 544 * Atomically increments @v by 1 545 * and returns true if the result is zero, or false for all 546 * other cases. 547 */ 548 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) 549 550 /* 551 * atomic64_dec_and_test - decrement by 1 and test 552 * @v: pointer of type atomic64_t 553 * 554 * Atomically decrements @v by 1 and 555 * returns true if the result is 0, or false for all other 556 * cases. 557 */ 558 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) 559 560 /* 561 * atomic64_dec_if_positive - decrement by 1 if old value positive 562 * @v: pointer of type atomic64_t 563 */ 564 #define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v) 565 566 /* 567 * atomic64_inc - increment atomic variable 568 * @v: pointer of type atomic64_t 569 * 570 * Atomically increments @v by 1. 571 */ 572 #define atomic64_inc(v) atomic64_add(1, (v)) 573 574 /* 575 * atomic64_dec - decrement and test 576 * @v: pointer of type atomic64_t 577 * 578 * Atomically decrements @v by 1. 579 */ 580 #define atomic64_dec(v) atomic64_sub(1, (v)) 581 582 /* 583 * atomic64_add_negative - add and test if negative 584 * @v: pointer of type atomic64_t 585 * @i: integer value to add 586 * 587 * Atomically adds @i to @v and returns true 588 * if the result is negative, or false when 589 * result is greater than or equal to zero. 590 */ 591 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0) 592 593 #endif /* CONFIG_64BIT */ 594 595 #endif /* _ASM_ATOMIC_H */ 596