/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

#include <asm-generic/barrier.h>

#endif /* !(__SPARC64_BARRIER_H) */
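
/* Illustrative sketch, not part of the header above: one way the
 * release/acquire macros defined here are typically consumed once
 * <asm-generic/barrier.h> maps them to smp_store_release() and
 * smp_load_acquire().  The variables "msg" and "msg_ready" are made up
 * for this example.
 *
 *	Producer (CPU 0)			Consumer (CPU 1)
 *
 *	WRITE_ONCE(msg, m);			if (smp_load_acquire(&msg_ready))
 *	smp_store_release(&msg_ready, 1);		m = READ_ONCE(msg);
 *
 * Because the kernel runs sparc64 in TSO mode, hardware already orders
 * store->store, load->load and load->store accesses, so a compiler-only
 * barrier() in each macro is enough: it merely stops the compiler from
 * moving the "msg" access across the "msg_ready" access.
 */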