/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#ifdef __ASSEMBLY__
#define ASMMACRO(name, code...) .macro name; code; .endm
#else

#include <asm/cpu-features.h>

#define ASMMACRO(name, code...)						\
__asm__(".macro " #name "; " #code "; .endm");				\
									\
static inline void name(void)						\
{									\
	__asm__ __volatile__ (#name);					\
}

/*
 * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
 */
extern void mips_ihb(void);

#endif

ASMMACRO(_ssnop,
	 sll	$0, $0, 1
	)

ASMMACRO(_ehb,
	 sll	$0, $0, 3
	)

/*
 * TLB hazards
 */
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)

/*
 * MIPSR2 defines ehb for hazard avoidance
 */

ASMMACRO(mtc0_tlbw_hazard,
	 _ehb
	)
ASMMACRO(tlbw_use_hazard,
	 _ehb
	)
ASMMACRO(tlb_probe_hazard,
	 _ehb
	)
ASMMACRO(irq_enable_hazard,
	 _ehb
	)
ASMMACRO(irq_disable_hazard,
	 _ehb
	)
ASMMACRO(back_to_back_c0_hazard,
	 _ehb
	)
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit and 64-bit code respectively, so they can't be used
 * without conditional compilation.  The alternative is switching the
 * assembler to 64-bit code, which happens to work right even for 32-bit
 * code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
	defined(CONFIG_CPU_BMIPS)

/*
 * These are slightly complicated by the fact that we guarantee R1 kernels to
 * run fine on R2 processors.
 */
ASMMACRO(mtc0_tlbw_hazard,
	 _ssnop; _ssnop; _ehb
	)
ASMMACRO(tlbw_use_hazard,
	 _ssnop; _ssnop; _ssnop; _ehb
	)
ASMMACRO(tlb_probe_hazard,
	 _ssnop; _ssnop; _ssnop; _ehb
	)
ASMMACRO(irq_enable_hazard,
	 _ssnop; _ssnop; _ssnop; _ehb
	)
ASMMACRO(irq_disable_hazard,
	 _ssnop; _ssnop; _ssnop; _ehb
	)
ASMMACRO(back_to_back_c0_hazard,
	 _ssnop; _ssnop; _ssnop; _ehb
	)
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit and 64-bit code respectively, so they can't be used
 * without conditional compilation.  The alternative is switching the
 * assembler to 64-bit code, which happens to work right even for 32-bit
 * code ...
 */
#define __instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#define instruction_hazard()						\
do {									\
	if (cpu_has_mips_r2)						\
		__instruction_hazard();					\
} while (0)

#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
	defined(CONFIG_CPU_R5500)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a
 * no-brainer.
 */

ASMMACRO(mtc0_tlbw_hazard,
	)
ASMMACRO(tlbw_use_hazard,
	)
ASMMACRO(tlb_probe_hazard,
	)
ASMMACRO(irq_enable_hazard,
	)
ASMMACRO(irq_disable_hazard,
	)
ASMMACRO(back_to_back_c0_hazard,
	)
#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like R4000 for historical reasons
 */
ASMMACRO(mtc0_tlbw_hazard,
	)
ASMMACRO(tlbw_use_hazard,
	)
ASMMACRO(tlb_probe_hazard,
	)
ASMMACRO(irq_enable_hazard,
	)
ASMMACRO(irq_disable_hazard,
	 _ssnop; _ssnop; _ssnop
	)
ASMMACRO(back_to_back_c0_hazard,
	)
#define instruction_hazard() do { } while (0)

#else

/*
 * Finally the catchall case for all other processors including R4000, R4400,
 * R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
ASMMACRO(mtc0_tlbw_hazard,
	 nop; nop
	)
ASMMACRO(tlbw_use_hazard,
	 nop; nop; nop
	)
ASMMACRO(tlb_probe_hazard,
	 nop; nop; nop
	)
ASMMACRO(irq_enable_hazard,
	 _ssnop; _ssnop; _ssnop;
	)
ASMMACRO(irq_disable_hazard,
	 nop; nop; nop
	)
ASMMACRO(back_to_back_c0_hazard,
	 _ssnop; _ssnop; _ssnop;
	)
#define instruction_hazard() do { } while (0)

#endif


/* FPU hazards */

#if defined(CONFIG_CPU_SB1)
ASMMACRO(enable_fpu_hazard,
	 .set	push;
	 .set	mips64;
	 .set	noreorder;
	 _ssnop;
	 bnezl	$0, .+4;
	 _ssnop;
	 .set	pop
	)
ASMMACRO(disable_fpu_hazard,
	)

#elif defined(CONFIG_CPU_MIPSR2)
ASMMACRO(enable_fpu_hazard,
	 _ehb
	)
ASMMACRO(disable_fpu_hazard,
	 _ehb
	)
#else
ASMMACRO(enable_fpu_hazard,
	 nop; nop; nop; nop
	)
ASMMACRO(disable_fpu_hazard,
	 _ehb
	)
#endif

#endif /* _ASM_HAZARDS_H */
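
/*
 * Illustrative usage sketch (not part of this header; the helpers named
 * below are assumed rather than defined here): callers place the matching
 * hazard barrier between a CP0/TLB write and the first instruction that
 * depends on it.  Assuming the write_c0_*() accessors and
 * tlb_write_indexed() from <asm/mipsregs.h>, an indexed TLB update would
 * look roughly like:
 *
 *	write_c0_entryhi(entryhi);
 *	write_c0_entrylo0(entrylo0);
 *	write_c0_entrylo1(entrylo1);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 *
 * mtc0_tlbw_hazard() covers the CP0 writes against the TLB write itself;
 * tlbw_use_hazard() covers the TLB write against later uses of the new
 * mapping.  On MIPSR2 and R10000-class configurations these barriers expand
 * to a single ehb or to nothing, so the pattern costs little where the
 * hardware already interlocks.
 */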