/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/page.h>

/*
 * Call panic() with the message in a position-independent way:
 * the address of both the message and panic() are computed with
 * PTR_LA rather than absolute jumps.  AT is used as the jump
 * register (hence .set noat), and the 9: b 9b loop spins forever
 * should panic() ever return.  TEXT(msg) emits the message string
 * (labelled 8, referenced via 8f above).
 */
#define PANIC_PIC(msg)					\
		.set push;				\
		.set	reorder;			\
		PTR_LA	a0,8f;				\
		.set	noat;				\
		PTR_LA	AT, panic;			\
		jr	AT;				\
9:		b	9b;				\
		.set	pop;				\
		TEXT(msg)

	__INIT

/*
 * Placeholder vectors: reaching either of these means the CPU took an
 * exception through a vector no handler was installed for.
 */
NESTED(except_vec0_generic, 0, sp)
	PANIC_PIC("Exception vector 0 called")
	END(except_vec0_generic)

NESTED(except_vec1_generic, 0, sp)
	PANIC_PIC("Exception vector 1 called")
	END(except_vec1_generic)

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat			# k0/k1 only; AT must stay untouched
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX		# R5432 erratum workaround — see asm/war.h
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c		# k1 = Cause.ExcCode << 2 (bits 6..2)
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1		# scale by 8 for 64-bit pointer table
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0			# tail-dispatch to the installed handler
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	mips3
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2		# ExcCode 31 = VCED (data VC exception)
	andi	k1, k1, 0x7c		# k1 = Cause.ExcCode << 2
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2		# delay slot: ExcCode 14 = VCEI
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1		# delay slot on 64-bit: scale index by 8
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0			# not a VCE — normal table dispatch

	/*
	 * Bad situation: we may now have two dirty primary cache lines for
	 * the same physical address.  We can safely invalidate the line
	 * pointed to by c0_badvaddr because after return from this
	 * exception handler the load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)		# invalidate primary D-line
	cache	Hit_Writeback_Inv_SD, (k0)	# flush matching secondary line
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count			# bump /proc VCED statistic
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret					# retry the faulting access

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count			# bump /proc VCEI statistic
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret					# retry the faulting fetch
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5
/*
 * Common interrupt entry: save full register state, mask interrupts,
 * and tail-call the platform dispatcher with ra aimed at ret_from_irq
 * so the dispatcher "returns" straight into IRQ exit handling.
 */
NESTED(handle_int, PT_SIZE, sp)
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)	# s0 = previous thread_info->regs
	LONG_S	sp, TI_REGS($28)	# publish current trap frame
	PTR_LA	ra, ret_from_irq
	j	plat_irq_dispatch
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.  The EXPORT()ed labels below mark the
 * instructions whose immediates the copy/patch code rewrites.
 */
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
EXPORT(except_vec_vi_mori)
	ori	a0, $0, 0		/* Patched: a0 = IM bit to clear */
#endif /* CONFIG_MIPS_MT_SMTC */
EXPORT(except_vec_vi_lui)
	lui	v0, 0			/* Patched */
	j	except_vec_vi_handler
EXPORT(except_vec_vi_ori)
	 ori	v0, 0			/* Patched (delay slot): v0 = handler */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation.  So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwised unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1		# t0 = IM bits to mask (from patched ori)
	mfc0	t2, CP0_TCCONTEXT
	or	t0, t0, t2		# remember masked bits in TCContext
	mtc0	t0, CP0_TCCONTEXT
	xor	t1, t1, t0		# clear those IM bits in Status
	mtc0	t1, CP0_STATUS
	_ehb				# hazard barrier before CLI re-reads Status
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)	# s0 = previous thread_info->regs
	LONG_S	sp, TI_REGS($28)	# publish current trap frame
	PTR_LA	ra, ret_from_irq	# handler "returns" into IRQ exit path
	jr	v0
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 * k0 is parked in CP0_DESAVE for the duration; if the exception was not
 * raised by an SDBBP instruction (Debug.DBp, tested via the sll/bgez
 * trick on bit 1) we deret immediately.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

	PTR_LA	k0, ejtag_debug_buffer	# stash k1 — SAVE_ALL clobbers it
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer	# recover original k1
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret				# resume from debug exception
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
	END(except_vec_nmi)

	__FINIT

/*
 * Save state, hand the trap frame to the C-level NMI handler, then
 * eret.  The exception may have been taken in any mode, so full state
 * is preserved around the call.
 */
NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	RESTORE_ALL
	.set	mips3		# eret needs MIPS III or later
	eret
	.set	pop
	END(nmi_handler)

	/*
	 * __build_clear_<name> macros: per-exception fixup emitted by
	 * __BUILD_HANDLER right after SAVE_ALL, before the C handler
	 * is invoked.
	 */

	/* No fixup required. */
	.macro	__build_clear_none
	.endm

	/* Re-enable interrupts for handlers that may sleep. */
	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	/* Keep interrupts hard-disabled (e.g. bus errors, mcheck). */
	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	/*
	 * FP exception: read FCSR into a1 (second C argument), clear the
	 * cause bits (bits 17..12) so the FPU can be used again, then
	 * re-enable interrupts.
	 */
	.macro	__build_clear_fpe
	cfc1	a1, fcr31
	li	a2, ~(0x3f << 12)	# mask of FCSR cause field
	and	a2, a1
	ctc1	a2, fcr31
	TRACE_IRQS_ON
	STI
	.endm

	/*
	 * Address error: record the faulting virtual address in the trap
	 * frame before it can be clobbered, then restore kernel mode.
	 */
	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	/* "silent" verbosity: emit nothing. */
	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...
*/ 329 .macro __BUILD_verbose nexception 330 LONG_L a1, PT_EPC(sp) 331#ifdef CONFIG_32BIT 332 PRINT("Got \nexception at %08lx\012") 333#endif 334#ifdef CONFIG_64BIT 335 PRINT("Got \nexception at %016lx\012") 336#endif 337 .endm 338 339 .macro __BUILD_count exception 340 LONG_L t0,exception_count_\exception 341 LONG_ADDIU t0, 1 342 LONG_S t0,exception_count_\exception 343 .comm exception_count\exception, 8, 8 344 .endm 345 346 .macro __BUILD_HANDLER exception handler clear verbose ext 347 .align 5 348 NESTED(handle_\exception, PT_SIZE, sp) 349 .set noat 350 SAVE_ALL 351 FEXPORT(handle_\exception\ext) 352 __BUILD_clear_\clear 353 .set at 354 __BUILD_\verbose \exception 355 move a0, sp 356 PTR_LA ra, ret_from_exception 357 j do_\handler 358 END(handle_\exception) 359 .endm 360 361 .macro BUILD_HANDLER exception handler clear verbose 362 __BUILD_HANDLER \exception \handler \clear \verbose _int 363 .endm 364 365 BUILD_HANDLER adel ade ade silent /* #4 */ 366 BUILD_HANDLER ades ade ade silent /* #5 */ 367 BUILD_HANDLER ibe be cli silent /* #6 */ 368 BUILD_HANDLER dbe be cli silent /* #7 */ 369 BUILD_HANDLER bp bp sti silent /* #9 */ 370 BUILD_HANDLER ri ri sti silent /* #10 */ 371 BUILD_HANDLER cpu cpu sti silent /* #11 */ 372 BUILD_HANDLER ov ov sti silent /* #12 */ 373 BUILD_HANDLER tr tr sti silent /* #13 */ 374 BUILD_HANDLER fpe fpe fpe silent /* #15 */ 375 BUILD_HANDLER mdmx mdmx sti silent /* #22 */ 376 BUILD_HANDLER watch watch sti verbose /* #23 */ 377 BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ 378 BUILD_HANDLER mt mt sti silent /* #25 */ 379 BUILD_HANDLER dsp dsp sti silent /* #26 */ 380 BUILD_HANDLER reserved reserved sti verbose /* others */ 381 382 .align 5 383 LEAF(handle_ri_rdhwr_vivt) 384#ifdef CONFIG_MIPS_MT_SMTC 385 PANIC_PIC("handle_ri_rdhwr_vivt called") 386#else 387 .set push 388 .set noat 389 .set noreorder 390 /* check if TLB contains a entry for EPC */ 391 MFC0 k1, CP0_ENTRYHI 392 andi k1, 0xff /* ASID_MASK */ 393 MFC0 k0, CP0_EPC 394 
PTR_SRL k0, PAGE_SHIFT + 1 395 PTR_SLL k0, PAGE_SHIFT + 1 396 or k1, k0 397 MTC0 k1, CP0_ENTRYHI 398 mtc0_tlbw_hazard 399 tlbp 400 tlb_probe_hazard 401 mfc0 k1, CP0_INDEX 402 .set pop 403 bltz k1, handle_ri /* slow path */ 404 /* fall thru */ 405#endif 406 END(handle_ri_rdhwr_vivt) 407 408 LEAF(handle_ri_rdhwr) 409 .set push 410 .set noat 411 .set noreorder 412 /* 0x7c03e83b: rdhwr v1,$29 */ 413 MFC0 k1, CP0_EPC 414 lui k0, 0x7c03 415 lw k1, (k1) 416 ori k0, 0xe83b 417 .set reorder 418 bne k0, k1, handle_ri /* if not ours */ 419 /* The insn is rdhwr. No need to check CAUSE.BD here. */ 420 get_saved_sp /* k1 := current_thread_info */ 421 .set noreorder 422 MFC0 k0, CP0_EPC 423#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 424 ori k1, _THREAD_MASK 425 xori k1, _THREAD_MASK 426 LONG_L v1, TI_TP_VALUE(k1) 427 LONG_ADDIU k0, 4 428 jr k0 429 rfe 430#else 431 LONG_ADDIU k0, 4 /* stall on $k0 */ 432 MTC0 k0, CP0_EPC 433 /* I hope three instructions between MTC0 and ERET are enough... */ 434 ori k1, _THREAD_MASK 435 xori k1, _THREAD_MASK 436 LONG_L v1, TI_TP_VALUE(k1) 437 .set mips3 438 eret 439 .set mips0 440#endif 441 .set pop 442 END(handle_ri_rdhwr) 443 444#ifdef CONFIG_64BIT 445/* A temporary overflow handler used by check_daddi(). */ 446 447 __INIT 448 449 BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */ 450#endif 451