/*
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2003 - 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
! to be jumped is too far, but it causes illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
#if defined(CONFIG_KGDB_NMI)
NMI_VEC = 0x1c0			! Must catch early for debounce
#endif

/* Offsets to the stack (one slot per register in the pt_regs frame) */
OFF_R0	=  0		/* Return value. New ABI also arg4 */
OFF_R1	=  4		/* New ABI: arg5 */
OFF_R2	=  8		/* New ABI: arg6 */
OFF_R3	=  12		/* New ABI: syscall_nr */
OFF_R4	=  16		/* New ABI: arg0 */
OFF_R5	=  20		/* New ABI: arg1 */
OFF_R6	=  24		/* New ABI: arg2 */
OFF_R7	=  28		/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC	=  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)

/*
 * Shorthand for the BANK1 scratch registers.  These aliases are only
 * meaningful while SR.RB=1 (i.e. during early exception handling, before
 * the bank is switched back below).
 */
#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
! _and_
! TLB hits, but the access violates the protection.
! It can be valid access, such as stack grow and/or C-O-W.
!
!
! Find the pmd/pte entry and loadtlb
! If it's not found, cause address error (SEGV)
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends *much* on C implementation.
!

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_dpf
	 mov	#0, r5		! writeaccess = 0 (read fault)

	.align	2
ENTRY(tlb_miss_store)
	bra	call_dpf
	 mov	#1, r5		! writeaccess = 1 (write fault)

	.align	2
ENTRY(initial_page_write)
	bra	call_dpf
	 mov	#1, r5		! first write to a clean page: treat as write fault

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_dpf
	 mov	#0, r5		! writeaccess = 0

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_dpf
	 mov	#1, r5		! writeaccess = 1

!
! Common tail for all of the TLB fault stubs above.
! On entry: r5 = writeaccess flag, r15 = saved register frame (pt_regs).
!
call_dpf:
	mov.l	1f, r0		! &MMU_TEA (faulting-address register)
	mov	r5, r8		! preserve writeaccess across the call
	mov.l	@r0, r6		! r6 = faulting virtual address
	mov	r6, r9		! preserve the address across the call
	mov.l	2f, r0
	sts	pr, r10		! save pr around the jsr below
	jsr	@r0		! __do_page_fault(regs, writeaccess, address)
	 mov	r15, r4		! arg: pt_regs
	!
	tst	r0, r0		! r0 == 0: fault handled, just return
	bf/s	0f
	 lds	r10, pr		! restore pr in the delay slot (both paths need it)
	rts
	 nop
0:	sti			! slow path: re-enable interrupts, then
	mov.l	3f, r0		! hand off to the full C fault handler
	mov	r9, r6		! address
	mov	r8, r5		! writeaccess
	jmp	@r0		! tail-call do_page_fault (returns via pr)
	 mov	r15, r4		! arg: pt_regs

	.align 2
1:	.long	MMU_TEA
2:	.long	__do_page_fault
3:	.long	do_page_fault

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

!
! Common tail for the address-error stubs: fetch the faulting address
! and tail-call the C handler.
!
	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0		! tail-call do_address_error (returns via pr)
	 mov	r15, r4		! regs

	.align 2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/*
	 * Unwind the stack and jmp to the debug entry.
	 * Pops the complete register frame (mirroring restore_all below),
	 * switches to BANK1 mid-way so the last few slots can be held in
	 * k0/k1, then vectors through gdb_vbr_vector to the debugger stub.
	 */
ENTRY(sh_bios_handler)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	stc	sr, r8
	mov.l	1f, r9		! BL =1, RB=1, IMASK=0x0F
	or	r9, r8
	ldc	r8, sr		! here, change the register bank
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k0	! original stack pointer (restored last, below)
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k1	! saved SR, installed as ssr at the jump
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	mov	k0, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0		! k0 = *gdb_vbr_vector (debugger entry point)
	jmp	@k0
	 ldc	k1, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

!
! Restore the full register frame and return from the exception with rte.
! Expects r15 to point at a complete pt_regs frame (layout in the header
! comment above).
!
restore_all:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r8
	mov.l	7f, r9
	or	r9, r8		! BL =1, RB=1
	ldc	r8, sr		! here, change the register bank
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4	! original stack pointer
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k3	! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	add	#4, r15		! Skip syscall number
	!
#ifdef CONFIG_SH_DSP
	mov.l	@r15+, k0	! DSP mode marker (pushed by handle_exception)
	mov.l	5f, k1
	cmp/eq	k0, k1		! Do we have a DSP stack frame?
	bf	skip_restore

	stc	sr, k0		! Enable CPU DSP mode
	or	k1, k0		! (within kernel it may be disabled)
	ldc	k0, sr
	mov	r2, k0		! Backup r2

	! Restore DSP registers from stack
	mov	r15, r2
	movs.l	@r2+, a1
	movs.l	@r2+, a0g
	movs.l	@r2+, a1g
	movs.l	@r2+, m0
	movs.l	@r2+, m1
	mov	r2, r15

	lds.l	@r15+, a0
	lds.l	@r15+, x0
	lds.l	@r15+, x1
	lds.l	@r15+, y0
	lds.l	@r15+, y1
	lds.l	@r15+, dsr
	ldc.l	@r15+, rs
	ldc.l	@r15+, re
	ldc.l	@r15+, mod

	mov	k0, r2		! Restore r2
skip_restore:
#endif
	!
	! Calculate new SR value
	mov	k3, k2		! original SR value
	mov	#0xf0, k1	! 0xf0 sign-extends; extu.b yields the IMASK field mask
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2		! Mask original SR value (clear IMASK bits 4-7)
	!
	mov	k3, k0		! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0	! k0 = saved IMASK field, shifted down by 2
	cmp/eq	#0x3c, k0	! was the saved IMASK 0xf (all blocked)?
	bt/s	6f		! yes: keep it (restored by the shll2 in the slot)
	 shll2	k0
	mov	g_imask, k0	! no: use the global interrupt mask instead
	!
6:	or	k0, k2		! Set the IMASK-bits
	ldc	k2, ssr
	!
#if defined(CONFIG_KGDB_NMI)
	! Clear in_nmi
	mov.l	6f, k0
	mov	#0, k1
	mov.b	k1, @k0
#endif
	mov.l	@r15+, k2	! restore EXPEVT
	mov	k4, r15		! back onto the original (pre-exception) stack
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
6:	.long	in_nmi
7:	.long	0x30000000	! RB=1, BL=1

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned page boundary.
!
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! General exception vector, at VBR + 0x100 (hence the 256-byte alignment).
	.balign 	256,0,256
general_exception:
	mov.l	1f, k2
	mov.l	2f, k3		! k3 = return address for the handler
	bra	handle_exception
	 mov.l	@k2, k2		! k2 = EXPEVT exception code
	.align	2
1:	.long	EXPEVT
2:	.long	ret_from_exception
!
!

! TLB-miss exception vector, at VBR + 0x400 (hence the 1024-byte alignment).
	.balign 	1024,0,1024
tlb_miss:
	mov.l	1f, k2
	mov.l	4f, k3		! k3 = return address (ret_from_exception)
	bra	handle_exception
	 mov.l	@k2, k2		! k2 = EXPEVT exception code
!
! Interrupt vector, at VBR + 0x600 (0x400 + 512-byte alignment).
	.balign 	512,0,512
interrupt:
	mov.l	2f, k2		! &INTEVT
	mov.l	3f, k3		! k3 = return address (ret_from_irq)
#if defined(CONFIG_KGDB_NMI)
	! Debounce (filter nested NMI)
	mov.l	@k2, k0
	mov.l	5f, k1
	cmp/eq	k1, k0		! is this vector the NMI?
	bf	0f
	mov.l	6f, k1
	tas.b	@k1		! atomically test-and-set in_nmi
	bt	0f		! was clear: first NMI, handle it
	rte			! already inside an NMI: drop this one
	 nop
	.align	2
5:	.long	NMI_VEC
6:	.long	in_nmi
0:
#endif /* defined(CONFIG_KGDB_NMI) */
	bra	handle_exception
	 mov	#-1, k2		! interrupt exception marker

	.align	2
1:	.long	EXPEVT
2:	.long	INTEVT
3:	.long	ret_from_irq
4:	.long	ret_from_exception

!
! Common exception entry.  On entry (SR.RB=1, SR.BL=1):
!	k2 = EXPEVT code, or -1 for an interrupt
!	k3 = address the C handler should return to (becomes pr)
! Builds the pt_regs frame on the kernel stack, drops back to BANK0 with
! interrupts blocked, then dispatches through exception_handling_table
! (or to do_IRQ for interrupts).
!
	.align	2
ENTRY(handle_exception)
	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
	! save all registers onto stack.
	!
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1	! top of the kernel stack = thread_info + THREAD_SIZE
	mov	k1, r15		! change to kernel stack
	!
1:	mov.l	2f, k1		! k1 = SR bits to set later (FD=1, IMASK=15)
	!
#ifdef CONFIG_SH_DSP
	mov.l	r2, @-r15	! Save r2, we need another reg
	stc	sr, k4
	mov.l	1f, r2
	tst	r2, k4		! Check if in DSP mode
	mov.l	@r15+, r2	! Restore r2 now
	bt/s	skip_save
	 mov	#0, k4		! Set marker for no stack frame

	mov	r2, k4		! Backup r2 (in k4) for later

	! Save DSP registers on stack
	stc.l	mod, @-r15
	stc.l	re, @-r15
	stc.l	rs, @-r15
	sts.l	dsr, @-r15
	sts.l	y1, @-r15
	sts.l	y0, @-r15
	sts.l	x1, @-r15
	sts.l	x0, @-r15
	sts.l	a0, @-r15

	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.

	! FIXME: Make sure that this is still the case with newer toolchains,
	! as we're not at all interested in supporting ancient toolchains at
	! this point. -- PFM.

	mov	r15, r2
	.word	0xf653		! movs.l	a1, @-r2
	.word	0xf6f3		! movs.l	a0g, @-r2
	.word	0xf6d3		! movs.l	a1g, @-r2
	.word	0xf6c3		! movs.l	m0, @-r2
	.word	0xf6e3		! movs.l	m1, @-r2
	mov	r2, r15

	mov	k4, r2		! Restore r2
	mov.l	1f, k4		! Force DSP stack frame
skip_save:
	mov.l	k4, @-r15	! Push DSP mode marker onto stack
#endif
	! Save the user registers on the stack.
	mov.l	k2, @-r15	! EXPEVT

	mov	#-1, k4
	mov.l	k4, @-r15	! set TRA (default: -1)
	!
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	sts.l	pr, @-r15
	stc.l	spc, @-r15
	!
	lds	k3, pr		! Set the return address to pr
	!
	mov.l	k0, @-r15	! save original stack
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15
	!
	stc	sr, r8		! Back to normal register bank, and
	or	k1, r8		! Block all interrupts
	mov.l	3f, k1
	and	k1, r8		! ... (clear RB and BL)
	ldc	r8, sr		! ...changed here.
	!
	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15

	/*
	 * This gets a bit tricky..  in the INTEVT case we don't want to use
	 * the VBR offset as a destination in the jump call table, since all
	 * of the destinations are the same. In this case, (interrupt) sets
	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
	 * to determine the exception type. For all other exceptions, we
	 * forcibly read EXPEVT from memory and fix up the jump address, in
	 * the interrupt exception case we jump to do_IRQ() and defer the
	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
	 * checks that do_IRQ() was doing..
	 */
	stc	r2_bank, r8	! exception code, or -1 for interrupts
	cmp/pz	r8
	bf	interrupt_exception	! negative (-1 marker) => interrupt
	shlr2	r8		! EXPEVT codes step by 0x20; code >> 3 is the
	shlr	r8		! byte offset into a table of 4-byte pointers
	mov.l	4f, r9
	add	r8, r9
	mov.l	@r9, r9
	jmp	@r9		! dispatch via exception_handling_table
	 nop
	rts			! not reached: the handler returns via pr (k3)
	 nop

	.align	2
1:	.long	0x00001000	! DSP=1
2:	.long	0x000080f0	! FD=1, IMASK=15
3:	.long	0xcfffffff	! RB=0, BL=0
4:	.long	exception_handling_table

!
! Interrupt path split out of handle_exception: read INTEVT here and
! hand straight off to do_IRQ(intevt, regs); it returns via pr
! (ret_from_irq, loaded at the interrupt vector).
!
interrupt_exception:
	mov.l	1f, r9
	mov.l	2f, r4
	mov.l	@r4, r4		! arg0: INTEVT code
	jmp	@r9
	 mov	r15, r5		! arg1: pt_regs
	rts			! not reached
	 nop

	.align	2
1:	.long	do_IRQ
2:	.long	INTEVT

! Placeholder handler for unpopulated exception_handling_table slots.
	.align	2
ENTRY(exception_none)
	rts
	 nop