1/* 2 * Low-level exception handling 3 * 4 * This file is subject to the terms and conditions of the GNU General Public 5 * License. See the file "COPYING" in the main directory of this archive 6 * for more details. 7 * 8 * Copyright (C) 2004 - 2008 by Tensilica Inc. 9 * Copyright (C) 2015 Cadence Design Systems Inc. 10 * 11 * Chris Zankel <chris@zankel.net> 12 * 13 */ 14 15#include <linux/linkage.h> 16#include <asm/asm-offsets.h> 17#include <asm/processor.h> 18#include <asm/coprocessor.h> 19#include <asm/thread_info.h> 20#include <asm/asm-uaccess.h> 21#include <asm/unistd.h> 22#include <asm/ptrace.h> 23#include <asm/current.h> 24#include <asm/pgtable.h> 25#include <asm/page.h> 26#include <asm/signal.h> 27#include <asm/tlbflush.h> 28#include <variant/tie-asm.h> 29 30/* Unimplemented features. */ 31 32#undef KERNEL_STACK_OVERFLOW_CHECK 33 34/* Not well tested. 35 * 36 * - fast_coprocessor 37 */ 38 39/* 40 * Macro to find first bit set in WINDOWBASE from the left + 1 41 * 42 * 100....0 -> 1 43 * 010....0 -> 2 44 * 000....1 -> WSBITS 45 */ 46 47 .macro ffs_ws bit mask 48 49#if XCHAL_HAVE_NSA 50 nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0) 51 addi \bit, \bit, WSBITS - 32 + 1 # uppest bit set -> return 1 52#else 53 movi \bit, WSBITS 54#if WSBITS > 16 55 _bltui \mask, 0x10000, 99f 56 addi \bit, \bit, -16 57 extui \mask, \mask, 16, 16 58#endif 59#if WSBITS > 8 6099: _bltui \mask, 0x100, 99f 61 addi \bit, \bit, -8 62 srli \mask, \mask, 8 63#endif 6499: _bltui \mask, 0x10, 99f 65 addi \bit, \bit, -4 66 srli \mask, \mask, 4 6799: _bltui \mask, 0x4, 99f 68 addi \bit, \bit, -2 69 srli \mask, \mask, 2 7099: _bltui \mask, 0x2, 99f 71 addi \bit, \bit, -1 7299: 73 74#endif 75 .endm 76 77 78 .macro irq_save flags tmp 79#if XTENSA_FAKE_NMI 80#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL 81 rsr \flags, ps 82 extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 83 bgei \tmp, LOCKLEVEL, 99f 84 rsil \tmp, LOCKLEVEL 8599: 86#else 87 movi \tmp, LOCKLEVEL 88 rsr \flags, ps 89 or \flags, \flags, \tmp 90 xsr \flags, ps 91 rsync 92#endif 93#else 94 rsil \flags, LOCKLEVEL 95#endif 96 .endm 97 98/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ 99 100/* 101 * First-level exception handler for user exceptions. 102 * Save some special registers, extra states and all registers in the AR 103 * register file that were in use in the user task, and jump to the common 104 * exception code. 105 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to 106 * save them for kernel exceptions). 107 * 108 * Entry condition for user_exception: 109 * 110 * a0: trashed, original value saved on stack (PT_AREG0) 111 * a1: a1 112 * a2: new stack pointer, original value in depc 113 * a3: a3 114 * depc: a2, original value saved on stack (PT_DEPC) 115 * excsave1: dispatch table 116 * 117 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 118 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 119 * 120 * Entry condition for _user_exception: 121 * 122 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC 123 * excsave has been restored, and 124 * stack pointer (a1) has been set. 125 * 126 * Note: _user_exception might be at an odd address. Don't use call0..call12 127 */ 128 .literal_position 129 130ENTRY(user_exception) 131 132 /* Save a1, a2, a3, and set SP. */ 133 134 rsr a0, depc 135 s32i a1, a2, PT_AREG1 136 s32i a0, a2, PT_AREG2 137 s32i a3, a2, PT_AREG3 138 mov a1, a2 139 140 .globl _user_exception 141_user_exception: 142 143 /* Save SAR and turn off single stepping */ 144 145 movi a2, 0 146 wsr a2, depc # terminate user stack trace with 0 147 rsr a3, sar 148 xsr a2, icountlevel 149 s32i a3, a1, PT_SAR 150 s32i a2, a1, PT_ICOUNTLEVEL 151 152#if XCHAL_HAVE_THREADPTR 153 rur a2, threadptr 154 s32i a2, a1, PT_THREADPTR 155#endif 156 157 /* Rotate ws so that the current windowbase is at bit0. */ 158 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ 159 160 rsr a2, windowbase 161 rsr a3, windowstart 162 ssr a2 163 s32i a2, a1, PT_WINDOWBASE 164 s32i a3, a1, PT_WINDOWSTART 165 slli a2, a3, 32-WSBITS 166 src a2, a3, a2 167 srli a2, a2, 32-WSBITS 168 s32i a2, a1, PT_WMASK # needed for restoring registers 169 170 /* Save only live registers. */ 171 172 _bbsi.l a2, 1, 1f 173 s32i a4, a1, PT_AREG4 174 s32i a5, a1, PT_AREG5 175 s32i a6, a1, PT_AREG6 176 s32i a7, a1, PT_AREG7 177 _bbsi.l a2, 2, 1f 178 s32i a8, a1, PT_AREG8 179 s32i a9, a1, PT_AREG9 180 s32i a10, a1, PT_AREG10 181 s32i a11, a1, PT_AREG11 182 _bbsi.l a2, 3, 1f 183 s32i a12, a1, PT_AREG12 184 s32i a13, a1, PT_AREG13 185 s32i a14, a1, PT_AREG14 186 s32i a15, a1, PT_AREG15 187 _bnei a2, 1, 1f # only one valid frame? 188 189 /* Only one valid frame, skip saving regs. */ 190 191 j 2f 192 193 /* Save the remaining registers. 194 * We have to save all registers up to the first '1' from 195 * the right, except the current frame (bit 0). 196 * Assume a2 is: 001001000110001 197 * All register frames starting from the top field to the marked '1' 198 * must be saved. 199 */ 200 2011: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0 202 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1 203 and a3, a3, a2 # max. only one bit is set 204 205 /* Find number of frames to save */ 206 207 ffs_ws a0, a3 # number of frames to the '1' from left 208 209 /* Store information into WMASK: 210 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart, 211 * bits 4...: number of valid 4-register frames 212 */ 213 214 slli a3, a0, 4 # number of frames to save in bits 8..4 215 extui a2, a2, 0, 4 # mask for the first 16 registers 216 or a2, a3, a2 217 s32i a2, a1, PT_WMASK # needed when we restore the reg-file 218 219 /* Save 4 registers at a time */ 220 2211: rotw -1 222 s32i a0, a5, PT_AREG_END - 16 223 s32i a1, a5, PT_AREG_END - 12 224 s32i a2, a5, PT_AREG_END - 8 225 s32i a3, a5, PT_AREG_END - 4 226 addi a0, a4, -1 227 addi a1, a5, -16 228 _bnez a0, 1b 229 230 /* WINDOWBASE still in SAR! */ 231 232 rsr a2, sar # original WINDOWBASE 233 movi a3, 1 234 ssl a2 235 sll a3, a3 236 wsr a3, windowstart # set corresponding WINDOWSTART bit 237 wsr a2, windowbase # and WINDOWSTART 238 rsync 239 240 /* We are back to the original stack pointer (a1) */ 241 2422: /* Now, jump to the common exception handler. */ 243 244 j common_exception 245 246ENDPROC(user_exception) 247 248/* 249 * First-level exit handler for kernel exceptions 250 * Save special registers and the live window frame. 251 * Note: Even though we changes the stack pointer, we don't have to do a 252 * MOVSP here, as we do that when we return from the exception. 253 * (See comment in the kernel exception exit code) 254 * 255 * Entry condition for kernel_exception: 256 * 257 * a0: trashed, original value saved on stack (PT_AREG0) 258 * a1: a1 259 * a2: new stack pointer, original in DEPC 260 * a3: a3 261 * depc: a2, original value saved on stack (PT_DEPC) 262 * excsave_1: dispatch table 263 * 264 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 265 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 266 * 267 * Entry condition for _kernel_exception: 268 * 269 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC 270 * excsave has been restored, and 271 * stack pointer (a1) has been set. 272 * 273 * Note: _kernel_exception might be at an odd address. Don't use call0..call12 274 */ 275 276ENTRY(kernel_exception) 277 278 /* Save a1, a2, a3, and set SP. */ 279 280 rsr a0, depc # get a2 281 s32i a1, a2, PT_AREG1 282 s32i a0, a2, PT_AREG2 283 s32i a3, a2, PT_AREG3 284 mov a1, a2 285 286 .globl _kernel_exception 287_kernel_exception: 288 289 /* Save SAR and turn off single stepping */ 290 291 movi a2, 0 292 rsr a3, sar 293 xsr a2, icountlevel 294 s32i a3, a1, PT_SAR 295 s32i a2, a1, PT_ICOUNTLEVEL 296 297 /* Rotate ws so that the current windowbase is at bit0. */ 298 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ 299 300 rsr a2, windowbase # don't need to save these, we only 301 rsr a3, windowstart # need shifted windowstart: windowmask 302 ssr a2 303 slli a2, a3, 32-WSBITS 304 src a2, a3, a2 305 srli a2, a2, 32-WSBITS 306 s32i a2, a1, PT_WMASK # needed for kernel_exception_exit 307 308 /* Save only the live window-frame */ 309 310 _bbsi.l a2, 1, 1f 311 s32i a4, a1, PT_AREG4 312 s32i a5, a1, PT_AREG5 313 s32i a6, a1, PT_AREG6 314 s32i a7, a1, PT_AREG7 315 _bbsi.l a2, 2, 1f 316 s32i a8, a1, PT_AREG8 317 s32i a9, a1, PT_AREG9 318 s32i a10, a1, PT_AREG10 319 s32i a11, a1, PT_AREG11 320 _bbsi.l a2, 3, 1f 321 s32i a12, a1, PT_AREG12 322 s32i a13, a1, PT_AREG13 323 s32i a14, a1, PT_AREG14 324 s32i a15, a1, PT_AREG15 325 326 _bnei a2, 1, 1f 327 328 /* Copy spill slots of a0 and a1 to imitate movsp 329 * in order to keep exception stack continuous 330 */ 331 l32i a3, a1, PT_SIZE 332 l32i a0, a1, PT_SIZE + 4 333 s32e a3, a1, -16 334 s32e a0, a1, -12 3351: 336 l32i a0, a1, PT_AREG0 # restore saved a0 337 wsr a0, depc 338 339#ifdef KERNEL_STACK_OVERFLOW_CHECK 340 341 /* Stack overflow check, for debugging */ 342 extui a2, a1, TASK_SIZE_BITS,XX 343 movi a3, SIZE?? 344 _bge a2, a3, out_of_stack_panic 345 346#endif 347 348/* 349 * This is the common exception handler. 350 * We get here from the user exception handler or simply by falling through 351 * from the kernel exception handler. 352 * Save the remaining special registers, switch to kernel mode, and jump 353 * to the second-level exception handler. 354 * 355 */ 356 357common_exception: 358 359 /* Save some registers, disable loops and clear the syscall flag. */ 360 361 rsr a2, debugcause 362 rsr a3, epc1 363 s32i a2, a1, PT_DEBUGCAUSE 364 s32i a3, a1, PT_PC 365 366 movi a2, -1 367 rsr a3, excvaddr 368 s32i a2, a1, PT_SYSCALL 369 movi a2, 0 370 s32i a3, a1, PT_EXCVADDR 371#if XCHAL_HAVE_LOOPS 372 xsr a2, lcount 373 s32i a2, a1, PT_LCOUNT 374#endif 375 376 /* It is now save to restore the EXC_TABLE_FIXUP variable. */ 377 378 rsr a2, exccause 379 movi a3, 0 380 rsr a0, excsave1 381 s32i a2, a1, PT_EXCCAUSE 382 s32i a3, a0, EXC_TABLE_FIXUP 383 384 /* All unrecoverable states are saved on stack, now, and a1 is valid. 385 * Now we can allow exceptions again. In case we've got an interrupt 386 * PS.INTLEVEL is set to LOCKLEVEL disabling furhter interrupts, 387 * otherwise it's left unchanged. 388 * 389 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) 390 */ 391 392 rsr a3, ps 393 s32i a3, a1, PT_PS # save ps 394 395#if XTENSA_FAKE_NMI 396 /* Correct PS needs to be saved in the PT_PS: 397 * - in case of exception or level-1 interrupt it's in the PS, 398 * and is already saved. 399 * - in case of medium level interrupt it's in the excsave2. 400 */ 401 movi a0, EXCCAUSE_MAPPED_NMI 402 extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 403 beq a2, a0, .Lmedium_level_irq 404 bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception 405 beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0 406 407.Lmedium_level_irq: 408 rsr a0, excsave2 409 s32i a0, a1, PT_PS # save medium-level interrupt ps 410 bgei a3, LOCKLEVEL, .Lexception 411 412.Llevel1_irq: 413 movi a3, LOCKLEVEL 414 415.Lexception: 416 movi a0, 1 << PS_WOE_BIT 417 or a3, a3, a0 418#else 419 addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT 420 movi a0, LOCKLEVEL 421 extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 422 # a3 = PS.INTLEVEL 423 moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt 424 movi a2, 1 << PS_WOE_BIT 425 or a3, a3, a2 426 rsr a2, exccause 427#endif 428 429 /* restore return address (or 0 if return to userspace) */ 430 rsr a0, depc 431 wsr a3, ps 432 rsync # PS.WOE => rsync => overflow 433 434 /* Save lbeg, lend */ 435#if XCHAL_HAVE_LOOPS 436 rsr a4, lbeg 437 rsr a3, lend 438 s32i a4, a1, PT_LBEG 439 s32i a3, a1, PT_LEND 440#endif 441 442 /* Save SCOMPARE1 */ 443 444#if XCHAL_HAVE_S32C1I 445 rsr a3, scompare1 446 s32i a3, a1, PT_SCOMPARE1 447#endif 448 449 /* Save optional registers. */ 450 451 save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT 452 453 /* Go to second-level dispatcher. Set up parameters to pass to the 454 * exception handler and call the exception handler. 455 */ 456 457 rsr a4, excsave1 458 mov a6, a1 # pass stack frame 459 mov a7, a2 # pass EXCCAUSE 460 addx4 a4, a2, a4 461 l32i a4, a4, EXC_TABLE_DEFAULT # load handler 462 463 /* Call the second-level handler */ 464 465 callx4 a4 466 467 /* Jump here for exception exit */ 468 .global common_exception_return 469common_exception_return: 470 471#if XTENSA_FAKE_NMI 472 l32i a2, a1, PT_EXCCAUSE 473 movi a3, EXCCAUSE_MAPPED_NMI 474 beq a2, a3, .LNMIexit 475#endif 4761: 477 irq_save a2, a3 478#ifdef CONFIG_TRACE_IRQFLAGS 479 movi a4, trace_hardirqs_off 480 callx4 a4 481#endif 482 483 /* Jump if we are returning from kernel exceptions. */ 484 485 l32i a3, a1, PT_PS 486 GET_THREAD_INFO(a2, a1) 487 l32i a4, a2, TI_FLAGS 488 _bbci.l a3, PS_UM_BIT, 6f 489 490 /* Specific to a user exception exit: 491 * We need to check some flags for signal handling and rescheduling, 492 * and have to restore WB and WS, extra states, and all registers 493 * in the register file that were in use in the user task. 494 * Note that we don't disable interrupts here. 495 */ 496 497 _bbsi.l a4, TIF_NEED_RESCHED, 3f 498 _bbsi.l a4, TIF_NOTIFY_RESUME, 2f 499 _bbci.l a4, TIF_SIGPENDING, 5f 500 5012: l32i a4, a1, PT_DEPC 502 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f 503 504 /* Call do_signal() */ 505 506#ifdef CONFIG_TRACE_IRQFLAGS 507 movi a4, trace_hardirqs_on 508 callx4 a4 509#endif 510 rsil a2, 0 511 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) 512 mov a6, a1 513 callx4 a4 514 j 1b 515 5163: /* Reschedule */ 517 518#ifdef CONFIG_TRACE_IRQFLAGS 519 movi a4, trace_hardirqs_on 520 callx4 a4 521#endif 522 rsil a2, 0 523 movi a4, schedule # void schedule (void) 524 callx4 a4 525 j 1b 526 527#ifdef CONFIG_PREEMPT 5286: 529 _bbci.l a4, TIF_NEED_RESCHED, 4f 530 531 /* Check current_thread_info->preempt_count */ 532 533 l32i a4, a2, TI_PRE_COUNT 534 bnez a4, 4f 535 movi a4, preempt_schedule_irq 536 callx4 a4 537 j 1b 538#endif 539 540#if XTENSA_FAKE_NMI 541.LNMIexit: 542 l32i a3, a1, PT_PS 543 _bbci.l a3, PS_UM_BIT, 4f 544#endif 545 5465: 547#ifdef CONFIG_HAVE_HW_BREAKPOINT 548 _bbci.l a4, TIF_DB_DISABLED, 7f 549 movi a4, restore_dbreak 550 callx4 a4 5517: 552#endif 553#ifdef CONFIG_DEBUG_TLB_SANITY 554 l32i a4, a1, PT_DEPC 555 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f 556 movi a4, check_tlb_sanity 557 callx4 a4 558#endif 5596: 5604: 561#ifdef CONFIG_TRACE_IRQFLAGS 562 extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 563 bgei a4, LOCKLEVEL, 1f 564 movi a4, trace_hardirqs_on 565 callx4 a4 5661: 567#endif 568 /* Restore optional registers. */ 569 570 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT 571 572 /* Restore SCOMPARE1 */ 573 574#if XCHAL_HAVE_S32C1I 575 l32i a2, a1, PT_SCOMPARE1 576 wsr a2, scompare1 577#endif 578 wsr a3, ps /* disable interrupts */ 579 580 _bbci.l a3, PS_UM_BIT, kernel_exception_exit 581 582user_exception_exit: 583 584 /* Restore the state of the task and return from the exception. */ 585 586 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ 587 588 l32i a2, a1, PT_WINDOWBASE 589 l32i a3, a1, PT_WINDOWSTART 590 wsr a1, depc # use DEPC as temp storage 591 wsr a3, windowstart # restore WINDOWSTART 592 ssr a2 # preserve user's WB in the SAR 593 wsr a2, windowbase # switch to user's saved WB 594 rsync 595 rsr a1, depc # restore stack pointer 596 l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9) 597 rotw -1 # we restore a4..a7 598 _bltui a6, 16, 1f # only have to restore current window? 599 600 /* The working registers are a0 and a3. We are restoring to 601 * a4..a7. Be careful not to destroy what we have just restored. 602 * Note: wmask has the format YYYYM: 603 * Y: number of registers saved in groups of 4 604 * M: 4 bit mask of first 16 registers 605 */ 606 607 mov a2, a6 608 mov a3, a5 609 6102: rotw -1 # a0..a3 become a4..a7 611 addi a3, a7, -4*4 # next iteration 612 addi a2, a6, -16 # decrementing Y in WMASK 613 l32i a4, a3, PT_AREG_END + 0 614 l32i a5, a3, PT_AREG_END + 4 615 l32i a6, a3, PT_AREG_END + 8 616 l32i a7, a3, PT_AREG_END + 12 617 _bgeui a2, 16, 2b 618 619 /* Clear unrestored registers (don't leak anything to user-land */ 620 6211: rsr a0, windowbase 622 rsr a3, sar 623 sub a3, a0, a3 624 beqz a3, 2f 625 extui a3, a3, 0, WBBITS 626 6271: rotw -1 628 addi a3, a7, -1 629 movi a4, 0 630 movi a5, 0 631 movi a6, 0 632 movi a7, 0 633 bgei a3, 1, 1b 634 635 /* We are back were we were when we started. 636 * Note: a2 still contains WMASK (if we've returned to the original 637 * frame where we had loaded a2), or at least the lower 4 bits 638 * (if we have restored WSBITS-1 frames). 639 */ 640 6412: 642#if XCHAL_HAVE_THREADPTR 643 l32i a3, a1, PT_THREADPTR 644 wur a3, threadptr 645#endif 646 647 j common_exception_exit 648 649 /* This is the kernel exception exit. 650 * We avoided to do a MOVSP when we entered the exception, but we 651 * have to do it here. 652 */ 653 654kernel_exception_exit: 655 656 /* Check if we have to do a movsp. 657 * 658 * We only have to do a movsp if the previous window-frame has 659 * been spilled to the *temporary* exception stack instead of the 660 * task's stack. This is the case if the corresponding bit in 661 * WINDOWSTART for the previous window-frame was set before 662 * (not spilled) but is zero now (spilled). 663 * If this bit is zero, all other bits except the one for the 664 * current window frame are also zero. So, we can use a simple test: 665 * 'and' WINDOWSTART and WINDOWSTART-1: 666 * 667 * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]* 668 * 669 * The result is zero only if one bit was set. 670 * 671 * (Note: We might have gone through several task switches before 672 * we come back to the current task, so WINDOWBASE might be 673 * different from the time the exception occurred.) 674 */ 675 676 /* Test WINDOWSTART before and after the exception. 677 * We actually have WMASK, so we only have to test if it is 1 or not. 678 */ 679 680 l32i a2, a1, PT_WMASK 681 _beqi a2, 1, common_exception_exit # Spilled before exception,jump 682 683 /* Test WINDOWSTART now. If spilled, do the movsp */ 684 685 rsr a3, windowstart 686 addi a0, a3, -1 687 and a3, a3, a0 688 _bnez a3, common_exception_exit 689 690 /* Do a movsp (we returned from a call4, so we have at least a0..a7) */ 691 692 addi a0, a1, -16 693 l32i a3, a0, 0 694 l32i a4, a0, 4 695 s32i a3, a1, PT_SIZE+0 696 s32i a4, a1, PT_SIZE+4 697 l32i a3, a0, 8 698 l32i a4, a0, 12 699 s32i a3, a1, PT_SIZE+8 700 s32i a4, a1, PT_SIZE+12 701 702 /* Common exception exit. 703 * We restore the special register and the current window frame, and 704 * return from the exception. 705 * 706 * Note: We expect a2 to hold PT_WMASK 707 */ 708 709common_exception_exit: 710 711 /* Restore address registers. */ 712 713 _bbsi.l a2, 1, 1f 714 l32i a4, a1, PT_AREG4 715 l32i a5, a1, PT_AREG5 716 l32i a6, a1, PT_AREG6 717 l32i a7, a1, PT_AREG7 718 _bbsi.l a2, 2, 1f 719 l32i a8, a1, PT_AREG8 720 l32i a9, a1, PT_AREG9 721 l32i a10, a1, PT_AREG10 722 l32i a11, a1, PT_AREG11 723 _bbsi.l a2, 3, 1f 724 l32i a12, a1, PT_AREG12 725 l32i a13, a1, PT_AREG13 726 l32i a14, a1, PT_AREG14 727 l32i a15, a1, PT_AREG15 728 729 /* Restore PC, SAR */ 730 7311: l32i a2, a1, PT_PC 732 l32i a3, a1, PT_SAR 733 wsr a2, epc1 734 wsr a3, sar 735 736 /* Restore LBEG, LEND, LCOUNT */ 737#if XCHAL_HAVE_LOOPS 738 l32i a2, a1, PT_LBEG 739 l32i a3, a1, PT_LEND 740 wsr a2, lbeg 741 l32i a2, a1, PT_LCOUNT 742 wsr a3, lend 743 wsr a2, lcount 744#endif 745 746 /* We control single stepping through the ICOUNTLEVEL register. */ 747 748 l32i a2, a1, PT_ICOUNTLEVEL 749 movi a3, -2 750 wsr a2, icountlevel 751 wsr a3, icount 752 753 /* Check if it was double exception. */ 754 755 l32i a0, a1, PT_DEPC 756 l32i a3, a1, PT_AREG3 757 l32i a2, a1, PT_AREG2 758 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 759 760 /* Restore a0...a3 and return */ 761 762 l32i a0, a1, PT_AREG0 763 l32i a1, a1, PT_AREG1 764 rfe 765 7661: wsr a0, depc 767 l32i a0, a1, PT_AREG0 768 l32i a1, a1, PT_AREG1 769 rfde 770 771ENDPROC(kernel_exception) 772 773/* 774 * Debug exception handler. 775 * 776 * Currently, we don't support KGDB, so only user application can be debugged. 777 * 778 * When we get here, a0 is trashed and saved to excsave[debuglevel] 779 */ 780 781 .literal_position 782 783ENTRY(debug_exception) 784 785 rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL 786 bbsi.l a0, PS_EXCM_BIT, 1f # exception mode 787 788 /* Set EPC1 and EXCCAUSE */ 789 790 wsr a2, depc # save a2 temporarily 791 rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL 792 wsr a2, epc1 793 794 movi a2, EXCCAUSE_MAPPED_DEBUG 795 wsr a2, exccause 796 797 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/ 798 799 movi a2, 1 << PS_EXCM_BIT 800 or a2, a0, a2 801 wsr a2, ps 802 803 /* Switch to kernel/user stack, restore jump vector, and save a0 */ 804 805 bbsi.l a2, PS_UM_BIT, 2f # jump if user mode 806 807 addi a2, a1, -16-PT_SIZE # assume kernel stack 8083: 809 l32i a0, a3, DT_DEBUG_SAVE 810 s32i a1, a2, PT_AREG1 811 s32i a0, a2, PT_AREG0 812 movi a0, 0 813 s32i a0, a2, PT_DEPC # mark it as a regular exception 814 xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 815 xsr a0, depc 816 s32i a3, a2, PT_AREG3 817 s32i a0, a2, PT_AREG2 818 mov a1, a2 819 820 /* Debug exception is handled as an exception, so interrupts will 821 * likely be enabled in the common exception handler. Disable 822 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM 823 * meaning. 824 */ 825#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT) 826 GET_THREAD_INFO(a2, a1) 827 l32i a3, a2, TI_PRE_COUNT 828 addi a3, a3, 1 829 s32i a3, a2, TI_PRE_COUNT 830#endif 831 832 rsr a2, ps 833 bbsi.l a2, PS_UM_BIT, _user_exception 834 j _kernel_exception 835 8362: rsr a2, excsave1 837 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer 838 j 3b 839 840#ifdef CONFIG_HAVE_HW_BREAKPOINT 841 /* Debug exception while in exception mode. This may happen when 842 * window overflow/underflow handler or fast exception handler hits 843 * data breakpoint, in which case save and disable all data 844 * breakpoints, single-step faulting instruction and restore data 845 * breakpoints. 846 */ 8471: 848 bbci.l a0, PS_UM_BIT, 1b # jump if kernel mode 849 850 rsr a0, debugcause 851 bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak 852 853 .set _index, 0 854 .rept XCHAL_NUM_DBREAK 855 l32i a0, a3, DT_DBREAKC_SAVE + _index * 4 856 wsr a0, SREG_DBREAKC + _index 857 .set _index, _index + 1 858 .endr 859 860 l32i a0, a3, DT_ICOUNT_LEVEL_SAVE 861 wsr a0, icountlevel 862 863 l32i a0, a3, DT_ICOUNT_SAVE 864 xsr a0, icount 865 866 l32i a0, a3, DT_DEBUG_SAVE 867 xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 868 rfi XCHAL_DEBUGLEVEL 869 870.Ldebug_save_dbreak: 871 .set _index, 0 872 .rept XCHAL_NUM_DBREAK 873 movi a0, 0 874 xsr a0, SREG_DBREAKC + _index 875 s32i a0, a3, DT_DBREAKC_SAVE + _index * 4 876 .set _index, _index + 1 877 .endr 878 879 movi a0, XCHAL_EXCM_LEVEL + 1 880 xsr a0, icountlevel 881 s32i a0, a3, DT_ICOUNT_LEVEL_SAVE 882 883 movi a0, 0xfffffffe 884 xsr a0, icount 885 s32i a0, a3, DT_ICOUNT_SAVE 886 887 l32i a0, a3, DT_DEBUG_SAVE 888 xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 889 rfi XCHAL_DEBUGLEVEL 890#else 891 /* Debug exception while in exception mode. Should not happen. */ 8921: j 1b // FIXME!! 893#endif 894 895ENDPROC(debug_exception) 896 897/* 898 * We get here in case of an unrecoverable exception. 899 * The only thing we can do is to be nice and print a panic message. 900 * We only produce a single stack frame for panic, so ??? 901 * 902 * 903 * Entry conditions: 904 * 905 * - a0 contains the caller address; original value saved in excsave1. 906 * - the original a0 contains a valid return address (backtrace) or 0. 907 * - a2 contains a valid stackpointer 908 * 909 * Notes: 910 * 911 * - If the stack pointer could be invalid, the caller has to setup a 912 * dummy stack pointer (e.g. the stack of the init_task) 913 * 914 * - If the return address could be invalid, the caller has to set it 915 * to 0, so the backtrace would stop. 916 * 917 */ 918 .align 4 919unrecoverable_text: 920 .ascii "Unrecoverable error in exception handler\0" 921 922 .literal_position 923 924ENTRY(unrecoverable_exception) 925 926 movi a0, 1 927 movi a1, 0 928 929 wsr a0, windowstart 930 wsr a1, windowbase 931 rsync 932 933 movi a1, (1 << PS_WOE_BIT) | LOCKLEVEL 934 wsr a1, ps 935 rsync 936 937 movi a1, init_task 938 movi a0, 0 939 addi a1, a1, PT_REGS_OFFSET 940 941 movi a4, panic 942 movi a6, unrecoverable_text 943 944 callx4 a4 945 9461: j 1b 947 948ENDPROC(unrecoverable_exception) 949 950/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */ 951 952/* 953 * Fast-handler for alloca exceptions 954 * 955 * The ALLOCA handler is entered when user code executes the MOVSP 956 * instruction and the caller's frame is not in the register file. 957 * 958 * This algorithm was taken from the Ross Morley's RTOS Porting Layer: 959 * 960 * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S 961 * 962 * It leverages the existing window spill/fill routines and their support for 963 * double exceptions. The 'movsp' instruction will only cause an exception if 964 * the next window needs to be loaded. In fact this ALLOCA exception may be 965 * replaced at some point by changing the hardware to do a underflow exception 966 * of the proper size instead. 967 * 968 * This algorithm simply backs out the register changes started by the user 969 * excpetion handler, makes it appear that we have started a window underflow 970 * by rotating the window back and then setting the old window base (OWB) in 971 * the 'ps' register with the rolled back window base. The 'movsp' instruction 972 * will be re-executed and this time since the next window frames is in the 973 * active AR registers it won't cause an exception. 974 * 975 * If the WindowUnderflow code gets a TLB miss the page will get mapped 976 * the the partial windeowUnderflow will be handeled in the double exception 977 * handler. 978 * 979 * Entry condition: 980 * 981 * a0: trashed, original value saved on stack (PT_AREG0) 982 * a1: a1 983 * a2: new stack pointer, original in DEPC 984 * a3: a3 985 * depc: a2, original value saved on stack (PT_DEPC) 986 * excsave_1: dispatch table 987 * 988 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 989 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 990 */ 991 992ENTRY(fast_alloca) 993 rsr a0, windowbase 994 rotw -1 995 rsr a2, ps 996 extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH 997 xor a3, a3, a4 998 l32i a4, a6, PT_AREG0 999 l32i a1, a6, PT_DEPC 1000 rsr a6, depc 1001 wsr a1, depc 1002 slli a3, a3, PS_OWB_SHIFT 1003 xor a2, a2, a3 1004 wsr a2, ps 1005 rsync 1006 1007 _bbci.l a4, 31, 4f 1008 rotw -1 1009 _bbci.l a8, 30, 8f 1010 rotw -1 1011 j _WindowUnderflow12 10128: j _WindowUnderflow8 10134: j _WindowUnderflow4 1014ENDPROC(fast_alloca) 1015 1016/* 1017 * fast system calls. 1018 * 1019 * WARNING: The kernel doesn't save the entire user context before 1020 * handling a fast system call. These functions are small and short, 1021 * usually offering some functionality not available to user tasks. 1022 * 1023 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT. 1024 * 1025 * Entry condition: 1026 * 1027 * a0: trashed, original value saved on stack (PT_AREG0) 1028 * a1: a1 1029 * a2: new stack pointer, original in DEPC 1030 * a3: a3 1031 * depc: a2, original value saved on stack (PT_DEPC) 1032 * excsave_1: dispatch table 1033 */ 1034 1035ENTRY(fast_syscall_kernel) 1036 1037 /* Skip syscall. */ 1038 1039 rsr a0, epc1 1040 addi a0, a0, 3 1041 wsr a0, epc1 1042 1043 l32i a0, a2, PT_DEPC 1044 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable 1045 1046 rsr a0, depc # get syscall-nr 1047 _beqz a0, fast_syscall_spill_registers 1048 _beqi a0, __NR_xtensa, fast_syscall_xtensa 1049 1050 j kernel_exception 1051 1052ENDPROC(fast_syscall_kernel) 1053 1054ENTRY(fast_syscall_user) 1055 1056 /* Skip syscall. */ 1057 1058 rsr a0, epc1 1059 addi a0, a0, 3 1060 wsr a0, epc1 1061 1062 l32i a0, a2, PT_DEPC 1063 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable 1064 1065 rsr a0, depc # get syscall-nr 1066 _beqz a0, fast_syscall_spill_registers 1067 _beqi a0, __NR_xtensa, fast_syscall_xtensa 1068 1069 j user_exception 1070 1071ENDPROC(fast_syscall_user) 1072 1073ENTRY(fast_syscall_unrecoverable) 1074 1075 /* Restore all states. */ 1076 1077 l32i a0, a2, PT_AREG0 # restore a0 1078 xsr a2, depc # restore a2, depc 1079 1080 wsr a0, excsave1 1081 movi a0, unrecoverable_exception 1082 callx0 a0 1083 1084ENDPROC(fast_syscall_unrecoverable) 1085 1086/* 1087 * sysxtensa syscall handler 1088 * 1089 * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused); 1090 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused); 1091 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused); 1092 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval); 1093 * a2 a6 a3 a4 a5 1094 * 1095 * Entry condition: 1096 * 1097 * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0) 1098 * a1: a1 1099 * a2: new stack pointer, original in a0 and DEPC 1100 * a3: a3 1101 * a4..a15: unchanged 1102 * depc: a2, original value saved on stack (PT_DEPC) 1103 * excsave_1: dispatch table 1104 * 1105 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 1106 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 1107 * 1108 * Note: we don't have to save a2; a2 holds the return value 1109 * 1110 * We use the two macros TRY and CATCH: 1111 * 1112 * TRY adds an entry to the __ex_table fixup table for the immediately 1113 * following instruction. 1114 * 1115 * CATCH catches any exception that occurred at one of the preceding TRY 1116 * statements and continues from there 1117 * 1118 * Usage TRY l32i a0, a1, 0 1119 * <other code> 1120 * done: rfe 1121 * CATCH <set return code> 1122 * j done 1123 */ 1124 1125 .literal_position 1126 1127#ifdef CONFIG_FAST_SYSCALL_XTENSA 1128 1129#define TRY \ 1130 .section __ex_table, "a"; \ 1131 .word 66f, 67f; \ 1132 .text; \ 113366: 1134 1135#define CATCH \ 113667: 1137 1138ENTRY(fast_syscall_xtensa) 1139 1140 s32i a7, a2, PT_AREG7 # we need an additional register 1141 movi a7, 4 # sizeof(unsigned int) 1142 access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp 1143 1144 _bgeui a6, SYS_XTENSA_COUNT, .Lill 1145 _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp 1146 1147 /* Fall through for ATOMIC_CMP_SWP. */ 1148 1149.Lswp: /* Atomic compare and swap */ 1150 1151TRY l32i a0, a3, 0 # read old value 1152 bne a0, a4, 1f # same as old value? jump 1153TRY s32i a5, a3, 0 # different, modify value 1154 l32i a7, a2, PT_AREG7 # restore a7 1155 l32i a0, a2, PT_AREG0 # restore a0 1156 movi a2, 1 # and return 1 1157 rfe 1158 11591: l32i a7, a2, PT_AREG7 # restore a7 1160 l32i a0, a2, PT_AREG0 # restore a0 1161 movi a2, 0 # return 0 (note that we cannot set 1162 rfe 1163 1164.Lnswp: /* Atomic set, add, and exg_add. */ 1165 1166TRY l32i a7, a3, 0 # orig 1167 addi a6, a6, -SYS_XTENSA_ATOMIC_SET 1168 add a0, a4, a7 # + arg 1169 moveqz a0, a4, a6 # set 1170 addi a6, a6, SYS_XTENSA_ATOMIC_SET 1171TRY s32i a0, a3, 0 # write new value 1172 1173 mov a0, a2 1174 mov a2, a7 1175 l32i a7, a0, PT_AREG7 # restore a7 1176 l32i a0, a0, PT_AREG0 # restore a0 1177 rfe 1178 1179CATCH 1180.Leac: l32i a7, a2, PT_AREG7 # restore a7 1181 l32i a0, a2, PT_AREG0 # restore a0 1182 movi a2, -EFAULT 1183 rfe 1184 1185.Lill: l32i a7, a2, PT_AREG7 # restore a7 1186 l32i a0, a2, PT_AREG0 # restore a0 1187 movi a2, -EINVAL 1188 rfe 1189 1190ENDPROC(fast_syscall_xtensa) 1191 1192#else /* CONFIG_FAST_SYSCALL_XTENSA */ 1193 1194ENTRY(fast_syscall_xtensa) 1195 1196 l32i a0, a2, PT_AREG0 # restore a0 1197 movi a2, -ENOSYS 1198 rfe 1199 1200ENDPROC(fast_syscall_xtensa) 1201 1202#endif /* CONFIG_FAST_SYSCALL_XTENSA */ 1203 1204 1205/* fast_syscall_spill_registers. 1206 * 1207 * Entry condition: 1208 * 1209 * a0: trashed, original value saved on stack (PT_AREG0) 1210 * a1: a1 1211 * a2: new stack pointer, original in DEPC 1212 * a3: a3 1213 * depc: a2, original value saved on stack (PT_DEPC) 1214 * excsave_1: dispatch table 1215 * 1216 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. 1217 */ 1218 1219#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS 1220 1221ENTRY(fast_syscall_spill_registers) 1222 1223 /* Register a FIXUP handler (pass current wb as a parameter) */ 1224 1225 xsr a3, excsave1 1226 movi a0, fast_syscall_spill_registers_fixup 1227 s32i a0, a3, EXC_TABLE_FIXUP 1228 rsr a0, windowbase 1229 s32i a0, a3, EXC_TABLE_PARAM 1230 xsr a3, excsave1 # restore a3 and excsave_1 1231 1232 /* Save a3, a4 and SAR on stack. */ 1233 1234 rsr a0, sar 1235 s32i a3, a2, PT_AREG3 1236 s32i a0, a2, PT_SAR 1237 1238 /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */ 1239 1240 s32i a4, a2, PT_AREG4 1241 s32i a7, a2, PT_AREG7 1242 s32i a8, a2, PT_AREG8 1243 s32i a11, a2, PT_AREG11 1244 s32i a12, a2, PT_AREG12 1245 s32i a15, a2, PT_AREG15 1246 1247 /* 1248 * Rotate ws so that the current windowbase is at bit 0. 1249 * Assume ws = xxxwww1yy (www1 current window frame). 1250 * Rotate ws right so that a4 = yyxxxwww1. 1251 */ 1252 1253 rsr a0, windowbase 1254 rsr a3, windowstart # a3 = xxxwww1yy 1255 ssr a0 # holds WB 1256 slli a0, a3, WSBITS 1257 or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy 1258 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 1259 1260 /* We are done if there are no more than the current register frame. */ 1261 1262 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww 1263 movi a0, (1 << (WSBITS-1)) 1264 _beqz a3, .Lnospill # only one active frame? jump 1265 1266 /* We want 1 at the top, so that we return to the current windowbase */ 1267 1268 or a3, a3, a0 # 1yyxxxwww 1269 1270 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ 1271 1272 wsr a3, windowstart # save shifted windowstart 1273 neg a0, a3 1274 and a3, a0, a3 # first bit set from right: 000010000 1275 1276 ffs_ws a0, a3 # a0: shifts to skip empty frames 1277 movi a3, WSBITS 1278 sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right 1279 ssr a0 # save in SAR for later. 1280 1281 rsr a3, windowbase 1282 add a3, a3, a0 1283 wsr a3, windowbase 1284 rsync 1285 1286 rsr a3, windowstart 1287 srl a3, a3 # shift windowstart 1288 1289 /* WB is now just one frame below the oldest frame in the register 1290 window. WS is shifted so the oldest frame is in bit 0, thus, WB 1291 and WS differ by one 4-register frame. */ 1292 1293 /* Save frames. Depending what call was used (call4, call8, call12), 1294 * we have to save 4,8. or 12 registers. 1295 */ 1296 1297 1298.Lloop: _bbsi.l a3, 1, .Lc4 1299 _bbci.l a3, 2, .Lc12 1300 1301.Lc8: s32e a4, a13, -16 1302 l32e a4, a5, -12 1303 s32e a8, a4, -32 1304 s32e a5, a13, -12 1305 s32e a6, a13, -8 1306 s32e a7, a13, -4 1307 s32e a9, a4, -28 1308 s32e a10, a4, -24 1309 s32e a11, a4, -20 1310 srli a11, a3, 2 # shift windowbase by 2 1311 rotw 2 1312 _bnei a3, 1, .Lloop 1313 j .Lexit 1314 1315.Lc4: s32e a4, a9, -16 1316 s32e a5, a9, -12 1317 s32e a6, a9, -8 1318 s32e a7, a9, -4 1319 1320 srli a7, a3, 1 1321 rotw 1 1322 _bnei a3, 1, .Lloop 1323 j .Lexit 1324 1325.Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero! 1326 1327 /* 12-register frame (call12) */ 1328 1329 l32e a0, a5, -12 1330 s32e a8, a0, -48 1331 mov a8, a0 1332 1333 s32e a9, a8, -44 1334 s32e a10, a8, -40 1335 s32e a11, a8, -36 1336 s32e a12, a8, -32 1337 s32e a13, a8, -28 1338 s32e a14, a8, -24 1339 s32e a15, a8, -20 1340 srli a15, a3, 3 1341 1342 /* The stack pointer for a4..a7 is out of reach, so we rotate the 1343 * window, grab the stackpointer, and rotate back. 1344 * Alternatively, we could also use the following approach, but that 1345 * makes the fixup routine much more complicated: 1346 * rotw 1 1347 * s32e a0, a13, -16 1348 * ... 1349 * rotw 2 1350 */ 1351 1352 rotw 1 1353 mov a4, a13 1354 rotw -1 1355 1356 s32e a4, a8, -16 1357 s32e a5, a8, -12 1358 s32e a6, a8, -8 1359 s32e a7, a8, -4 1360 1361 rotw 3 1362 1363 _beqi a3, 1, .Lexit 1364 j .Lloop 1365 1366.Lexit: 1367 1368 /* Done. Do the final rotation and set WS */ 1369 1370 rotw 1 1371 rsr a3, windowbase 1372 ssl a3 1373 movi a3, 1 1374 sll a3, a3 1375 wsr a3, windowstart 1376.Lnospill: 1377 1378 /* Advance PC, restore registers and SAR, and return from exception. */ 1379 1380 l32i a3, a2, PT_SAR 1381 l32i a0, a2, PT_AREG0 1382 wsr a3, sar 1383 l32i a3, a2, PT_AREG3 1384 1385 /* Restore clobbered registers. */ 1386 1387 l32i a4, a2, PT_AREG4 1388 l32i a7, a2, PT_AREG7 1389 l32i a8, a2, PT_AREG8 1390 l32i a11, a2, PT_AREG11 1391 l32i a12, a2, PT_AREG12 1392 l32i a15, a2, PT_AREG15 1393 1394 movi a2, 0 1395 rfe 1396 1397.Linvalid_mask: 1398 1399 /* We get here because of an unrecoverable error in the window 1400 * registers, so set up a dummy frame and kill the user application. 1401 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. 1402 */ 1403 1404 movi a0, 1 1405 movi a1, 0 1406 1407 wsr a0, windowstart 1408 wsr a1, windowbase 1409 rsync 1410 1411 movi a0, 0 1412 1413 rsr a3, excsave1 1414 l32i a1, a3, EXC_TABLE_KSTK 1415 1416 movi a4, (1 << PS_WOE_BIT) | LOCKLEVEL 1417 wsr a4, ps 1418 rsync 1419 1420 movi a6, SIGSEGV 1421 movi a4, do_exit 1422 callx4 a4 1423 1424 /* shouldn't return, so panic */ 1425 1426 wsr a0, excsave1 1427 movi a0, unrecoverable_exception 1428 callx0 a0 # should not return 14291: j 1b 1430 1431 1432ENDPROC(fast_syscall_spill_registers) 1433 1434/* Fixup handler. 1435 * 1436 * We get here if the spill routine causes an exception, e.g. tlb miss. 1437 * We basically restore WINDOWBASE and WINDOWSTART to the condition when 1438 * we entered the spill routine and jump to the user exception handler. 1439 * 1440 * Note that we only need to restore the bits in windowstart that have not 1441 * been spilled yet by the _spill_register routine. Luckily, a3 contains a 1442 * rotated windowstart with only those bits set for frames that haven't been 1443 * spilled yet. Because a3 is rotated such that bit 0 represents the register 1444 * frame for the current windowbase - 1, we need to rotate a3 left by the 1445 * value of the current windowbase + 1 and move it to windowstart. 1446 * 1447 * a0: value of depc, original value in depc 1448 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE 1449 * a3: exctable, original value in excsave1 1450 */ 1451 1452ENTRY(fast_syscall_spill_registers_fixup) 1453 1454 rsr a2, windowbase # get current windowbase (a2 is saved) 1455 xsr a0, depc # restore depc and a0 1456 ssl a2 # set shift (32 - WB) 1457 1458 /* We need to make sure the current registers (a0-a3) are preserved. 1459 * To do this, we simply set the bit for the current window frame 1460 * in WS, so that the exception handlers save them to the task stack. 1461 * 1462 * Note: we use a3 to set the windowbase, so we take a special care 1463 * of it, saving it in the original _spill_registers frame across 1464 * the exception handler call. 1465 */ 1466 1467 xsr a3, excsave1 # get spill-mask 1468 slli a3, a3, 1 # shift left by one 1469 addi a3, a3, 1 # set the bit for the current window frame 1470 1471 slli a2, a3, 32-WSBITS 1472 src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... 1473 wsr a2, windowstart # set corrected windowstart 1474 1475 srli a3, a3, 1 1476 rsr a2, excsave1 1477 l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 1478 xsr a2, excsave1 1479 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 1480 l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) 1481 xsr a2, excsave1 1482 1483 /* Return to the original (user task) WINDOWBASE. 1484 * We leave the following frame behind: 1485 * a0, a1, a2 same 1486 * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) 1487 * depc: depc (we have to return to that address) 1488 * excsave_1: exctable 1489 */ 1490 1491 wsr a3, windowbase 1492 rsync 1493 1494 /* We are now in the original frame when we entered _spill_registers: 1495 * a0: return address 1496 * a1: used, stack pointer 1497 * a2: kernel stack pointer 1498 * a3: available 1499 * depc: exception address 1500 * excsave: exctable 1501 * Note: This frame might be the same as above. 1502 */ 1503 1504 /* Setup stack pointer. */ 1505 1506 addi a2, a2, -PT_USER_SIZE 1507 s32i a0, a2, PT_AREG0 1508 1509 /* Make sure we return to this fixup handler. */ 1510 1511 movi a3, fast_syscall_spill_registers_fixup_return 1512 s32i a3, a2, PT_DEPC # setup depc 1513 1514 /* Jump to the exception handler. */ 1515 1516 rsr a3, excsave1 1517 rsr a0, exccause 1518 addx4 a0, a0, a3 # find entry in table 1519 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 1520 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE 1521 jx a0 1522 1523ENDPROC(fast_syscall_spill_registers_fixup) 1524 1525ENTRY(fast_syscall_spill_registers_fixup_return) 1526 1527 /* When we return here, all registers have been restored (a2: DEPC) */ 1528 1529 wsr a2, depc # exception address 1530 1531 /* Restore fixup handler. */ 1532 1533 rsr a2, excsave1 1534 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE 1535 movi a3, fast_syscall_spill_registers_fixup 1536 s32i a3, a2, EXC_TABLE_FIXUP 1537 rsr a3, windowbase 1538 s32i a3, a2, EXC_TABLE_PARAM 1539 l32i a2, a2, EXC_TABLE_KSTK 1540 1541 /* Load WB at the time the exception occurred. */ 1542 1543 rsr a3, sar # WB is still in SAR 1544 neg a3, a3 1545 wsr a3, windowbase 1546 rsync 1547 1548 rsr a3, excsave1 1549 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE 1550 1551 rfde 1552 1553ENDPROC(fast_syscall_spill_registers_fixup_return) 1554 1555#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ 1556 1557ENTRY(fast_syscall_spill_registers) 1558 1559 l32i a0, a2, PT_AREG0 # restore a0 1560 movi a2, -ENOSYS 1561 rfe 1562 1563ENDPROC(fast_syscall_spill_registers) 1564 1565#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ 1566 1567#ifdef CONFIG_MMU 1568/* 1569 * We should never get here. Bail out! 1570 */ 1571 1572ENTRY(fast_second_level_miss_double_kernel) 1573 15741: movi a0, unrecoverable_exception 1575 callx0 a0 # should not return 15761: j 1b 1577 1578ENDPROC(fast_second_level_miss_double_kernel) 1579 1580/* First-level entry handler for user, kernel, and double 2nd-level 1581 * TLB miss exceptions. Note that for now, user and kernel miss 1582 * exceptions share the same entry point and are handled identically. 1583 * 1584 * An old, less-efficient C version of this function used to exist. 1585 * We include it below, interleaved as comments, for reference. 1586 * 1587 * Entry condition: 1588 * 1589 * a0: trashed, original value saved on stack (PT_AREG0) 1590 * a1: a1 1591 * a2: new stack pointer, original in DEPC 1592 * a3: a3 1593 * depc: a2, original value saved on stack (PT_DEPC) 1594 * excsave_1: dispatch table 1595 * 1596 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 1597 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 1598 */ 1599 1600ENTRY(fast_second_level_miss) 1601 1602 /* Save a1 and a3. Note: we don't expect a double exception. */ 1603 1604 s32i a1, a2, PT_AREG1 1605 s32i a3, a2, PT_AREG3 1606 1607 /* We need to map the page of PTEs for the user task. Find 1608 * the pointer to that page. Also, it's possible for tsk->mm 1609 * to be NULL while tsk->active_mm is nonzero if we faulted on 1610 * a vmalloc address. In that rare case, we must use 1611 * active_mm instead to avoid a fault in this handler. See 1612 * 1613 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html 1614 * (or search Internet on "mm vs. active_mm") 1615 * 1616 * if (!mm) 1617 * mm = tsk->active_mm; 1618 * pgd = pgd_offset (mm, regs->excvaddr); 1619 * pmd = pmd_offset (pgd, regs->excvaddr); 1620 * pmdval = *pmd; 1621 */ 1622 1623 GET_CURRENT(a1,a2) 1624 l32i a0, a1, TASK_MM # tsk->mm 1625 beqz a0, 9f 1626 16278: rsr a3, excvaddr # fault address 1628 _PGD_OFFSET(a0, a3, a1) 1629 l32i a0, a0, 0 # read pmdval 1630 beqz a0, 2f 1631 1632 /* Read ptevaddr and convert to top of page-table page. 1633 * 1634 * vpnval = read_ptevaddr_register() & PAGE_MASK; 1635 * vpnval += DTLB_WAY_PGTABLE; 1636 * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL); 1637 * write_dtlb_entry (pteval, vpnval); 1638 * 1639 * The messy computation for 'pteval' above really simplifies 1640 * into the following: 1641 * 1642 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK) 1643 * | PAGE_DIRECTORY 1644 */ 1645 1646 movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff 1647 add a0, a0, a1 # pmdval - PAGE_OFFSET 1648 extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK 1649 xor a0, a0, a1 1650 1651 movi a1, _PAGE_DIRECTORY 1652 or a0, a0, a1 # ... | PAGE_DIRECTORY 1653 1654 /* 1655 * We utilize all three wired-ways (7-9) to hold pmd translations. 1656 * Memory regions are mapped to the DTLBs according to bits 28 and 29. 1657 * This allows to map the three most common regions to three different 1658 * DTLBs: 1659 * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000) 1660 * 2 -> way 8 shared libaries (2000.0000) 1661 * 3 -> way 0 stack (3000.0000) 1662 */ 1663 1664 extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3 1665 rsr a1, ptevaddr 1666 addx2 a3, a3, a3 # -> 0,3,6,9 1667 srli a1, a1, PAGE_SHIFT 1668 extui a3, a3, 2, 2 # -> 0,0,1,2 1669 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK 1670 addi a3, a3, DTLB_WAY_PGD 1671 add a1, a1, a3 # ... + way_number 1672 16733: wdtlb a0, a1 1674 dsync 1675 1676 /* Exit critical section. */ 1677 16784: rsr a3, excsave1 1679 movi a0, 0 1680 s32i a0, a3, EXC_TABLE_FIXUP 1681 1682 /* Restore the working registers, and return. */ 1683 1684 l32i a0, a2, PT_AREG0 1685 l32i a1, a2, PT_AREG1 1686 l32i a3, a2, PT_AREG3 1687 l32i a2, a2, PT_DEPC 1688 1689 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 1690 1691 /* Restore excsave1 and return. */ 1692 1693 rsr a2, depc 1694 rfe 1695 1696 /* Return from double exception. */ 1697 16981: xsr a2, depc 1699 esync 1700 rfde 1701 17029: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 1703 bnez a0, 8b 1704 1705 /* Even more unlikely case active_mm == 0. 1706 * We can get here with NMI in the middle of context_switch that 1707 * touches vmalloc area. 1708 */ 1709 movi a0, init_mm 1710 j 8b 1711 1712#if (DCACHE_WAY_SIZE > PAGE_SIZE) 1713 17142: /* Special case for cache aliasing. 1715 * We (should) only get here if a clear_user_page, copy_user_page 1716 * or the aliased cache flush functions got preemptively interrupted 1717 * by another task. Re-establish temporary mapping to the 1718 * TLBTEMP_BASE areas. 1719 */ 1720 1721 /* We shouldn't be in a double exception */ 1722 1723 l32i a0, a2, PT_DEPC 1724 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f 1725 1726 /* Make sure the exception originated in the special functions */ 1727 1728 movi a0, __tlbtemp_mapping_start 1729 rsr a3, epc1 1730 bltu a3, a0, 2f 1731 movi a0, __tlbtemp_mapping_end 1732 bgeu a3, a0, 2f 1733 1734 /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ 1735 1736 movi a3, TLBTEMP_BASE_1 1737 rsr a0, excvaddr 1738 bltu a0, a3, 2f 1739 1740 addi a1, a0, -TLBTEMP_SIZE 1741 bgeu a1, a3, 2f 1742 1743 /* Check if we have to restore an ITLB mapping. */ 1744 1745 movi a1, __tlbtemp_mapping_itlb 1746 rsr a3, epc1 1747 sub a3, a3, a1 1748 1749 /* Calculate VPN */ 1750 1751 movi a1, PAGE_MASK 1752 and a1, a1, a0 1753 1754 /* Jump for ITLB entry */ 1755 1756 bgez a3, 1f 1757 1758 /* We can use up to two TLBTEMP areas, one for src and one for dst. */ 1759 1760 extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1 1761 add a1, a3, a1 1762 1763 /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */ 1764 1765 mov a0, a6 1766 movnez a0, a7, a3 1767 j 3b 1768 1769 /* ITLB entry. We only use dst in a6. */ 1770 17711: witlb a6, a1 1772 isync 1773 j 4b 1774 1775 1776#endif // DCACHE_WAY_SIZE > PAGE_SIZE 1777 1778 17792: /* Invalid PGD, default exception handling */ 1780 1781 rsr a1, depc 1782 s32i a1, a2, PT_AREG2 1783 mov a1, a2 1784 1785 rsr a2, ps 1786 bbsi.l a2, PS_UM_BIT, 1f 1787 j _kernel_exception 17881: j _user_exception 1789 1790ENDPROC(fast_second_level_miss) 1791 1792/* 1793 * StoreProhibitedException 1794 * 1795 * Update the pte and invalidate the itlb mapping for this pte. 1796 * 1797 * Entry condition: 1798 * 1799 * a0: trashed, original value saved on stack (PT_AREG0) 1800 * a1: a1 1801 * a2: new stack pointer, original in DEPC 1802 * a3: a3 1803 * depc: a2, original value saved on stack (PT_DEPC) 1804 * excsave_1: dispatch table 1805 * 1806 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 1807 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 1808 */ 1809 1810ENTRY(fast_store_prohibited) 1811 1812 /* Save a1 and a3. */ 1813 1814 s32i a1, a2, PT_AREG1 1815 s32i a3, a2, PT_AREG3 1816 1817 GET_CURRENT(a1,a2) 1818 l32i a0, a1, TASK_MM # tsk->mm 1819 beqz a0, 9f 1820 18218: rsr a1, excvaddr # fault address 1822 _PGD_OFFSET(a0, a1, a3) 1823 l32i a0, a0, 0 1824 beqz a0, 2f 1825 1826 /* 1827 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present 1828 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts. 1829 */ 1830 1831 _PTE_OFFSET(a0, a1, a3) 1832 l32i a3, a0, 0 # read pteval 1833 movi a1, _PAGE_CA_INVALID 1834 ball a3, a1, 2f 1835 bbci.l a3, _PAGE_WRITABLE_BIT, 2f 1836 1837 movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE 1838 or a3, a3, a1 1839 rsr a1, excvaddr 1840 s32i a3, a0, 0 1841 1842 /* We need to flush the cache if we have page coloring. */ 1843#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 1844 dhwb a0, 0 1845#endif 1846 pdtlb a0, a1 1847 wdtlb a3, a0 1848 1849 /* Exit critical section. */ 1850 1851 movi a0, 0 1852 rsr a3, excsave1 1853 s32i a0, a3, EXC_TABLE_FIXUP 1854 1855 /* Restore the working registers, and return. */ 1856 1857 l32i a3, a2, PT_AREG3 1858 l32i a1, a2, PT_AREG1 1859 l32i a0, a2, PT_AREG0 1860 l32i a2, a2, PT_DEPC 1861 1862 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 1863 1864 rsr a2, depc 1865 rfe 1866 1867 /* Double exception. Restore FIXUP handler and return. */ 1868 18691: xsr a2, depc 1870 esync 1871 rfde 1872 18739: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 1874 j 8b 1875 18762: /* If there was a problem, handle fault in C */ 1877 1878 rsr a3, depc # still holds a2 1879 s32i a3, a2, PT_AREG2 1880 mov a1, a2 1881 1882 rsr a2, ps 1883 bbsi.l a2, PS_UM_BIT, 1f 1884 j _kernel_exception 18851: j _user_exception 1886 1887ENDPROC(fast_store_prohibited) 1888 1889#endif /* CONFIG_MMU */ 1890 1891/* 1892 * System Calls. 1893 * 1894 * void system_call (struct pt_regs* regs, int exccause) 1895 * a2 a3 1896 */ 1897 .literal_position 1898 1899ENTRY(system_call) 1900 1901 entry a1, 32 1902 1903 /* regs->syscall = regs->areg[2] */ 1904 1905 l32i a3, a2, PT_AREG2 1906 mov a6, a2 1907 movi a4, do_syscall_trace_enter 1908 s32i a3, a2, PT_SYSCALL 1909 callx4 a4 1910 mov a3, a6 1911 1912 /* syscall = sys_call_table[syscall_nr] */ 1913 1914 movi a4, sys_call_table 1915 movi a5, __NR_syscall_count 1916 movi a6, -ENOSYS 1917 bgeu a3, a5, 1f 1918 1919 addx4 a4, a3, a4 1920 l32i a4, a4, 0 1921 movi a5, sys_ni_syscall; 1922 beq a4, a5, 1f 1923 1924 /* Load args: arg0 - arg5 are passed via regs. */ 1925 1926 l32i a6, a2, PT_AREG6 1927 l32i a7, a2, PT_AREG3 1928 l32i a8, a2, PT_AREG4 1929 l32i a9, a2, PT_AREG5 1930 l32i a10, a2, PT_AREG8 1931 l32i a11, a2, PT_AREG9 1932 1933 /* Pass one additional argument to the syscall: pt_regs (on stack) */ 1934 s32i a2, a1, 0 1935 1936 callx4 a4 1937 19381: /* regs->areg[2] = return_value */ 1939 1940 s32i a6, a2, PT_AREG2 1941 movi a4, do_syscall_trace_leave 1942 mov a6, a2 1943 callx4 a4 1944 retw 1945 1946ENDPROC(system_call) 1947 1948/* 1949 * Spill live registers on the kernel stack macro. 1950 * 1951 * Entry condition: ps.woe is set, ps.excm is cleared 1952 * Exit condition: windowstart has single bit set 1953 * May clobber: a12, a13 1954 */ 1955 .macro spill_registers_kernel 1956 1957#if XCHAL_NUM_AREGS > 16 1958 call12 1f 1959 _j 2f 1960 retw 1961 .align 4 19621: 1963 _entry a1, 48 1964 addi a12, a0, 3 1965#if XCHAL_NUM_AREGS > 32 1966 .rept (XCHAL_NUM_AREGS - 32) / 12 1967 _entry a1, 48 1968 mov a12, a0 1969 .endr 1970#endif 1971 _entry a1, 16 1972#if XCHAL_NUM_AREGS % 12 == 0 1973 mov a8, a8 1974#elif XCHAL_NUM_AREGS % 12 == 4 1975 mov a12, a12 1976#elif XCHAL_NUM_AREGS % 12 == 8 1977 mov a4, a4 1978#endif 1979 retw 19802: 1981#else 1982 mov a12, a12 1983#endif 1984 .endm 1985 1986/* 1987 * Task switch. 1988 * 1989 * struct task* _switch_to (struct task* prev, struct task* next) 1990 * a2 a2 a3 1991 */ 1992 1993ENTRY(_switch_to) 1994 1995 entry a1, 48 1996 1997 mov a11, a3 # and 'next' (a3) 1998 1999 l32i a4, a2, TASK_THREAD_INFO 2000 l32i a5, a3, TASK_THREAD_INFO 2001 2002 save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 2003 2004#if THREAD_RA > 1020 || THREAD_SP > 1020 2005 addi a10, a2, TASK_THREAD 2006 s32i a0, a10, THREAD_RA - TASK_THREAD # save return address 2007 s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer 2008#else 2009 s32i a0, a2, THREAD_RA # save return address 2010 s32i a1, a2, THREAD_SP # save stack pointer 2011#endif 2012 2013 /* Disable ints while we manipulate the stack pointer. */ 2014 2015 irq_save a14, a3 2016 rsync 2017 2018 /* Switch CPENABLE */ 2019 2020#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) 2021 l32i a3, a5, THREAD_CPENABLE 2022 xsr a3, cpenable 2023 s32i a3, a4, THREAD_CPENABLE 2024#endif 2025 2026 /* Flush register file. */ 2027 2028 spill_registers_kernel 2029 2030 /* Set kernel stack (and leave critical section) 2031 * Note: It's save to set it here. The stack will not be overwritten 2032 * because the kernel stack will only be loaded again after 2033 * we return from kernel space. 2034 */ 2035 2036 rsr a3, excsave1 # exc_table 2037 addi a7, a5, PT_REGS_OFFSET 2038 s32i a7, a3, EXC_TABLE_KSTK 2039 2040 /* restore context of the task 'next' */ 2041 2042 l32i a0, a11, THREAD_RA # restore return address 2043 l32i a1, a11, THREAD_SP # restore stack pointer 2044 2045 load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 2046 2047 wsr a14, ps 2048 rsync 2049 2050 retw 2051 2052ENDPROC(_switch_to) 2053 2054ENTRY(ret_from_fork) 2055 2056 /* void schedule_tail (struct task_struct *prev) 2057 * Note: prev is still in a6 (return value from fake call4 frame) 2058 */ 2059 movi a4, schedule_tail 2060 callx4 a4 2061 2062 movi a4, do_syscall_trace_leave 2063 mov a6, a1 2064 callx4 a4 2065 2066 j common_exception_return 2067 2068ENDPROC(ret_from_fork) 2069 2070/* 2071 * Kernel thread creation helper 2072 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg 2073 * left from _switch_to: a6 = prev 2074 */ 2075ENTRY(ret_from_kernel_thread) 2076 2077 call4 schedule_tail 2078 mov a6, a3 2079 callx4 a2 2080 j common_exception_return 2081 2082ENDPROC(ret_from_kernel_thread) 2083