/*
 * arch/xtensa/kernel/entry.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2007 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK
#undef PREEMPTIBLE_KERNEL
#undef ALLOCA_EXCEPTION_IN_IRAM

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find the first bit set in a window mask, counting from the
 * left, plus one:
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro	ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau	\bit, \mask		# 32-WSBITS ... 31 (32 iff 0)
	addi	\bit, \bit, WSBITS - 32 + 1	# highest bit set -> return 1
#else
	movi	\bit, WSBITS
#if WSBITS > 16
	_bltui	\mask, 0x10000, 99f
	addi	\bit, \bit, -16
	extui	\mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui	\mask, 0x100, 99f
	addi	\bit, \bit, -8
	srli	\mask, \mask, 8
#endif
99:	_bltui	\mask, 0x10, 99f
	addi	\bit, \bit, -4
	srli	\mask, \mask, 4
99:	_bltui	\mask, 0x4, 99f
	addi	\bit, \bit, -2
	srli	\mask, \mask, 2
99:	_bltui	\mask, 0x2, 99f
	addi	\bit, \bit, -1
99:

#endif
	.endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12.
 */

ENTRY(user_exception)

	/* Save a2, a3, and depc, restore excsave_1 and set SP. */

	xsr	a3, excsave1
	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right so that a2 = yyyyxxwww1. */
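	/* Worked example of the rotation below (illustrative only; it
	 * assumes a hypothetical configuration with WSBITS = 8):
	 *
	 *	WINDOWBASE  = 2
	 *	WINDOWSTART = 0b10010100	(bit 2 = current frame)
	 *
	 * The slli/src/srli sequence computes a rotate-right by WINDOWBASE
	 * within WSBITS bits, i.e. a2 = 0b00100101, so the current frame's
	 * start bit always ends up in bit 0.
	 */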
	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers

	/* Save only live registers. */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:	001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file
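	/* Worked example of the WMASK encoding just computed (illustrative,
	 * again assuming WSBITS = 8): for a rotated windowstart of
	 * 0b00100101, the lowest '1' above bit 0 is bit 2, so WSBITS - 2 = 6
	 * frames beyond the current one must be saved, and
	 *
	 *	WMASK = (6 << 4) | 0b0101 = 0x65
	 */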
	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */

2:	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)

/*
 * First-level exception handler for kernel exceptions.
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code.)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12.
 */

ENTRY(kernel_exception)

	/* Save a0, a2, a3, DEPC and set SP. */

	xsr	a3, excsave1		# restore a3, excsave_1
	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right so that a2 = yyyyxxwww1. */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit

	/* Save only the live window-frame */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

1:

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, debugcause
	rsr	a3, epc1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, -1
	rsr	a3, excvaddr
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
	xsr	a2, lcount
	s32i	a2, a1, PT_LCOUNT

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a0, exccause
	movi	a3, 0
	rsr	a2, excsave1
	s32i	a0, a1, PT_EXCCAUSE
	s32i	a3, a2, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid,
	 * so we can allow exceptions and interrupts (*) again.
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 *
	 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
	 *     (interrupts disabled) and if this exception is not an interrupt.
	 */

	rsr	a3, ps
	addi	a0, a0, -4
	movi	a2, 1
	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
	movi	a2, 1 << PS_WOE_BIT
	or	a3, a3, a2
	rsr	a0, exccause
	xsr	a3, ps

	s32i	a3, a1, PT_PS		# save ps

	/* Save lbeg, lend */

	rsr	a2, lbeg
	rsr	a3, lend
	s32i	a2, a1, PT_LBEG
	s32i	a3, a1, PT_LEND

	/* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	rsr	a2, scompare1
	s32i	a2, a1, PT_SCOMPARE1
#endif

	/* Save optional registers. */

	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
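	/* The dispatch below indexes the second-level handler table with
	 * EXCCAUSE. Roughly, as an illustrative C sketch (not generated
	 * code):
	 *
	 *	typedef void (*handler_t)(struct pt_regs *regs, int cause);
	 *	handler_t h = *(handler_t *)((char *)exc_table
	 *				     + EXC_TABLE_DEFAULT + cause * 4);
	 *	h(regs, cause);
	 */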
	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	movi	a4, exc_table
	mov	a6, a1			# pass stack frame
	mov	a7, a0			# pass EXCCAUSE
	addx4	a4, a0, a4
	l32i	a4, a4, EXC_TABLE_DEFAULT	# load handler

	/* Call the second-level handler */

	callx4	a4

	/* Jump here for exception exit */

common_exception_return:

	/* Jump if we are returning from kernel exceptions. */

1:	l32i	a3, a1, PT_PS
	_bbci.l	a3, PS_UM_BIT, 4f

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	GET_THREAD_INFO(a2,a1)
	l32i	a4, a2, TI_FLAGS

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
	_bbci.l	a4, TIF_SIGPENDING, 4f

2:	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f

	/* Call do_signal() */

	movi	a4, do_notify_resume	# int do_notify_resume(struct pt_regs*)
	mov	a6, a1
	callx4	a4
	j	1b

3:	/* Reschedule */

	movi	a4, schedule		# void schedule (void)
	callx4	a4
	j	1b

4:	/* Restore optional registers. */

	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i	a2, a1, PT_SCOMPARE1
	wsr	a2, scompare1
#endif
	wsr	a3, ps		/* disable interrupts */

	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?

	/* The working registers are a0 and a3. We are restoring to
	 * a4..a7. Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land). */

1:	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *       frame where we had loaded a2), or at least the lower 4 bits
	 *       (if we have restored WSBITS-1 frames).
	 */

2:	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:
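	/* For reference, the MOVSP emulation at the end of this path copies
	 * the four-register save area from just below the old stack pointer
	 * to just above the exception frame; roughly, as an illustrative C
	 * sketch (not generated code):
	 *
	 *	memcpy((char *)regs + PT_SIZE, (char *)regs - 16, 16);
	 */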
#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

	/*
	 * Note: We've just returned from a call4, so we have
	 * at least 4 additional regs.
	 */

	/* Check current_thread_info->preempt_count */

	GET_THREAD_INFO(a2)
	l32i	a3, a2, TI_PREEMPT
	bnez	a3, 1f

	l32i	a2, a2, TI_FLAGS

1:

#endif

#endif

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, windowstart
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_SIZE+0
	s32i	a4, a1, PT_SIZE+4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_SIZE+8
	s32i	a4, a1, PT_SIZE+12

	/* Common exception exit.
	 * We restore the special register and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	/* Restore address registers. */

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */

	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, depc
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)
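/* Recap of the return-path selection above, as an illustrative C sketch
 * (not generated code):
 *
 *	if (regs->depc < VALID_DOUBLE_EXCEPTION_ADDRESS)
 *		rfe();		// regular exception: return via EPC1
 *	else
 *		rfde();		// double exception: return via DEPC
 */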
/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be
 * debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel].
 */

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	movi	a0, debug_exception	# restore a0, re-arm debug jump vector
	wsr	a2, ps
	xsr	a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_kernel_exception

2:	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_user_exception

	/* Debug exception while in exception mode. */
1:	j	1b	// FIXME!!

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to setup a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a1, (1 << PS_WOE_BIT) | 1
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a4, panic
	movi	a6, unrecoverable_text

	callx4	a4

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
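/* Background for the alloca handler below (illustrative): the compiler
 * implements C alloca() and variable-size stack frames with a MOVSP
 * instruction, e.g.
 *
 *	void f(int n) { char *p = alloca(n); ... }
 *
 * lowers to something like "sub aX, a1, aN; movsp a1, aX". MOVSP only
 * traps here when the caller's four-register save area below the old
 * stack pointer would be orphaned by moving the stack pointer, so the
 * handler relocates that save area to sit below the new stack pointer.
 */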
/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

	/* We shouldn't be in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

	rsr	a0, depc		# get a2
	s32i	a4, a2, PT_AREG4	# save a4 and
	s32i	a0, a2, PT_AREG2	# a2 to stack

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore a3, excsave_1 */

	xsr	a3, excsave1		# make sure excsave_1 is valid for dbl.
	rsr	a4, epc1		# get exception address
	s32i	a3, a2, PT_AREG3	# save a3 to stack

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	/* Note: l8ui not allowed in IRAM/IROM!! */
	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
#endif
	movi	a3, .Lmovsp_src
	_EXTUI_MOVSP_SRC(a0)		# extract source register number
	addx8	a3, a0, a3
	jx	a3

.Lunhandled_double:
	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

	.align 8
.Lmovsp_src:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a3, a5;			_j 1f;	.align 8
	mov	a3, a6;			_j 1f;	.align 8
	mov	a3, a7;			_j 1f;	.align 8
	mov	a3, a8;			_j 1f;	.align 8
	mov	a3, a9;			_j 1f;	.align 8
	mov	a3, a10;		_j 1f;	.align 8
	mov	a3, a11;		_j 1f;	.align 8
	mov	a3, a12;		_j 1f;	.align 8
	mov	a3, a13;		_j 1f;	.align 8
	mov	a3, a14;		_j 1f;	.align 8
	mov	a3, a15;		_j 1f;	.align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
#endif
	addi	a4, a4, 3		# step over movsp
	_EXTUI_MOVSP_DST(a0)		# extract destination register
	wsr	a4, epc1		# save new epc_1

	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump

	/* Move the save area. This implies the use of the L32E
	 * and S32E instructions, because this move must be done with
	 * the user's PS.RING privilege levels, not with ring 0
	 * (kernel's) privileges currently active with PS.EXCM
	 * set. Note that we have still registered a fixup routine with the
	 * double exception vector in case a double exception occurs.
	 */

	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

	l32e	a0, a1, -16
	l32e	a4, a1, -12
	s32e	a0, a3, -16
	s32e	a4, a3, -12
	l32e	a0, a1, -8
	l32e	a4, a1, -4
	s32e	a0, a3, -8
	s32e	a4, a3, -4

	/* Restore stack-pointer and all the other saved registers. */

	mov	a1, a3

	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe
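	/* Note on the jump tables used here (an illustrative observation,
	 * not extra logic): each entry in .Lmovsp_src and .Lmovsp_dst is
	 * padded to 8 bytes, so "addx8 aX, reg, table" computes
	 * table + reg * 8, i.e. the register number decoded from the MOVSP
	 * opcode indexes the table directly.
	 */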
	/* MOVSP <at>,<as> was invoked with <at> != a1.
	 * Because the stack pointer is not being modified,
	 * we should be able to just modify the pointer
	 * without moving any save area.
	 * The processor only traps these occurrences if the
	 * caller window isn't live, so unfortunately we can't
	 * use this as an alternate trap mechanism.
	 * So we just do the move.  This requires that we
	 * resolve the destination register, not just the source,
	 * so there's some extra work.
	 * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
	 */

	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */

1:	movi	a4, .Lmovsp_dst
	addx8	a4, a0, a4
	jx	a4

	.align 8
.Lmovsp_dst:
	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a1, a3;			_j 1f;	.align 8
	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a5, a3;			_j 1f;	.align 8
	mov	a6, a3;			_j 1f;	.align 8
	mov	a7, a3;			_j 1f;	.align 8
	mov	a8, a3;			_j 1f;	.align 8
	mov	a9, a3;			_j 1f;	.align 8
	mov	a10, a3;		_j 1f;	.align 8
	mov	a11, a3;		_j 1f;	.align 8
	mov	a12, a3;		_j 1f;	.align 8
	mov	a13, a3;		_j 1f;	.align 8
	mov	a14, a3;		_j 1f;	.align 8
	mov	a15, a3;		_j 1f;	.align 8

1:	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

ENDPROC(fast_alloca)

/*
 * fast system calls.
 *
 * WARNING: The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 */

ENTRY(fast_syscall_kernel)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc		# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	kernel_exception

ENDPROC(fast_syscall_kernel)

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc		# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	user_exception

ENDPROC(fast_syscall_user)
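/* For orientation (a summary of the dispatch above, from the caller's
 * point of view): a fast syscall is still entered with the SYSCALL
 * instruction; only the dispatch differs. Syscall number 0 reaches
 * fast_syscall_spill_registers and __NR_xtensa reaches
 * fast_syscall_xtensa; every other number falls through to the slow
 * kernel_exception/user_exception path.
 */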
ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, depc		# restore a2, depc
	rsr	a3, excsave1

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	dispatch table, original in excsave_1
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *	 statements and continues from there
 *
 * Usage	TRY	l32i	a0, a1, 0
 *		<other code>
 *	 done:	rfe
 *	 CATCH	<set return code>
 *		j done
 */

#define TRY				\
	.section __ex_table, "a";	\
	.word	66f, 67f;		\
	.text;				\
66:

#define CATCH				\
67:

ENTRY(fast_syscall_xtensa)

	xsr	a3, excsave1		# restore a3, excsave1

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
TRY	s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

TRY	l32i	a7, a3, 0		# orig
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
TRY	s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

CATCH
.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)
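/* Illustrative user-space use of the handler above (a sketch; the
 * syscall(2) wrapper and the SYS_XTENSA_* constants are assumed to be
 * exposed to user space, they are not defined in this file):
 *
 *	int old = syscall(__NR_xtensa, SYS_XTENSA_ATOMIC_EXG_ADD,
 *			  &word, 1, 0);
 *
 * atomically adds 1 to 'word' and returns the previous value, or
 * -EFAULT / -EINVAL for a bad pointer or bad operation code.
 */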
/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, sar
	xsr	a3, excsave1		# restore a3 and excsave_1
	s32i	a3, a2, PT_AREG3
	s32i	a4, a2, PT_AREG4
	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5

	/* The spill routine might clobber a7, a11, and a15. */

	s32i	a7, a2, PT_AREG7
	s32i	a11, a2, PT_AREG11
	s32i	a15, a2, PT_AREG15

	call0	_spill_registers	# destroys a3, a4, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a7, a2, PT_AREG7
	l32i	a11, a2, PT_AREG11
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

ENDPROC(fast_syscall_spill_registers)
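/* Shape of the fixup protocol used below, as an illustrative sketch (not
 * generated code): if the spill faults (e.g. on a TLB miss), the double
 * exception vector calls the routine registered in EXC_TABLE_FIXUP, which
 *
 *	1. rebuilds WINDOWSTART so the interrupted frame gets saved,
 *	2. returns to the WINDOWBASE recorded in EXC_TABLE_PARAM,
 *	3. re-dispatches to the normal user exception handler, and
 *	4. arranges for that handler to return to
 *	   fast_syscall_spill_registers_fixup_return, which resumes the
 *	   interrupted spill at the faulting instruction.
 */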
/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

fast_syscall_spill_registers_fixup:

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, excsave1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in excsave_1)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	a3
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available, saved in EXCSAVE_1
	 *  depc: exception address
	 *  excsave: a3
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	movi	a3, exc_table
	rsr	a0, exccause
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0

fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	xsr	a3, excsave1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, windowbase
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, excsave1

	rfde


/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses a3, a4 and SAR.
 *  - the last 'valid' register of each frame is clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */
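/* Overview of the routine below (a summary, not extra logic): WINDOWSTART
 * is rotated so the oldest live frame can be found with ffs_ws, empty
 * frames are skipped by advancing WINDOWBASE, and each frame is then
 * stored with s32e relative to the stack pointer of the next-younger
 * frame: 4 registers for call4 (.Lc4), 8 for call8 (.Lc8), and 12 for
 * call12 (.Lc12), rotating the window (rotw) as it goes until only the
 * current frame remains.
 */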
ENTRY(_spill_registers)

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a4 = yyxxxwww1.
	 */

	rsr	a4, windowbase
	rsr	a3, windowstart		# a3 = xxxwww1yy
	ssr	a4			# holds WB
	slli	a4, a3, WSBITS
	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1

	/* We are done if there is no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
	movi	a4, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a4		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, windowstart		# save shifted windowstart
	neg	a4, a3
	and	a3, a4, a3		# first bit set from right: 000010000

	ffs_ws	a4, a3			# a4: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a4, a3, a4		# WSBITS-a4:number of 0-bits from right
	ssr	a4			# save in SAR for later.

	rsr	a3, windowbase
	add	a3, a3, a4
	wsr	a3, windowbase
	rsync

	rsr	a3, windowstart
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending on what call was used (call4, call8,
	 * call12), we have to save 4, 8, or 12 registers.
	 */

	_bbsi.l	a3, 1, .Lc4
	_bbsi.l	a3, 2, .Lc8

	/* Special case: we have a call12-frame starting at a4. */

	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (jump to .Lc12 first)

	s32e	a4, a1, -16	# a1 is valid with an empty spill area
	l32e	a4, a5, -12
	s32e	a8, a4, -48
	mov	a8, a4
	l32e	a4, a1, -16
	j	.Lc12c

.Lnospill:
	ret

.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20

	srli	a11, a3, 2		# shift windowstart by 2
	rotw	2
	_bnei	a3, 1, .Lloop

.Lexit: /* Done. Do the final rotation, set WS, and return. */

	rotw	1
	rsr	a3, windowbase
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, windowstart
	ret

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a2, a5, -12
	s32e	a8, a2, -48
	mov	a8, a2

.Lc12c: s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a5, a13
	rotw	-1

	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers. If we are in user space, we kill the application,
	 * however, this condition is unrecoverable in kernel space.
	 */

	rsr	a0, ps
	_bbci.l	a0, PS_UM_BIT, 1f

	/* User space: Setup a dummy frame and kill application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, excsave1

	movi	a4, (1 << PS_WOE_BIT) | 1
	wsr	a4, ps
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

1:	/* Kernel space: PANIC! */

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(_spill_registers)

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)
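/* The handler below refills a DTLB entry for the page table itself (a
 * summary of the code that follows): on a second-level miss, the page of
 * PTEs covering the faulting address is mapped into one of the wired DTLB
 * ways, so the subsequent hardware refill can find the PTE.
 */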
/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 *   http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f


	/* We deliberately destroy a3 that holds the exception table. */

8:	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
	 */

	movi	a1, (-PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired-ways (7-9) to hold pmd translations.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows mapping the three most common regions to three
	 * different DTLBs:
	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *  2   -> way 8	shared libraries (2000.0000)
	 *  3   -> way 9	stack (3000.0000)
	 */

	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number
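	/* Worked example of the way selection above (illustrative): for a
	 * stack address such as 0x3800.0000, bits 28..29 give 3, addx2
	 * makes 9, the extui keeps (9 >> 2) & 3 = 2, and DTLB_WAY_PGD + 2
	 * selects way 9 -- matching the table in the comment above.
	 */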
3:	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

4:	movi	a3, exc_table		# restore a3
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a2, a2, PT_DEPC
	xsr	a3, excsave1

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or the aliased cache flush functions got preempted by another
	 * task. Re-establish temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, 2f
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, 2f

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, 2f

	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
	bgeu	a1, a3, 2f

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	3b

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	4b


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE


2:	/* Invalid PGD, default exception handling */

	movi	a3, exc_table
	rsr	a1, depc
	xsr	a3, excsave1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_second_level_miss)
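/* The handler below performs, in effect (illustrative C, mirroring the
 * comments in the code, not the exact kernel helpers):
 *
 *	pte = *pte_ptr;
 *	if (!(pte & _PAGE_WRITABLE))
 *		goto handle_fault_in_C;
 *	pte |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 *	*pte_ptr = pte;
 *	// then probe and rewrite the matching DTLB entry
 */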
/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0
	beqz	a0, 2f

	/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a4, a4, a1
	rsr	a1, excvaddr
	s32i	a4, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a4, a0

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, excsave1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a4, depc	# still holds a2
	xsr	a3, excsave1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */

ENTRY(system_call)

	entry	a1, 32

	/* regs->syscall = regs->areg[2] */

	l32i	a3, a2, PT_AREG2
	mov	a6, a2
	movi	a4, do_syscall_trace_enter
	s32i	a3, a2, PT_SYSCALL
	callx4	a4

	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table
	movi	a5, __NR_syscall_count
	movi	a6, -ENOSYS
	bgeu	a3, a5, 1f

	addx4	a4, a3, a4
	l32i	a4, a4, 0
	movi	a5, sys_ni_syscall
	beq	a4, a5, 1f

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	a6, a2, PT_AREG6
	l32i	a7, a2, PT_AREG3
	l32i	a8, a2, PT_AREG4
	l32i	a9, a2, PT_AREG5
	l32i	a10, a2, PT_AREG8
	l32i	a11, a2, PT_AREG9

	/* Pass one additional argument to the syscall: pt_regs (on stack) */
	s32i	a2, a1, 0

	callx4	a4

1:	/* regs->areg[2] = return_value */

	s32i	a6, a2, PT_AREG2
	movi	a4, do_syscall_trace_leave
	mov	a6, a2
	callx4	a4
	retw

ENDPROC(system_call)
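/* Illustrative C equivalent of the dispatch above (a sketch only; the
 * argument registers follow the windowed call4 ABI as loaded in the code):
 *
 *	void system_call(struct pt_regs *regs, int exccause)
 *	{
 *		unsigned nr = regs->areg[2];
 *		do_syscall_trace_enter(regs);
 *		if (nr >= __NR_syscall_count
 *		    || sys_call_table[nr] == sys_ni_syscall)
 *			regs->areg[2] = -ENOSYS;
 *		else
 *			regs->areg[2] = sys_call_table[nr](arg0, ..., arg5,
 *							   regs);
 *		do_syscall_trace_leave(regs);
 *	}
 */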
1917 */ 1918 1919 rsr a3, excsave1 # exc_table 1920 movi a6, 0 1921 addi a7, a5, PT_REGS_OFFSET 1922 s32i a6, a3, EXC_TABLE_FIXUP 1923 s32i a7, a3, EXC_TABLE_KSTK 1924 1925 /* restore context of the task that 'next' addresses */ 1926 1927 l32i a0, a13, THREAD_RA # restore return address 1928 l32i a1, a13, THREAD_SP # restore stack pointer 1929 1930 load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER 1931 1932 wsr a14, ps 1933 mov a2, a12 # return 'prev' 1934 rsync 1935 1936 retw 1937 1938ENDPROC(_switch_to) 1939 1940ENTRY(ret_from_fork) 1941 1942 /* void schedule_tail (struct task_struct *prev) 1943 * Note: prev is still in a6 (return value from fake call4 frame) 1944 */ 1945 movi a4, schedule_tail 1946 callx4 a4 1947 1948 movi a4, do_syscall_trace_leave 1949 mov a6, a1 1950 callx4 a4 1951 1952 j common_exception_return 1953 1954ENDPROC(ret_from_fork) 1955 1956/* 1957 * Kernel thread creation helper 1958 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg 1959 * left from _switch_to: a6 = prev 1960 */ 1961ENTRY(ret_from_kernel_thread) 1962 1963 call4 schedule_tail 1964 mov a6, a3 1965 callx4 a2 1966 j common_exception_return 1967 1968ENDPROC(ret_from_kernel_thread) 1969