/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer reliably rely on the fact that
 *  if we are NOT in user mode, stack is switched to kernel mode.
 *  e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
 *  its prologue, including stack switching from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
 *   Normally the CPU does this automatically, however when doing a FAKE rtie,
 *   we also need to do this explicitly. The problem in macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *   r25 contains the kernel current task ptr
 *  -Defined Stack Switching Macro to be reused in all intr/excp hdlrs
 *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *   address write-back load ld.ab instead of separate ld/add instns
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H

#ifdef __ASSEMBLY__
#include <asm/unistd.h>		/* For NR_syscalls definition */
#include <asm/asm-offsets.h>
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h>	/* For VMALLOC_START */
#include <asm/thread_info.h>	/* For THREAD_SIZE */

/* Note on the LD/ST addr modes with addr reg wback
 *
 * LD.a same as LD.aw
 *
 * LD.a    reg1, [reg2, x]  => Pre Incr
 *	Eff Addr for load = [reg2 + x]
 *
 * LD.ab   reg1, [reg2, x]  => Post Incr
 *	Eff Addr for load = [reg2]
 */

/*--------------------------------------------------------------
 * Save caller saved registers (scratch registers) (r0 - r12)
 * Registers are pushed / popped in the order defined in struct pt_regs
 * in asm/ptrace.h
 *-------------------------------------------------------------*/
.macro SAVE_CALLER_SAVED
	st.a	r0, [sp, -4]
	st.a	r1, [sp, -4]
	st.a	r2, [sp, -4]
	st.a	r3, [sp, -4]
	st.a	r4, [sp, -4]
	st.a	r5, [sp, -4]
	st.a	r6, [sp, -4]
	st.a	r7, [sp, -4]
	st.a	r8, [sp, -4]
	st.a	r9, [sp, -4]
	st.a	r10, [sp, -4]
	st.a	r11, [sp, -4]
	st.a	r12, [sp, -4]
.endm

/*--------------------------------------------------------------
 * Restore caller saved registers (scratch registers)
 *-------------------------------------------------------------*/
.macro RESTORE_CALLER_SAVED
	ld.ab	r12, [sp, 4]
	ld.ab	r11, [sp, 4]
	ld.ab	r10, [sp, 4]
	ld.ab	r9, [sp, 4]
	ld.ab	r8, [sp, 4]
	ld.ab	r7, [sp, 4]
	ld.ab	r6, [sp, 4]
	ld.ab	r5, [sp, 4]
	ld.ab	r4, [sp, 4]
	ld.ab	r3, [sp, 4]
	ld.ab	r2, [sp, 4]
	ld.ab	r1, [sp, 4]
	ld.ab	r0, [sp, 4]
.endm
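
/*--------------------------------------------------------------
 * Illustration only (not referenced by any code): how the write-back
 * addressing modes noted above turn plain ST/LD into push/pop.
 * Assuming sp = 0x1000 on entry:
 *
 *	st.a	r0, [sp, -4]	; sp <- 0x0FFC, then r0 stored at 0x0FFC (push)
 *	ld.ab	r0, [sp, 4]	; r0 loaded from 0x0FFC, then sp <- 0x1000 (pop)
 *
 * This is why SAVE_CALLER_SAVED uses st.a while RESTORE_CALLER_SAVED uses
 * ld.ab: each instruction both moves the data and adjusts SP, with no
 * separate sub/add instructions needed.
 *-------------------------------------------------------------*/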

/*--------------------------------------------------------------
 * Save callee saved registers (non scratch registers) (r13 - r25)
 * on kernel stack.
 * User mode callee regs need to be saved in case of
 *    -fork and friends for replicating from parent to child
 *    -before going into do_signal() for ptrace/core-dump
 * Special case handling is required for r25 in case it is used by the kernel
 * for caching the task ptr. The low level exception/ISR code saves user mode
 * r25 into task->thread.user_r25, so it needs to be retrieved from there and
 * saved onto the kernel stack along with the rest of the callee reg-file
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER
	st.a	r13, [sp, -4]
	st.a	r14, [sp, -4]
	st.a	r15, [sp, -4]
	st.a	r16, [sp, -4]
	st.a	r17, [sp, -4]
	st.a	r18, [sp, -4]
	st.a	r19, [sp, -4]
	st.a	r20, [sp, -4]
	st.a	r21, [sp, -4]
	st.a	r22, [sp, -4]
	st.a	r23, [sp, -4]
	st.a	r24, [sp, -4]

#ifdef CONFIG_ARC_CURR_IN_REG
	; Retrieve orig r25 and save it on stack
	ld	r12, [r25, TASK_THREAD + THREAD_USER_R25]
	st.a	r12, [sp, -4]
#else
	st.a	r25, [sp, -4]
#endif

	/* move up by 1 word to "create" callee_regs->"stack_place_holder" */
	sub	sp, sp, 4
.endm

/*--------------------------------------------------------------
 * Save callee saved registers (non scratch registers) (r13 - r25)
 * Kernel mode callee regs need to be saved in case of a context switch.
 * If r25 is used for caching the task pointer then it need not be saved,
 * as it can be re-created from the current task global
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL
	st.a	r13, [sp, -4]
	st.a	r14, [sp, -4]
	st.a	r15, [sp, -4]
	st.a	r16, [sp, -4]
	st.a	r17, [sp, -4]
	st.a	r18, [sp, -4]
	st.a	r19, [sp, -4]
	st.a	r20, [sp, -4]
	st.a	r21, [sp, -4]
	st.a	r22, [sp, -4]
	st.a	r23, [sp, -4]
	st.a	r24, [sp, -4]
#ifdef CONFIG_ARC_CURR_IN_REG
	sub	sp, sp, 8
#else
	st.a	r25, [sp, -4]
	sub	sp, sp, 4
#endif
.endm

/*--------------------------------------------------------------
 * RESTORE_CALLEE_SAVED_KERNEL:
 * Loads the callee (non scratch) reg file by popping from the kernel mode
 * stack. This is the reverse of SAVE_CALLEE_SAVED_KERNEL.
 *
 * NOTE:
 * Ideally this should only be called in switch_to, for loading the
 * switched-IN task's CALLEE reg file.
 * For all other cases RESTORE_CALLEE_SAVED_FAST must be used,
 * which simply pops the stack w/o touching regs.
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL

#ifdef CONFIG_ARC_CURR_IN_REG
	add	sp, sp, 8	/* skip callee_reg gutter and user r25 placeholder */
#else
	add	sp, sp, 4	/* skip "callee_regs->stack_place_holder" */
	ld.ab	r25, [sp, 4]
#endif

	ld.ab	r24, [sp, 4]
	ld.ab	r23, [sp, 4]
	ld.ab	r22, [sp, 4]
	ld.ab	r21, [sp, 4]
	ld.ab	r20, [sp, 4]
	ld.ab	r19, [sp, 4]
	ld.ab	r18, [sp, 4]
	ld.ab	r17, [sp, 4]
	ld.ab	r16, [sp, 4]
	ld.ab	r15, [sp, 4]
	ld.ab	r14, [sp, 4]
	ld.ab	r13, [sp, 4]

.endm
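
/*--------------------------------------------------------------
 * Illustration only (simplified sketch, not the actual switch_to code):
 * how the two kernel-mode callee macros are meant to pair up across a
 * context switch. "prev_ksp"/"next_ksp" are hypothetical locations
 * holding each task's saved kernel SP.
 *
 *	SAVE_CALLEE_SAVED_KERNEL	; push outgoing task's r13-r25
 *	st	sp, [@prev_ksp]		; remember its kernel SP
 *	ld	sp, [@next_ksp]		; adopt incoming task's kernel SP
 *	RESTORE_CALLEE_SAVED_KERNEL	; pop incoming task's r13-r25
 *
 * The save and restore operate on two different kernel stacks, which is
 * why the frame layouts of the two macros must match exactly.
 *-------------------------------------------------------------*/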

/*--------------------------------------------------------------
 * RESTORE_CALLEE_SAVED_USER:
 * This is called after do_signal, where the tracer might have changed the
 * callee regs, thus we need to restore the reg file.
 * Special case handling is required for r25 in case it is used by the kernel
 * for caching the task ptr. Ptrace would have modified the on-kernel-stack
 * value of r25, which needs to be shoved back into task->thread.user_r25,
 * from where the low level exception/ISR return code will retrieve it while
 * restoring the rest of the callee reg-file.
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER

	add	sp, sp, 4	/* skip "callee_regs->stack_place_holder" */

#ifdef CONFIG_ARC_CURR_IN_REG
	ld.ab	r12, [sp, 4]
	st	r12, [r25, TASK_THREAD + THREAD_USER_R25]
#else
	ld.ab	r25, [sp, 4]
#endif

	ld.ab	r24, [sp, 4]
	ld.ab	r23, [sp, 4]
	ld.ab	r22, [sp, 4]
	ld.ab	r21, [sp, 4]
	ld.ab	r20, [sp, 4]
	ld.ab	r19, [sp, 4]
	ld.ab	r18, [sp, 4]
	ld.ab	r17, [sp, 4]
	ld.ab	r16, [sp, 4]
	ld.ab	r15, [sp, 4]
	ld.ab	r14, [sp, 4]
	ld.ab	r13, [sp, 4]
.endm

/*--------------------------------------------------------------
 * Super FAST restore of callee saved regs by simply re-adjusting SP
 *-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
	add	sp, sp, 14 * 4
.endm

/*--------------------------------------------------------------
 * Restore User mode r25 saved in task_struct->thread.user_r25
 *-------------------------------------------------------------*/
.macro RESTORE_USER_R25
	ld	r25, [r25, TASK_THREAD + THREAD_USER_R25]
.endm

/*-------------------------------------------------------------
 * given a tsk struct, get to the base of its kernel mode stack
 * tsk->thread_info is really a PAGE, whose bottom hosts the stack,
 * which grows upwards towards thread_info
 *------------------------------------------------------------*/

.macro GET_TSK_STACK_BASE tsk, out

	/* Get task->thread_info (this is essentially start of a PAGE) */
	ld	\out, [\tsk, TASK_THREAD_INFO]

	/* Go to end of page where stack begins (grows upwards) */
	add2	\out, \out, (THREAD_SIZE - 4)/4	/* one word GUTTER */

.endm
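
/*--------------------------------------------------------------
 * Worked example for GET_TSK_STACK_BASE (illustration only, assuming
 * THREAD_SIZE = 8192 and a thread_info page at 0x8000_2000):
 *
 *	ld	\out, [\tsk, TASK_THREAD_INFO]	; \out = 0x8000_2000
 *	add2	\out, \out, (8192 - 4)/4	; \out += 2047 * 4 = 0x1FFC
 *						; \out = 0x8000_3FFC
 *
 * add2 scales its last operand by 4, so the constant is pre-divided by 4;
 * the "- 4" leaves the one word gutter mentioned above, i.e. the stack
 * base is the last usable word of the thread_info page.
 *-------------------------------------------------------------*/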

/*--------------------------------------------------------------
 * Switch to Kernel Mode stack if SP points to User Mode stack
 *
 * Entry   : r9 contains pre-IRQ/exception/trap status32
 * Exit    : SP is set to kernel mode stack pointer
 *           If CURR_IN_REG, r25 set to "current" task pointer
 * Clobbers: r9
 *-------------------------------------------------------------*/

.macro SWITCH_TO_KERNEL_STK

	/* User Mode when this happened? Yes: proceed to switch stack */
	bbit1	r9, STATUS_U_BIT, 88f

	/* OK we were already in kernel mode when this event happened, thus can
	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
	 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
	/* However....
	 * If Level 2 Interrupts enabled, we may end up with a corner case:
	 * 1. User Task executing
	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
	 * 3. But before it could switch SP from USER to KERNEL stack
	 *    an L2 IRQ "interrupts" L1
	 * That way although the L2 IRQ happened in Kernel mode, the stack is
	 * still not switched.
	 * To handle this, we may need to switch stack even if in kernel mode,
	 * provided SP has a value in the range of the USER mode stack
	 * ( < 0x7000_0000 )
	 */
	brlo	sp, VMALLOC_START, 88f

	/* TODO: vineetg:
	 * We need to be a bit more cautious here. What if a kernel bug in
	 * the L1 ISR caused SP to go whacko (some small value which looks like
	 * a USER stk) and then we take an L2 ISR.
	 * The brlo above alone would treat it as a valid L1-L2 scenario
	 * instead of shouting aloud.
	 * The only feasible way is to make sure this L2 happened in the
	 * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in the
	 * L1 ISR before it switches stack
	 */

#endif

	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
	 * safe-keeping not really needed, but it keeps the epilogue code
	 * (SP restore) simpler/uniform.
	 */
	b.d	77f

	st.a	sp, [sp, -12]	; Make room for orig_r0 and orig_r8

88:	/*------ Intr/Excp happened in user mode, "switch" stack ------ */

	GET_CURR_TASK_ON_CPU   r9

#ifdef CONFIG_ARC_CURR_IN_REG

	/* If current task pointer cached in r25, time to
	 *  -safekeep USER r25 in task->thread_struct->user_r25
	 *  -load r25 with current task ptr
	 */
	st.as	r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
	mov	r25, r9
#endif

	/* With current tsk in r9, get its kernel mode stack base */
	GET_TSK_STACK_BASE  r9, r9

#ifdef PT_REGS_CANARY
	st	0xabcdabcd, [r9, 0]
#endif

	/* Save Pre Intr/Exception User SP on kernel stack */
	st.a	sp, [r9, -12]	; Make room for orig_r0 and orig_r8

	/* CAUTION:
	 * SP should be set at the very end when we are done with everything
	 * In case of 2 levels of interrupt we depend on the value of SP to
	 * assume that everything else is done (loading r25 etc)
	 */

	/* set SP to point to kernel mode stack */
	mov	sp, r9

77:	/* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */

.endm

/*------------------------------------------------------------
 * "FAKE" an rtie to return from CPU Exception context
 * This is to re-enable Exceptions within an exception
 * Look at EV_ProtV to see how this is actually used
 *-------------------------------------------------------------*/

.macro FAKE_RET_FROM_EXCPN  reg

	ld	\reg, [sp, PT_status32]
	bic	\reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
	bset	\reg, \reg, STATUS_L_BIT
	sr	\reg, [erstatus]
	mov	\reg, 55f
	sr	\reg, [eret]

	rtie
55:
.endm

/*
 * @reg [OUT] &thread_info of "current"
 */
.macro GET_CURR_THR_INFO_FROM_SP  reg
	and	\reg, sp, ~(THREAD_SIZE - 1)
.endm

/*
 * @reg [OUT] thread_info->flags of "current"
 */
.macro GET_CURR_THR_INFO_FLAGS  reg
	GET_CURR_THR_INFO_FROM_SP  \reg
	ld	\reg, [\reg, THREAD_INFO_FLAGS]
.endm
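
/*--------------------------------------------------------------
 * Worked example for GET_CURR_THR_INFO_FROM_SP (illustration only, again
 * assuming THREAD_SIZE = 8192): the kernel stack and thread_info share one
 * THREAD_SIZE-aligned region, so masking the low bits of SP lands on
 * thread_info.
 *
 *	; sp = 0x8000_3E40, somewhere inside the kernel stack
 *	and	\reg, sp, ~(8192 - 1)	; \reg = sp & 0xFFFF_E000
 *					; \reg = 0x8000_2000 = &thread_info
 *
 * This only works while SP really is on the task's kernel stack, which is
 * exactly the situation after SWITCH_TO_KERNEL_STK has run.
 *-------------------------------------------------------------*/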

/*--------------------------------------------------------------
 * For early Exception Prologue, a core reg is temporarily needed to
 * code the rest of the prolog (stack switching). This is done by stashing
 * it to memory (non-SMP case) or the SCRATCH0 Aux Reg (SMP).
 *
 * Before saving the full regfile - this reg is restored back, only
 * to be saved again on the kernel mode stack, as part of pt_regs.
 *-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG	reg
#ifdef CONFIG_SMP
	sr	\reg, [ARC_REG_SCRATCH_DATA0]
#else
	st	\reg, [@ex_saved_reg1]
#endif
.endm

.macro EXCPN_PROLOG_RESTORE_REG	reg
#ifdef CONFIG_SMP
	lr	\reg, [ARC_REG_SCRATCH_DATA0]
#else
	ld	\reg, [@ex_saved_reg1]
#endif
.endm

/*--------------------------------------------------------------
 * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
 * Requires SP to be already switched to kernel mode Stack
 * sp points to the next free element on the stack at exit of this macro.
 * Registers are pushed / popped in the order defined in struct pt_regs
 * in asm/ptrace.h
 * Note that syscalls are implemented via TRAP, which is also an exception
 * from the CPU's point of view
 *-------------------------------------------------------------*/
.macro SAVE_ALL_EXCEPTION	marker

	st	\marker, [sp, 8]
	st	r0, [sp, 4]	/* orig_r0, needed only for sys calls */

	/* Restore r9 used to code the early prologue */
	EXCPN_PROLOG_RESTORE_REG  r9

	SAVE_CALLER_SAVED
	st.a	r26, [sp, -4]	/* gp */
	st.a	fp, [sp, -4]
	st.a	blink, [sp, -4]
	lr	r9, [eret]
	st.a	r9, [sp, -4]
	lr	r9, [erstatus]
	st.a	r9, [sp, -4]
	st.a	lp_count, [sp, -4]
	lr	r9, [lp_end]
	st.a	r9, [sp, -4]
	lr	r9, [lp_start]
	st.a	r9, [sp, -4]
	lr	r9, [erbta]
	st.a	r9, [sp, -4]

#ifdef PT_REGS_CANARY
	mov	r9, 0xdeadbeef
	st	r9, [sp, -4]
#endif

	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
	sub	sp, sp, 4
.endm

/*--------------------------------------------------------------
 * Save scratch regs for exceptions
 *-------------------------------------------------------------*/
.macro SAVE_ALL_SYS
	SAVE_ALL_EXCEPTION  orig_r8_IS_EXCPN
.endm

/*--------------------------------------------------------------
 * Save scratch regs for sys calls
 *-------------------------------------------------------------*/
.macro SAVE_ALL_TRAP
	/*
	 * Setup pt_regs->orig_r8.
	 * Encode syscall number (r8) in upper short word of event type (r9)
	 * N.B. #1: This is already endian safe (see ptrace.h)
	 *      #2: Only r9 can be used as scratch as it is already clobbered
	 *          and its contents are no longer needed by the latter part
	 *          of the exception prologue
	 */
	lsl	r9, r8, 16
	or	r9, r9, orig_r8_IS_SCALL

	SAVE_ALL_EXCEPTION  r9
.endm
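
/*--------------------------------------------------------------
 * Worked example for the SAVE_ALL_TRAP encoding above (illustration only;
 * the numeric value of orig_r8_IS_SCALL itself doesn't matter here):
 *
 *	; r8 = syscall number, say 90 (0x5A)
 *	lsl	r9, r8, 16		; r9 = 0x005A_0000
 *	or	r9, r9, orig_r8_IS_SCALL ; syscall no. in the upper halfword,
 *					 ; event type in the lower halfword
 *
 * pt_regs->orig_r8 thus carries both "this entry was a syscall" and which
 * syscall it was, in a single word.
 *-------------------------------------------------------------*/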

/*--------------------------------------------------------------
 * Restore all registers used by system calls or Exceptions
 * SP should always be pointing to the next free stack element
 * when entering this macro.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro RESTORE_ALL_SYS

	add	sp, sp, 4	/* hop over unused "pt_regs->stack_place_holder" */

	ld.ab	r9, [sp, 4]
	sr	r9, [erbta]
	ld.ab	r9, [sp, 4]
	sr	r9, [lp_start]
	ld.ab	r9, [sp, 4]
	sr	r9, [lp_end]
	ld.ab	r9, [sp, 4]
	mov	lp_count, r9
	ld.ab	r9, [sp, 4]
	sr	r9, [erstatus]
	ld.ab	r9, [sp, 4]
	sr	r9, [eret]
	ld.ab	blink, [sp, 4]
	ld.ab	fp, [sp, 4]
	ld.ab	r26, [sp, 4]	/* gp */
	RESTORE_CALLER_SAVED

	ld	sp, [sp]	/* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */
.endm


/*--------------------------------------------------------------
 * Save all registers used by interrupt handlers.
 *-------------------------------------------------------------*/
.macro SAVE_ALL_INT1

	/* restore original r9, saved in int1_saved_reg
	 * It will be saved on stack in macro: SAVE_CALLER_SAVED
	 */
#ifdef CONFIG_SMP
	lr	r9, [ARC_REG_SCRATCH_DATA0]
#else
	ld	r9, [@int1_saved_reg]
#endif

	/* now we are ready to save the remaining context :) */
	st	orig_r8_IS_IRQ1, [sp, 8]	/* Event Type */
	st	0, [sp, 4]			/* orig_r0, N/A for IRQ */
	SAVE_CALLER_SAVED
	st.a	r26, [sp, -4]	/* gp */
	st.a	fp, [sp, -4]
	st.a	blink, [sp, -4]
	st.a	ilink1, [sp, -4]
	lr	r9, [status32_l1]
	st.a	r9, [sp, -4]
	st.a	lp_count, [sp, -4]
	lr	r9, [lp_end]
	st.a	r9, [sp, -4]
	lr	r9, [lp_start]
	st.a	r9, [sp, -4]
	lr	r9, [bta_l1]
	st.a	r9, [sp, -4]

#ifdef PT_REGS_CANARY
	mov	r9, 0xdeadbee1
	st	r9, [sp, -4]
#endif
	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
	sub	sp, sp, 4
.endm

.macro SAVE_ALL_INT2

	/* TODO-vineetg: for SMP we can't use a global, nor can we use
	 * SCRATCH0 as we do for int1, because while int1 is using
	 * it, int2 can come
	 */
	/* restore original r9, saved in int2_saved_reg */
	ld	r9, [@int2_saved_reg]

	/* now we are ready to save the remaining context :) */
	st	orig_r8_IS_IRQ2, [sp, 8]	/* Event Type */
	st	0, [sp, 4]			/* orig_r0, N/A for IRQ */
	SAVE_CALLER_SAVED
	st.a	r26, [sp, -4]	/* gp */
	st.a	fp, [sp, -4]
	st.a	blink, [sp, -4]
	st.a	ilink2, [sp, -4]
	lr	r9, [status32_l2]
	st.a	r9, [sp, -4]
	st.a	lp_count, [sp, -4]
	lr	r9, [lp_end]
	st.a	r9, [sp, -4]
	lr	r9, [lp_start]
	st.a	r9, [sp, -4]
	lr	r9, [bta_l2]
	st.a	r9, [sp, -4]

#ifdef PT_REGS_CANARY
	mov	r9, 0xdeadbee2
	st	r9, [sp, -4]
#endif

	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
	sub	sp, sp, 4
.endm
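
/*--------------------------------------------------------------
 * Illustration only (heavily simplified, UP path shown; the real entry
 * code lives in the arch interrupt handlers): how the INT1 macros are
 * meant to bracket a level-1 interrupt.
 *
 *	; on entry, free up r9 as a scratch register
 *	st	r9, [@int1_saved_reg]	; SAVE_ALL_INT1 reloads it from here
 *	lr	r9, [status32_l1]	; pre-IRQ status32, as required by
 *					; SWITCH_TO_KERNEL_STK
 *	SWITCH_TO_KERNEL_STK
 *	SAVE_ALL_INT1
 *	; ... call into the C interrupt handling code ...
 *	RESTORE_ALL_INT1
 *	rtie
 *-------------------------------------------------------------*/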

/*--------------------------------------------------------------
 * Restore all registers used by interrupt handlers.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/

.macro RESTORE_ALL_INT1
	add	sp, sp, 4	/* hop over unused "pt_regs->stack_place_holder" */

	ld.ab	r9, [sp, 4]	/* Actual reg file */
	sr	r9, [bta_l1]
	ld.ab	r9, [sp, 4]
	sr	r9, [lp_start]
	ld.ab	r9, [sp, 4]
	sr	r9, [lp_end]
	ld.ab	r9, [sp, 4]
	mov	lp_count, r9
	ld.ab	r9, [sp, 4]
	sr	r9, [status32_l1]
	ld.ab	r9, [sp, 4]
	mov	ilink1, r9
	ld.ab	blink, [sp, 4]
	ld.ab	fp, [sp, 4]
	ld.ab	r26, [sp, 4]	/* gp */
	RESTORE_CALLER_SAVED

	ld	sp, [sp]	/* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */
.endm

.macro RESTORE_ALL_INT2
	add	sp, sp, 4	/* hop over unused "pt_regs->stack_place_holder" */

	ld.ab	r9, [sp, 4]
	sr	r9, [bta_l2]
	ld.ab	r9, [sp, 4]
	sr	r9, [lp_start]
	ld.ab	r9, [sp, 4]
	sr	r9, [lp_end]
	ld.ab	r9, [sp, 4]
	mov	lp_count, r9
	ld.ab	r9, [sp, 4]
	sr	r9, [status32_l2]
	ld.ab	r9, [sp, 4]
	mov	ilink2, r9
	ld.ab	blink, [sp, 4]
	ld.ab	fp, [sp, 4]
	ld.ab	r26, [sp, 4]	/* gp */
	RESTORE_CALLER_SAVED

	ld	sp, [sp]	/* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */

.endm


/* Get CPU-ID of this core */
.macro GET_CPU_ID  reg
	lr	\reg, [identity]
	lsr	\reg, \reg, 8
	bmsk	\reg, \reg, 7
.endm

#ifdef CONFIG_SMP

/*-------------------------------------------------
 * Retrieve the current running task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 */
.macro GET_CURR_TASK_ON_CPU  reg
	GET_CPU_ID  \reg
	ld.as	\reg, [@_current_task, \reg]
.endm

/*-------------------------------------------------
 * Save a new task as the "current" task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 *
 * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
 * because ST r0, [r1, offset] can ONLY have s9 @offset
 * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
 */

.macro SET_CURR_TASK_ON_CPU	tsk, tmp
	GET_CPU_ID  \tmp
	add2	\tmp, @_current_task, \tmp
	st	\tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov	r25, \tsk
#endif

.endm


#else	/* Uniprocessor implementation of macros */

.macro GET_CURR_TASK_ON_CPU  reg
	ld	\reg, [@_current_task]
.endm

.macro SET_CURR_TASK_ON_CPU	tsk, tmp
	st	\tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov	r25, \tsk
#endif
.endm

#endif	/* SMP / UNI */

/* ------------------------------------------------------------------
 * Get the ptr to some field of Current Task at @off in task struct
 *  -Uses r25 for Current task ptr if that is enabled
 */

#ifdef CONFIG_ARC_CURR_IN_REG

.macro GET_CURR_TASK_FIELD_PTR  off, reg
	add	\reg, r25, \off
.endm

#else

.macro GET_CURR_TASK_FIELD_PTR  off, reg
	GET_CURR_TASK_ON_CPU  \reg
	add	\reg, \reg, \off
.endm

#endif	/* CONFIG_ARC_CURR_IN_REG */

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_ARC_ENTRY_H */