/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer rely on the fact that
 *  if we are NOT in user mode, the stack is already switched to kernel mode.
 *  e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
 *  its prologue, including the stack switch from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
 *   Normally CPU does this automatically, however when doing FAKE rtie,
 *   we also need to explicitly do this. The problem in macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *      r25 contains the kernel current task ptr
 *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
 *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *      address write-back load ld.ab instead of separate ld/add instns
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H

#ifdef __ASSEMBLY__
#include <asm/unistd.h>		/* For NR_syscalls definition */
#include <asm/asm-offsets.h>
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h>	/* For VMALLOC_START */
#include <asm/thread_info.h>	/* For THREAD_SIZE */

/* Note on the LD/ST addr modes with addr reg wback
 *
 * LD.a same as LD.aw
 *
 * LD.a    reg1, [reg2, x]  => Pre Incr
 *      Eff Addr for load = [reg2 + x]
 *
 * LD.ab   reg1, [reg2, x]  => Post Incr
 *      Eff Addr for load = [reg2]
 */
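
/*
 * Worked example (illustration only): assume sp = 0x1000
 *   st.a   r0, [sp, -4]	; stores r0 at 0x0ffc, sp becomes 0x0ffc (pre-decr "push")
 *   ld.ab  r0, [sp, 4]		; loads r0 from 0x0ffc, sp becomes 0x1000 (post-incr "pop")
 * This is exactly the pattern the PUSH/POP helpers below rely on.
 */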

.macro PUSH reg
	st.a	\reg, [sp, -4]
.endm

.macro PUSHAX aux
	lr	r9, [\aux]
	PUSH	r9
.endm

.macro POP reg
	ld.ab	\reg, [sp, 4]
.endm

.macro POPAX aux
	POP	r9
	sr	r9, [\aux]
.endm
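
/*
 * Note: PUSHAX/POPAX stage the aux register value through r9,
 * so both of them clobber r9.
 */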

/*--------------------------------------------------------------
 * Helpers to save/restore Scratch Regs:
 * used by Interrupt/Exception Prologue/Epilogue
 *-------------------------------------------------------------*/
.macro  SAVE_R0_TO_R12
	PUSH	r0
	PUSH	r1
	PUSH	r2
	PUSH	r3
	PUSH	r4
	PUSH	r5
	PUSH	r6
	PUSH	r7
	PUSH	r8
	PUSH	r9
	PUSH	r10
	PUSH	r11
	PUSH	r12
.endm

.macro RESTORE_R12_TO_R0
	POP	r12
	POP	r11
	POP	r10
	POP	r9
	POP	r8
	POP	r7
	POP	r6
	POP	r5
	POP	r4
	POP	r3
	POP	r2
	POP	r1
	POP	r0
.endm

/*--------------------------------------------------------------
 * Helpers to save/restore callee-saved regs:
 * used by several macros below
 *-------------------------------------------------------------*/
.macro SAVE_R13_TO_R24
	PUSH	r13
	PUSH	r14
	PUSH	r15
	PUSH	r16
	PUSH	r17
	PUSH	r18
	PUSH	r19
	PUSH	r20
	PUSH	r21
	PUSH	r22
	PUSH	r23
	PUSH	r24
.endm

.macro RESTORE_R24_TO_R13
	POP	r24
	POP	r23
	POP	r22
	POP	r21
	POP	r20
	POP	r19
	POP	r18
	POP	r17
	POP	r16
	POP	r15
	POP	r14
	POP	r13
.endm


/*--------------------------------------------------------------
 * Collect User Mode callee regs as struct callee_regs - needed by
 * fork/do_signal/unaligned-access-emulation.
 * (By default only scratch regs are saved on entry to kernel)
 *
 * Special handling for r25 if used for caching Task Pointer.
 * It would have been saved in task->thread.user_r25 already, but to keep
 * the interface the same it is copied into the regular r25 placeholder in
 * struct callee_regs.
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER

	SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
	; Retrieve orig r25 and save it on stack
	ld      r12, [r25, TASK_THREAD + THREAD_USER_R25]
	st.a    r12, [sp, -4]
#else
	PUSH	r25
#endif

.endm
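
/*
 * Layout note (derived from the push order above): the r25 value (or its
 * stand-in from task->thread.user_r25) is pushed last, so on exit sp points
 * at the r25 slot, with r24..r13 at successively higher addresses, i.e. at
 * the start of the memory image used as struct callee_regs.
 */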

/*--------------------------------------------------------------
 * Save kernel Mode callee regs at the time of Context Switch.
 *
 * Special handling for r25 if used for caching Task Pointer.
 * Kernel simply skips saving it since it will be loaded with the
 * incoming task pointer anyway
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL

	SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
	sub     sp, sp, 4
#else
	PUSH	r25
#endif
.endm

/*--------------------------------------------------------------
 * Opposite of SAVE_CALLEE_SAVED_KERNEL
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL

#ifdef CONFIG_ARC_CURR_IN_REG
	add     sp, sp, 4  /* skip usual r25 placeholder */
#else
	POP	r25
#endif
	RESTORE_R24_TO_R13
.endm

/*--------------------------------------------------------------
 * Opposite of SAVE_CALLEE_SAVED_USER
 *
 * ptrace tracer or unaligned-access fixup might have changed a user mode
 * callee reg, which is saved back to the usual r25 storage location
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER

#ifdef CONFIG_ARC_CURR_IN_REG
	ld.ab   r12, [sp, 4]
	st      r12, [r25, TASK_THREAD + THREAD_USER_R25]
#else
	POP	r25
#endif
	RESTORE_R24_TO_R13
.endm

/*--------------------------------------------------------------
 * Super FAST Restore callee saved regs by simply re-adjusting SP
 *-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
	add     sp, sp, SZ_CALLEE_REGS
.endm

/*--------------------------------------------------------------
 * Restore User mode r25 saved in task_struct->thread.user_r25
 *-------------------------------------------------------------*/
.macro RESTORE_USER_R25
	ld  r25, [r25, TASK_THREAD + THREAD_USER_R25]
.endm

/*-------------------------------------------------------------
 * given a tsk struct, get to the base of its kernel mode stack
 * tsk->thread_info is really a PAGE, the end of which hosts the stack,
 * which grows down towards thread_info
 *------------------------------------------------------------*/

.macro GET_TSK_STACK_BASE tsk, out

	/* Get task->thread_info (this is essentially start of a PAGE) */
	ld  \out, [\tsk, TASK_THREAD_INFO]

	/* Go to end of page where stack begins (grows downwards from there) */
	add2 \out, \out, (THREAD_SIZE)/4

.endm
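
/*
 * For reference: add2 adds its last operand shifted left by 2, so the
 * "add2 \out, \out, THREAD_SIZE/4" above yields thread_info + THREAD_SIZE,
 * the initial (empty) kernel stack position from which pushes grow downwards.
 */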

/*--------------------------------------------------------------
 * Switch to Kernel Mode stack if SP points to User Mode stack
 *
 * Entry   : r9 contains pre-IRQ/exception/trap status32
 * Exit    : SP is set to kernel mode stack pointer
 *           If CURR_IN_REG, r25 set to "current" task pointer
 * Clobbers: r9
 *-------------------------------------------------------------*/

.macro SWITCH_TO_KERNEL_STK

	/* User Mode when this happened? Yes: Proceed to switch stack */
	bbit1   r9, STATUS_U_BIT, 88f

	/* OK we were already in kernel mode when this event happened, thus can
	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
	 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
	/* However....
	 * If Level 2 Interrupts enabled, we may end up with a corner case:
	 * 1. User Task executing
	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
	 * 3. But before it could switch SP from USER to KERNEL stack
	 *      a L2 IRQ "Interrupts" L1
	 * That way, although the L2 IRQ happened in Kernel mode, the stack is
	 * still not switched.
	 * To handle this, we may need to switch stack even if in kernel mode
	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
	 */
	brlo sp, VMALLOC_START, 88f

	/* TODO: vineetg:
	 * We need to be a bit more cautious here. What if a kernel bug in
	 * the L1 ISR caused SP to go whacko (some small value which looks like
	 * USER stk) and then we take the L2 ISR.
	 * The brlo above alone would treat it as a valid L1-L2 scenario
	 * instead of shouting aloud.
	 * The only feasible way is to make sure this L2 happened in the
	 * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in the
	 * L1 ISR before it switches stack
	 */

#endif

	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
	 * safe-keeping not really needed, but it keeps the epilogue code
	 * (SP restore) simpler/uniform.
	 */
	b.d	77f

	st.a	sp, [sp, -12]	; Make room for orig_r0 and orig_r8

88: /*------ Intr/Excp happened in user mode, "switch" stack ------ */

	GET_CURR_TASK_ON_CPU   r9

#ifdef CONFIG_ARC_CURR_IN_REG

	/* If current task pointer cached in r25, time to
	 *  -safekeep USER r25 in task->thread.user_r25
	 *  -load r25 with current task ptr
	 */
	st.as	r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
	mov	r25, r9
#endif

	/* With current tsk in r9, get its kernel mode stack base */
	GET_TSK_STACK_BASE  r9, r9

	/* Save Pre Intr/Exception User SP on kernel stack */
	st.a    sp, [r9, -12]	; Make room for orig_r0 and orig_r8

	/* CAUTION:
	 * SP should be set at the very end when we are done with everything
	 * In case of 2 levels of interrupt we depend on value of SP to assume
	 * that everything else is done (loading r25 etc)
	 */

	/* set SP to point to kernel mode stack */
	mov sp, r9

77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
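
	/* At this point the pre-event SP sits at [sp], while [sp, 4] and
	 * [sp, 8] are the orig_r0 / orig_r8 slots filled by the SAVE_ALL_*
	 * macros; the epilogues undo the whole arrangement with a single
	 * "ld sp, [sp]".
	 */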

.endm

/*------------------------------------------------------------
 * "FAKE" a rtie to return from CPU Exception context
 * This is to re-enable Exceptions within an exception handler
 * Look at EV_ProtV to see how this is actually used
 *-------------------------------------------------------------*/

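/*
 * In essence, the macro below clears U (stay in kernel mode) and DE in the
 * saved status32, sets the L bit (discarding any live zero-overhead-loop
 * context, per the Aug 2008 note at the top of this file), writes the result
 * to ERSTATUS, points ERET at the local label 55 and executes RTIE, so
 * control "returns" to 55: with the exception context unwound and exceptions
 * usable again.
 */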
.macro FAKE_RET_FROM_EXCPN  reg

	ld  \reg, [sp, PT_status32]
	bic  \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
	bset \reg, \reg, STATUS_L_BIT
	sr  \reg, [erstatus]
	mov \reg, 55f
	sr  \reg, [eret]

	rtie
55:
.endm

/*
 * @reg [OUT] &thread_info of "current"
 */
.macro GET_CURR_THR_INFO_FROM_SP  reg
	bic \reg, sp, (THREAD_SIZE - 1)
.endm

/*
 * @reg [OUT] thread_info->flags of "current"
 */
.macro GET_CURR_THR_INFO_FLAGS  reg
	GET_CURR_THR_INFO_FROM_SP  \reg
	ld  \reg, [\reg, THREAD_INFO_FLAGS]
.endm

/*--------------------------------------------------------------
 * For early Exception Prologue, a core reg is temporarily needed to
 * code the rest of the prologue (stack switching). This is done by stashing
 * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
 *
 * Before saving the full regfile - this reg is restored back, only
 * to be saved again on kernel mode stack, as part of pt_regs.
 *-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG	reg
#ifdef CONFIG_SMP
	sr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	st  \reg, [@ex_saved_reg1]
#endif
.endm

.macro EXCPN_PROLOG_RESTORE_REG	reg
#ifdef CONFIG_SMP
	lr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	ld  \reg, [@ex_saved_reg1]
#endif
.endm

/*--------------------------------------------------------------
 * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
 * Requires SP to be already switched to kernel mode Stack
 * sp points to the next free element on the stack at exit of this macro.
 * Registers are pushed / popped in the order defined in struct pt_regs
 * in asm/ptrace.h
 * Note that syscalls are implemented via TRAP which is also an exception
 * from CPU's point of view
 *-------------------------------------------------------------*/
.macro SAVE_ALL_EXCEPTION   marker

	st      \marker, [sp, 8]	/* orig_r8 */
	st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */

	/* Restore r9 used to code the early prologue */
	EXCPN_PROLOG_RESTORE_REG  r9

	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSHAX	eret
	PUSHAX	erstatus
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	erbta
.endm

/*--------------------------------------------------------------
 * Save scratch regs for exceptions
 *-------------------------------------------------------------*/
.macro SAVE_ALL_SYS
	SAVE_ALL_EXCEPTION  orig_r8_IS_EXCPN
.endm

/*--------------------------------------------------------------
 * Save scratch regs for sys calls
 *-------------------------------------------------------------*/
.macro SAVE_ALL_TRAP
	/*
	 * Setup pt_regs->orig_r8.
	 * Encode syscall number (r8) in upper short word of event type (r9)
	 * N.B. #1: This is already endian safe (see ptrace.h)
	 *      #2: Only r9 can be used as scratch as it is already clobbered
	 *          and its contents are no longer needed by the latter part
	 *          of exception prologue
	 */
	lsl  r9, r8, 16
	or   r9, r9, orig_r8_IS_SCALL
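	/*
	 * e.g. for a syscall with number 0x5a in r8, r9 now holds
	 * (0x5a << 16) | orig_r8_IS_SCALL, which SAVE_ALL_EXCEPTION below
	 * stores as pt_regs->orig_r8
	 */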

	SAVE_ALL_EXCEPTION  r9
.endm

/*--------------------------------------------------------------
 * Restore all registers used by system call or Exceptions
 * SP should always be pointing to the next free stack element
 * when entering this macro.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro RESTORE_ALL_SYS
	POPAX	erbta
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	erstatus
	POPAX	eret
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */
.endm


/*--------------------------------------------------------------
 * Save all registers used by interrupt handlers.
 *-------------------------------------------------------------*/
.macro SAVE_ALL_INT1

	/* restore original r9 to be saved as part of reg-file */
#ifdef CONFIG_SMP
	lr  r9, [ARC_REG_SCRATCH_DATA0]
#else
	ld  r9, [@int1_saved_reg]
#endif

	/* now we are ready to save the remaining context :) */
	st      orig_r8_IS_IRQ1, [sp, 8]    /* Event Type */
	st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */

	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink1
	PUSHAX	status32_l1
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l1
.endm

.macro SAVE_ALL_INT2

	/* TODO-vineetg: for SMP we can't use a global, nor can we use
	*   SCRATCH0 as we do for int1, because while int1 is using
	*   it, an int2 can come in
	*/
	/* restore original r9, saved in int2_saved_reg */
	ld  r9, [@int2_saved_reg]

	/* now we are ready to save the remaining context :) */
	st      orig_r8_IS_IRQ2, [sp, 8]    /* Event Type */
	st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */

	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink2
	PUSHAX	status32_l2
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l2
.endm

/*--------------------------------------------------------------
 * Restore all registers used by interrupt handlers.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/

.macro RESTORE_ALL_INT1
	POPAX	bta_l1
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	status32_l1
	POP	ilink1
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */
.endm

.macro RESTORE_ALL_INT2
	POPAX	bta_l2
	POPAX	lp_start
	POPAX	lp_end

	POP	r9
	mov	lp_count, r9	;LD to lp_count is not allowed

	POPAX	status32_l2
	POP	ilink2
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld  sp, [sp] /* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */
.endm


/* Get CPU-ID of this core */
.macro  GET_CPU_ID  reg
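	/* IDENTITY aux reg: bits [15:8] hold this core's id;
	 * the shift + 8-bit mask below extract just that field
	 */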
	lr  \reg, [identity]
	lsr \reg, \reg, 8
	bmsk \reg, \reg, 7
.endm

#ifdef CONFIG_SMP

/*-------------------------------------------------
 * Retrieve the current running task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 */
.macro  GET_CURR_TASK_ON_CPU   reg
	GET_CPU_ID  \reg
	ld.as  \reg, [@_current_task, \reg]
.endm

/*-------------------------------------------------
 * Save a new task as the "current" task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 *
 * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
 * because ST r0, [r1, offset] can ONLY have s9 @offset
 * while   LD can take s9 (4 byte insn) or LIMM (8 byte insn)
 */

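/*
 * In other words, the getter can use a single scaled load,
 *	ld.as  reg, [@_current_task, cpu_id]	; addr = _current_task + (cpu_id << 2)
 * whereas the setter has to form the address first (add2 below), since a
 * 32-bit symbol address does not fit in ST's s9 offset field.
 */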
.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	GET_CPU_ID  \tmp
	add2 \tmp, @_current_task, \tmp
	st   \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov r25, \tsk
#endif

.endm


#else   /* Uniprocessor implementation of macros */

.macro  GET_CURR_TASK_ON_CPU    reg
	ld  \reg, [@_current_task]
.endm

.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	st  \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov r25, \tsk
#endif
.endm

#endif /* SMP / UNI */

/* ------------------------------------------------------------------
 * Get the ptr to some field of Current Task at @off in task struct
 *  -Uses r25 for Current task ptr if that is enabled
 */

#ifdef CONFIG_ARC_CURR_IN_REG

.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	add \reg, r25, \off
.endm

#else

.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	GET_CURR_TASK_ON_CPU  \reg
	add \reg, \reg, \off
.endm

#endif	/* CONFIG_ARC_CURR_IN_REG */

#endif  /* __ASSEMBLY__ */

#endif  /* __ASM_ARC_ENTRY_H */