/*
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find the first bit set in WINDOWSTART from the left, plus 1:
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi    \bit, \bit, WSBITS - 32 + 1	# uppermost bit set -> return 1
#else
	movi    \bit, WSBITS
#if WSBITS > 16
	_bltui  \mask, 0x10000, 99f
	addi    \bit, \bit, -16
	extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui  \mask, 0x100, 99f
	addi    \bit, \bit, -8
	srli    \mask, \mask, 8
#endif
99:	_bltui  \mask, 0x10, 99f
	addi    \bit, \bit, -4
	srli    \mask, \mask, 4
99:	_bltui  \mask, 0x4, 99f
	addi    \bit, \bit, -2
	srli    \mask, \mask, 2
99:	_bltui  \mask, 0x2, 99f
	addi    \bit, \bit, -1
99:

#endif
	.endm

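/*
 * Illustrative only (not part of the original source): a C model of
 * ffs_ws, assuming a hypothetical fixed WSBITS. For a non-zero mask it
 * yields 1 when the uppermost bit is set and WSBITS when only bit 0 is
 * set, matching both the NSA-based and the shift-based variants above.
 *
 *	#include <stdint.h>
 *
 *	#define WSBITS 16	// example value; really config-dependent
 *
 *	static unsigned ffs_ws_model(uint32_t mask)
 *	{
 *		// __builtin_clz(0) is undefined, mirroring the macro's
 *		// precondition that at least one bit of mask is set
 *		return __builtin_clz(mask) + WSBITS - 32 + 1;
 *	}
 */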

	.macro	irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
	rsr	\flags, ps
	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	\tmp, LOCKLEVEL, 99f
	rsil	\tmp, LOCKLEVEL
99:
#else
	movi	\tmp, LOCKLEVEL
	rsr	\flags, ps
	or	\flags, \flags, \tmp
	xsr	\flags, ps
	rsync
#endif
#else
	rsil	\flags, LOCKLEVEL
#endif
	.endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(user_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	wsr	a2, depc		# terminate user stack trace with 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers
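
	/* Illustrative only: the ssr/slli/src/srli sequence above is a
	 * WSBITS-wide rotate-right of WINDOWSTART by WINDOWBASE, in C:
	 *
	 *	static uint32_t rotate_ws(uint32_t ws, unsigned wb)
	 *	{
	 *		return (((uint64_t)ws << WSBITS | ws) >> wb) &
	 *			((1u << WSBITS) - 1);
	 *	}
	 *
	 * so the bit for the current frame ends up at bit 0.
	 */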

	/* Save only live registers. */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file
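
	/* Worked example (illustrative): with a0 == 3 frames to save and a
	 * rotated windowstart whose low nibble is 0001, WMASK becomes
	 * (3 << 4) | 1 == 0x31: the restore path reads the frame count from
	 * bits 4.. and the a4-a15 group mask from bits 0..3.
	 */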

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */

2:	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)

/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit

	/* Save only the live window-frame */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

	_bnei	a2, 1, 1f

	/* Copy spill slots of a0 and a1 to imitate movsp
	 * in order to keep exception stack continuous
	 */
	l32i	a3, a1, PT_SIZE
	l32i	a0, a1, PT_SIZE + 4
	s32e	a3, a1, -16
	s32e	a0, a1, -12
1:
	l32i	a0, a1, PT_AREG0	# restore saved a0
	wsr	a0, depc

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, debugcause
	rsr	a3, epc1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, -1
	rsr	a3, excvaddr
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
#if XCHAL_HAVE_LOOPS
	xsr	a2, lcount
	s32i	a2, a1, PT_LCOUNT
#endif

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a2, exccause
	movi	a3, 0
	rsr	a0, excsave1
	s32i	a2, a1, PT_EXCCAUSE
	s32i	a3, a0, EXC_TABLE_FIXUP

	/* All unrecoverable state is saved on the stack now, and a1 is valid.
	 * Now we can allow exceptions again. In case we've got an interrupt,
	 * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts;
	 * otherwise it's left unchanged.
	 *
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 */

	rsr	a3, ps
	s32i	a3, a1, PT_PS		# save ps

#if XTENSA_FAKE_NMI
	/* Correct PS needs to be saved in the PT_PS:
	 * - in case of exception or level-1 interrupt it's in the PS,
	 *   and is already saved.
	 * - in case of medium level interrupt it's in the excsave2.
	 */
	movi	a0, EXCCAUSE_MAPPED_NMI
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beq	a2, a0, .Lmedium_level_irq
	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0

.Lmedium_level_irq:
	rsr	a0, excsave2
	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
	bgei	a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
	movi	a3, LOCKLEVEL

.Lexception:
	movi	a0, 1 << PS_WOE_BIT
	or	a3, a3, a0
#else
	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
	movi	a0, LOCKLEVEL
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
					# a3 = PS.INTLEVEL
	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
	movi	a2, 1 << PS_WOE_BIT
	or	a3, a3, a2
	rsr	a2, exccause
#endif

	/* restore return address (or 0 if return to userspace) */
	rsr	a0, depc
	wsr	a3, ps
	rsync				# PS.WOE => rsync => overflow

	/* Save lbeg, lend */
#if XCHAL_HAVE_LOOPS
	rsr	a4, lbeg
	rsr	a3, lend
	s32i	a4, a1, PT_LBEG
	s32i	a3, a1, PT_LEND
#endif

	/* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	rsr     a3, scompare1
	s32i    a3, a1, PT_SCOMPARE1
#endif

	/* Save optional registers. */

	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	rsr	a4, excsave1
	mov	a6, a1			# pass stack frame
	mov	a7, a2			# pass EXCCAUSE
	addx4	a4, a2, a4
	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler

	/* Call the second-level handler */

	callx4	a4

	/* Jump here for exception exit */
	.global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
	l32i	a2, a1, PT_EXCCAUSE
	movi	a3, EXCCAUSE_MAPPED_NMI
	beq	a2, a3, .LNMIexit
#endif
1:
	irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
	movi	a4, trace_hardirqs_off
	callx4	a4
#endif

	/* Jump if we are returning from kernel exceptions. */

	l32i	a3, a1, PT_PS
	GET_THREAD_INFO(a2, a1)
	l32i	a4, a2, TI_FLAGS
	_bbci.l	a3, PS_UM_BIT, 6f

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
	_bbci.l	a4, TIF_SIGPENDING, 5f

2:	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f

	/* Call do_signal() */

#ifdef CONFIG_TRACE_IRQFLAGS
	movi	a4, trace_hardirqs_on
	callx4	a4
#endif
	rsil	a2, 0
	movi	a4, do_notify_resume	# int do_notify_resume(struct pt_regs*)
	mov	a6, a1
	callx4	a4
	j	1b

3:	/* Reschedule */

#ifdef CONFIG_TRACE_IRQFLAGS
	movi	a4, trace_hardirqs_on
	callx4	a4
#endif
	rsil	a2, 0
	movi	a4, schedule	# void schedule (void)
	callx4	a4
	j	1b

#ifdef CONFIG_PREEMPT
6:
	_bbci.l	a4, TIF_NEED_RESCHED, 4f

	/* Check current_thread_info->preempt_count */

	l32i	a4, a2, TI_PRE_COUNT
	bnez	a4, 4f
	movi	a4, preempt_schedule_irq
	callx4	a4
	j	1b
#endif

#if XTENSA_FAKE_NMI
.LNMIexit:
	l32i	a3, a1, PT_PS
	_bbci.l	a3, PS_UM_BIT, 4f
#endif

5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	_bbci.l	a4, TIF_DB_DISABLED, 7f
	movi	a4, restore_dbreak
	callx4	a4
7:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
	movi	a4, check_tlb_sanity
	callx4	a4
#endif
6:
4:
#ifdef CONFIG_TRACE_IRQFLAGS
	extui	a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	a4, LOCKLEVEL, 1f
	movi	a4, trace_hardirqs_on
	callx4	a4
1:
#endif
	/* Restore optional registers. */

	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i    a2, a1, PT_SCOMPARE1
	wsr     a2, scompare1
#endif
	wsr	a3, ps		/* disable interrupts */

	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?

	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land) */

1:	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */

2:
#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */
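
	/* This is the usual power-of-two idiom; in C:
	 *
	 *	only_one_bit_set = (ws & (ws - 1)) == 0;
	 *
	 * e.g. ws == 0b0100 gives 0b0100 & 0b0011 == 0 (single frame),
	 * while ws == 0b0101 gives 0b0101 & 0b0100 != 0 (another frame
	 * is still live, so no movsp is needed).
	 */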

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, windowstart
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_SIZE+0
	s32i	a4, a1, PT_SIZE+4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_SIZE+8
	s32i	a4, a1, PT_SIZE+12

	/* Common exception exit.
	 * We restore the special registers and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	/* Restore address registers. */

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */
#if XCHAL_HAVE_LOOPS
	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount
#endif

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was a double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, depc
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exception, with PS.EXCM set. */

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	wsr	a2, ps

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
3:
	l32i	a0, a3, DT_DEBUG_SAVE
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2

	/* Debug exception is handled as an exception, so interrupts will
	 * likely be enabled in the common exception handler. Disable
	 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
	 * meaning.
	 */
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
	GET_THREAD_INFO(a2, a1)
	l32i	a3, a2, TI_PRE_COUNT
	addi	a3, a3, 1
	s32i	a3, a2, TI_PRE_COUNT
#endif

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, _user_exception
	j	_kernel_exception

2:	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	j	3b

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Debug exception while in exception mode. This may happen when
	 * a window overflow/underflow handler or a fast exception handler
	 * hits a data breakpoint, in which case we save and disable all
	 * data breakpoints, single-step the faulting instruction, and
	 * restore the data breakpoints.
	 */
1:
	bbci.l	a0, PS_UM_BIT, 1b	# jump if kernel mode

	rsr	a0, debugcause
	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak

	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	wsr	a0, SREG_DBREAKC + _index
	.set	_index, _index + 1
	.endr

	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
	wsr	a0, icountlevel

	l32i	a0, a3, DT_ICOUNT_SAVE
	xsr	a0, icount

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL

.Ldebug_save_dbreak:
	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	movi	a0, 0
	xsr	a0, SREG_DBREAKC + _index
	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	.set	_index, _index + 1
	.endr

	movi	a0, XCHAL_EXCM_LEVEL + 1
	xsr	a0, icountlevel
	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE

	movi	a0, 0xfffffffe
	xsr	a0, icount
	s32i	a0, a3, DT_ICOUNT_SAVE

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL
#else
	/* Debug exception while in exception mode. Should not happen. */
1:	j	1b	/* FIXME!! */
#endif

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stack pointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a1, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a4, panic
	movi	a6, unrecoverable_text

	callx4	a4

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *
 * This algorithm was taken from Ross Morley's RTOS Porting Layer:
 *
 *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
 *
 * It leverages the existing window spill/fill routines and their support for
 * double exceptions. The 'movsp' instruction will only cause an exception if
 * the next window needs to be loaded. In fact this ALLOCA exception may be
 * replaced at some point by changing the hardware to do an underflow exception
 * of the proper size instead.
 *
 * This algorithm simply backs out the register changes started by the user
 * exception handler, makes it appear that we have started a window underflow
 * by rotating the window back and then setting the old window base (OWB) in
 * the 'ps' register with the rolled back window base. The 'movsp' instruction
 * will be re-executed and this time, since the next window frame is in the
 * active AR registers, it won't cause an exception.
 *
 * If the WindowUnderflow code gets a TLB miss, the page will get mapped
 * and the partial WindowUnderflow will be handled in the double exception
 * handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */
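/*
 * Illustrative only: the PS.OWB fixup below is, in C terms,
 *
 *	owb = (ps >> PS_OWB_SHIFT) & ((1 << PS_OWB_WIDTH) - 1);
 *	ps ^= (owb ^ rolled_back_wb) << PS_OWB_SHIFT;
 *
 * i.e. PS.OWB is replaced with the rolled-back window base, so the
 * re-executed MOVSP sees a consistent old window base.
 */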

ENTRY(fast_alloca)
	rsr	a0, windowbase
	rotw	-1
	rsr	a2, ps
	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
	xor	a3, a3, a4
	l32i	a4, a6, PT_AREG0
	l32i	a1, a6, PT_DEPC
	rsr	a6, depc
	wsr	a1, depc
	slli	a3, a3, PS_OWB_SHIFT
	xor	a2, a2, a3
	wsr	a2, ps
	rsync

	_bbci.l	a4, 31, 4f
	rotw	-1
	_bbci.l	a8, 30, 8f
	rotw	-1
	j	_WindowUnderflow12
8:	j	_WindowUnderflow8
4:	j	_WindowUnderflow4
ENDPROC(fast_alloca)

/*
 * Fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These handlers are short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_syscall_kernel)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	kernel_exception

ENDPROC(fast_syscall_kernel)

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i    a0, a2, PT_AREG0        # restore a0
	xsr     a2, depc                # restore a2, depc

	wsr     a0, excsave1
	movi    a0, unrecoverable_exception
	callx0  a0

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	a3
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *       statements and continues from there
 *
 * Usage TRY	l32i	a0, a1, 0
 *		<other code>
 *	 done:	rfe
 *	 CATCH	<set return code>
 *		j done
 */
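/*
 * Illustrative only: a C model of the semantics implemented below
 * (atomicity follows from running inside the exception handler, not
 * from any hardware primitive):
 *
 *	int sysxtensa_model(int op, int *p, int v, int nv)
 *	{
 *		int old = *p;				// may fault: -EFAULT
 *		switch (op) {
 *		case SYS_XTENSA_ATOMIC_SET:	*p = v;		return old;
 *		case SYS_XTENSA_ATOMIC_ADD:
 *		case SYS_XTENSA_ATOMIC_EXG_ADD:	*p = old + v;	return old;
 *		case SYS_XTENSA_ATOMIC_CMP_SWP:
 *			if (old != v)
 *				return 0;
 *			*p = nv;			// may fault: -EFAULT
 *			return 1;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */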

#ifdef CONFIG_FAST_SYSCALL_XTENSA

#define TRY								\
	.section __ex_table, "a";					\
	.word	66f, 67f;						\
	.text;								\
66:

#define CATCH								\
67:

ENTRY(fast_syscall_xtensa)

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
TRY	s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

TRY	l32i	a7, a3, 0		# orig
	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
TRY	s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	rfe

CATCH
.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)

#else /* CONFIG_FAST_SYSCALL_XTENSA */

ENTRY(fast_syscall_xtensa)

	l32i    a0, a2, PT_AREG0        # restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_xtensa)

#endif /* CONFIG_FAST_SYSCALL_XTENSA */


/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	xsr	a3, excsave1
	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM
	xsr	a3, excsave1		# restore a3 and excsave_1

	/* Save a3, a4 and SAR on stack. */

	rsr	a0, sar
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_SAR

	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */

	s32i	a4, a2, PT_AREG4
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8
	s32i	a11, a2, PT_AREG11
	s32i	a12, a2, PT_AREG12
	s32i	a15, a2, PT_AREG15

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a4 = yyxxxwww1.
	 */

	rsr	a0, windowbase
	rsr	a3, windowstart		# a3 = xxxwww1yy
	ssr	a0			# holds WB
	slli	a0, a3, WSBITS
	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1

	/* We are done if there is no frame beyond the current register frame. */

	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
	movi	a0, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a0		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, windowstart		# save shifted windowstart
	neg	a0, a3
	and	a3, a0, a3		# first bit set from right: 000010000
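
	/* The neg/and pair above is the C idiom a3 &= -a3: isolate the
	 * least significant set bit (e.g. 0b0110100 -> 0b0000100).
	 */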

	ffs_ws	a0, a3			# a0: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
	ssr	a0			# save in SAR for later.

	rsr	a3, windowbase
	add	a3, a3, a0
	wsr	a3, windowbase
	rsync

	rsr	a3, windowstart
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending on what call was used (call4, call8, call12),
	 * we have to save 4, 8, or 12 registers.
	 */


.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20
	srli	a11, a3, 2		# shift windowstart by 2
	rotw	2
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a0, a5, -12
	s32e	a8, a0, -48
	mov	a8, a0

	s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stack pointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a4, a13
	rotw	-1

	s32e	a4, a8, -16
	s32e	a5, a8, -12
	s32e	a6, a8, -8
	s32e	a7, a8, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Lexit:

	/* Done. Do the final rotation and set WS */

	rotw	1
	rsr	a3, windowbase
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, windowstart
.Lnospill:

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_SAR
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a4, a2, PT_AREG4
	l32i	a7, a2, PT_AREG7
	l32i	a8, a2, PT_AREG8
	l32i	a11, a2, PT_AREG11
	l32i	a12, a2, PT_AREG12
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers, so set up a dummy frame and kill the user application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	rsr	a3, excsave1
	l32i	a1, a3, EXC_TABLE_KSTK

	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a4, ps
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

	/* shouldn't return, so panic */

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b


ENDPROC(fast_syscall_spill_registers)

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * Note that we only need to restore the bits in windowstart that have not
 * been spilled yet by the _spill_register routine. Luckily, a3 contains a
 * rotated windowstart with only those bits set for frames that haven't been
 * spilled yet. Because a3 is rotated such that bit 0 represents the register
 * frame for the current windowbase - 1, we need to rotate a3 left by the
 * value of the current windowbase + 1 and move it to windowstart.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

ENTRY(fast_syscall_spill_registers_fixup)

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 *
	 * Note: we use a3 to set the windowbase, so we take special care
	 * of it, saving it in the original _spill_registers frame across
	 * the exception handler call.
	 */

	xsr	a3, excsave1	# get spill-mask
	slli	a3, a3, 1	# shift left by one
	addi	a3, a3, 1	# set the bit for the current window frame

	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	srli	a3, a3, 1
	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
	xsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
	xsr	a2, excsave1

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	exctable
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available
	 *  depc: exception address
	 *  excsave: exctable
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	rsr	a3, excsave1
	rsr	a0, exccause
	addx4	a0, a0, a3			# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
	jx	a0

ENDPROC(fast_syscall_spill_registers_fixup)

ENTRY(fast_syscall_spill_registers_fixup_return)

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	rsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
	movi	a3, fast_syscall_spill_registers_fixup
	s32i	a3, a2, EXC_TABLE_FIXUP
	rsr	a3, windowbase
	s32i	a3, a2, EXC_TABLE_PARAM
	l32i	a2, a2, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	rsr	a3, excsave1
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE

	rfde

ENDPROC(fast_syscall_spill_registers_fixup_return)

#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

ENTRY(fast_syscall_spill_registers)

	l32i    a0, a2, PT_AREG0        # restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_spill_registers)

#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1 and a3. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 * 	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
	 *                 | PAGE_DIRECTORY
	 */

	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired ways (7-9) to hold pmd translations.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows us to map the three most common regions to three
	 * different DTLB ways:
	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *  2   -> way 8	shared libraries (2000.0000)
	 *  3   -> way 9	stack (3000.0000)
	 */
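
	/* Illustrative only: the way selection below computes, in C,
	 *
	 *	way   = DTLB_WAY_PGD + ((3 * ((vaddr >> 28) & 3)) >> 2);
	 *	entry = (ptevaddr & PAGE_MASK) + way;
	 *
	 * mapping the address-bit patterns 0,1,2,3 to way offsets 0,0,1,2.
	 */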

	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number

3:	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

4:	rsr	a3, excsave1
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a3, a2, PT_AREG3
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	bnez	a0, 8b

	/* Even more unlikely case active_mm == 0.
	 * We can get here with NMI in the middle of context_switch that
	 * touches vmalloc area.
	 */
	movi	a0, init_mm
	j	8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page,
	 * or one of the aliased cache flush functions was preempted by
	 * another task. Re-establish a temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, 2f
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, 2f

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, 2f

	addi	a1, a0, -TLBTEMP_SIZE
	bgeu	a1, a3, 2f

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	3b

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	4b


#endif	/* DCACHE_WAY_SIZE > PAGE_SIZE */


2:	/* Invalid PGD, default exception handling */

	rsr	a1, depc
	s32i	a1, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_second_level_miss)

/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a3. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a3)
	l32i	a0, a0, 0
	beqz	a0, 2f

	/*
	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
	 */
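
	/* Illustrative only: the check and update below correspond to
	 *
	 *	if ((pte & _PAGE_CA_INVALID) != _PAGE_CA_INVALID &&
	 *	    (pte & (1 << _PAGE_WRITABLE_BIT))) {
	 *		pte |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
	 *		*ptep = pte;	// then refresh the DTLB entry
	 *	}
	 */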

	_PTE_OFFSET(a0, a1, a3)
	l32i	a3, a0, 0		# read pteval
	movi	a1, _PAGE_CA_INVALID
	ball	a3, a1, 2f
	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a3, a3, a1
	rsr	a1, excvaddr
	s32i	a3, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a3, a0

	/* Exit critical section. */

	movi	a0, 0
	rsr	a3, excsave1
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a3, a2, PT_AREG3
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a3, depc	# still holds a2
	s32i	a3, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */
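/*
 * Illustrative only: the dispatch below is roughly
 *
 *	regs->syscall = nr = regs->areg[2];
 *	do_syscall_trace_enter(regs);
 *	if (nr < __NR_syscall_count && sys_call_table[nr] != sys_ni_syscall)
 *		regs->areg[2] = sys_call_table[nr](args...);
 *	else
 *		regs->areg[2] = -ENOSYS;
 *	do_syscall_trace_leave(regs);
 *
 * with arg0..arg5 taken from areg[6,3,4,5,8,9] and a pointer to pt_regs
 * additionally passed on the stack.
 */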

ENTRY(system_call)

	entry	a1, 32

	/* regs->syscall = regs->areg[2] */

	l32i	a3, a2, PT_AREG2
	mov	a6, a2
	movi	a4, do_syscall_trace_enter
	s32i	a3, a2, PT_SYSCALL
	callx4	a4
	mov	a3, a6

	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table
	movi	a5, __NR_syscall_count
	movi	a6, -ENOSYS
	bgeu	a3, a5, 1f

	addx4	a4, a3, a4
	l32i	a4, a4, 0
	movi	a5, sys_ni_syscall
	beq	a4, a5, 1f

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	a6, a2, PT_AREG6
	l32i	a7, a2, PT_AREG3
	l32i	a8, a2, PT_AREG4
	l32i	a9, a2, PT_AREG5
	l32i	a10, a2, PT_AREG8
	l32i	a11, a2, PT_AREG9

	/* Pass one additional argument to the syscall: pt_regs (on stack) */
	s32i	a2, a1, 0

	callx4	a4

1:	/* regs->areg[2] = return_value */

	s32i	a6, a2, PT_AREG2
	movi	a4, do_syscall_trace_leave
	mov	a6, a2
	callx4	a4
	retw

ENDPROC(system_call)

/*
 * Spill live registers on the kernel stack macro.
 *
 * Entry condition: ps.woe is set, ps.excm is cleared
 * Exit condition: windowstart has single bit set
 * May clobber: a12, a13
 */
	.macro	spill_registers_kernel

#if XCHAL_NUM_AREGS > 16
	call12	1f
	_j	2f
	retw
	.align	4
1:
	_entry	a1, 48
	addi	a12, a0, 3
#if XCHAL_NUM_AREGS > 32
	.rept	(XCHAL_NUM_AREGS - 32) / 12
	_entry	a1, 48
	mov	a12, a0
	.endr
#endif
	_entry	a1, 16
#if XCHAL_NUM_AREGS % 12 == 0
	mov	a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
	mov	a12, a12
#elif XCHAL_NUM_AREGS % 12 == 8
	mov	a4, a4
#endif
	retw
2:
#else
	mov	a12, a12
#endif
	.endm

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                              a2                 a3
 */

ENTRY(_switch_to)

	entry	a1, 48

	mov	a11, a3			# preserve 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

#if THREAD_RA > 1020 || THREAD_SP > 1020
	addi	a10, a2, TASK_THREAD
	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
#else
	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer
#endif

	/* Disable interrupts while we manipulate the stack pointer. */

	irq_save a14, a3
	rsync

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
	xsr	a3, cpenable
	s32i	a3, a4, THREAD_CPENABLE
#endif

	/* Flush register file. */

	spill_registers_kernel

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	rsr	a3, excsave1		# exc_table
	addi	a7, a5, PT_REGS_OFFSET
	s32i	a7, a3, EXC_TABLE_KSTK

	/* restore context of the task 'next' */

	l32i	a0, a11, THREAD_RA	# restore return address
	l32i	a1, a11, THREAD_SP	# restore stack pointer

	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

	wsr	a14, ps
	rsync

	retw

ENDPROC(_switch_to)

ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */
	movi	a4, schedule_tail
	callx4	a4

	movi	a4, do_syscall_trace_leave
	mov	a6, a1
	callx4	a4

	j	common_exception_return

ENDPROC(ret_from_fork)

/*
 * Kernel thread creation helper
 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
 *           left from _switch_to: a6 = prev
 */
ENTRY(ret_from_kernel_thread)

	call4	schedule_tail
	mov	a6, a3
	callx4	a2
	j	common_exception_return

ENDPROC(ret_from_kernel_thread)
