/*
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi    \bit, \bit, WSBITS - 32 + 1	# uppermost bit set -> return 1
#else
	movi    \bit, WSBITS
#if WSBITS > 16
	_bltui  \mask, 0x10000, 99f
	addi    \bit, \bit, -16
	extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui  \mask, 0x100, 99f
	addi    \bit, \bit, -8
	srli    \mask, \mask, 8
#endif
99:	_bltui  \mask, 0x10, 99f
	addi    \bit, \bit, -4
	srli    \mask, \mask, 4
99:	_bltui  \mask, 0x4, 99f
	addi    \bit, \bit, -2
	srli    \mask, \mask, 2
99:	_bltui  \mask, 0x2, 99f
	addi    \bit, \bit, -1
99:

#endif
	.endm

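/* A rough C model of ffs_ws (illustrative sketch only, not built code):
 *
 *	for (bit = 1; bit <= WSBITS; bit++)
 *		if (mask & (1 << (WSBITS - bit)))
 *			return bit;	(first set bit from the left)
 *
 * Both the NSA and the binary-search variant above compute this.
 */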

	.macro	irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
	rsr	\flags, ps
	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	\tmp, LOCKLEVEL, 99f
	rsil	\tmp, LOCKLEVEL
99:
#else
	movi	\tmp, LOCKLEVEL
	rsr	\flags, ps
	or	\flags, \flags, \tmp
	xsr	\flags, ps
	rsync
#endif
#else
	rsil	\flags, LOCKLEVEL
#endif
	.endm

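/* In C terms, irq_save is roughly (illustrative sketch only):
 *
 *	flags = ps;
 *	ps.intlevel = LOCKLEVEL;
 *
 * (done with rsil, or by OR-ing LOCKLEVEL into PS; the debug variant
 * under XTENSA_FAKE_NMI only raises the level, never lowers it)
 */
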
/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */
	.literal_position

ENTRY(user_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	wsr	a2, depc		# terminate user stack trace with 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

#if defined(USER_SUPPORT_WINDOWED)
	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers
#else
	movi	a2, 0
	movi	a3, 1
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	s32i	a3, a1, PT_WMASK
#endif
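
	/* In C, the rotation above is roughly (illustrative sketch):
	 *
	 *	wmask = ((ws >> wb) | (ws << (WSBITS - wb))) &
	 *		((1 << WSBITS) - 1);
	 *
	 * with ws = WINDOWSTART and wb = WINDOWBASE, so bit 0 of PT_WMASK
	 * is always the current frame's bit.
	 */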

	/* Save only live registers. */

UABI_W	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
UABI_W	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
UABI_W	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

#if defined(USER_SUPPORT_WINDOWED)
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */
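
	/* Worked example (sketch): with WSBITS = 8 and a rotated
	 * windowstart a2 = 00100101, the next valid frame above bit 0 is
	 * bit 2, ffs_ws returns 8 - 2 = 6, and the code below builds
	 * PT_WMASK = (6 << 4) | 0101.
	 */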

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */
#endif
2:	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)

/*
 * First-level exit handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit
#endif

	/* Save only the live window-frame */

KABI_W	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
KABI_W	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
KABI_W	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

#ifdef __XTENSA_WINDOWED_ABI__
	_bnei	a2, 1, 1f
	/* Copy spill slots of a0 and a1 to imitate movsp
	 * in order to keep exception stack continuous
	 */
	l32i	a3, a1, PT_KERNEL_SIZE
	l32i	a0, a1, PT_KERNEL_SIZE + 4
	s32e	a3, a1, -16
	s32e	a0, a1, -12
#endif
1:
	l32i	a0, a1, PT_AREG0	# restore saved a0
	wsr	a0, depc

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, debugcause
	rsr	a3, epc1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, NO_SYSCALL
	rsr	a3, excvaddr
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
#if XCHAL_HAVE_LOOPS
	xsr	a2, lcount
	s32i	a2, a1, PT_LCOUNT
#endif

#if XCHAL_HAVE_EXCLUSIVE
	/* Clear exclusive access monitor set by interrupted code */
	clrex
#endif

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a2, exccause
	movi	a3, 0
	rsr	a0, excsave1
	s32i	a2, a1, PT_EXCCAUSE
	s32i	a3, a0, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid.
	 * Now we can allow exceptions again. In case we've got an interrupt
	 * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
	 * otherwise it's left unchanged.
	 *
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 */

	rsr	a3, ps
	s32i	a3, a1, PT_PS		# save ps

#if XTENSA_FAKE_NMI
	/* Correct PS needs to be saved in the PT_PS:
	 * - in case of exception or level-1 interrupt it's in the PS,
	 *   and is already saved.
	 * - in case of medium level interrupt it's in the excsave2.
	 */
	movi	a0, EXCCAUSE_MAPPED_NMI
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beq	a2, a0, .Lmedium_level_irq
	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0

.Lmedium_level_irq:
	rsr	a0, excsave2
	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
	bgei	a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
	movi	a3, LOCKLEVEL

.Lexception:
KABI_W	movi	a0, PS_WOE_MASK
KABI_W	or	a3, a3, a0
#else
	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
	movi	a0, LOCKLEVEL
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
					# a3 = PS.INTLEVEL
	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
KABI_W	movi	a2, PS_WOE_MASK
KABI_W	or	a3, a3, a2
#endif

	/* restore return address (or 0 if return to userspace) */
	rsr	a0, depc
	wsr	a3, ps
	rsync				# PS.WOE => rsync => overflow

	/* Save lbeg, lend */
#if XCHAL_HAVE_LOOPS
	rsr	a4, lbeg
	rsr	a3, lend
	s32i	a4, a1, PT_LBEG
	s32i	a3, a1, PT_LEND
#endif

	/* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	rsr	a3, scompare1
	s32i	a3, a1, PT_SCOMPARE1
#endif

	/* Save optional registers. */

	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT

#ifdef CONFIG_TRACE_IRQFLAGS
	rsr		abi_tmp0, ps
	extui		abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beqz		abi_tmp0, 1f
	abi_call	trace_hardirqs_off
1:
#endif
#ifdef CONFIG_CONTEXT_TRACKING
	l32i		abi_tmp0, a1, PT_PS
	bbci.l		abi_tmp0, PS_UM_BIT, 1f
	abi_call	context_tracking_user_exit
1:
#endif

	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	l32i		abi_arg1, a1, PT_EXCCAUSE	# pass EXCCAUSE
	rsr		abi_tmp0, excsave1
	addx4		abi_tmp0, abi_arg1, abi_tmp0
	l32i		abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT	# load handler
	mov		abi_arg0, a1			# pass stack frame

	/* Call the second-level handler */

	abi_callx	abi_tmp0

	/* Jump here for exception exit */
	.global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
	l32i		abi_tmp0, a1, PT_EXCCAUSE
	movi		abi_tmp1, EXCCAUSE_MAPPED_NMI
	l32i		abi_saved1, a1, PT_PS
	beq		abi_tmp0, abi_tmp1, .Lrestore_state
#endif
.Ltif_loop:
	irq_save	abi_tmp0, abi_tmp1
#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_off
#endif

	/* Jump if we are returning from kernel exceptions. */

	l32i		abi_saved1, a1, PT_PS
	GET_THREAD_INFO(abi_tmp0, a1)
	l32i		abi_saved0, abi_tmp0, TI_FLAGS
	_bbci.l		abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	_bbsi.l		abi_saved0, TIF_NEED_RESCHED, .Lresched
	movi		abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
	bnone		abi_saved0, abi_tmp0, .Lexit_tif_loop_user

	l32i		abi_tmp0, a1, PT_DEPC
	bgeui		abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state

	/* Call do_signal() */

#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_on
#endif
	rsil		abi_tmp0, 0
	mov		abi_arg0, a1
	abi_call	do_notify_resume	# int do_notify_resume(struct pt_regs*)
	j		.Ltif_loop

.Lresched:
#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_on
#endif
	rsil		abi_tmp0, 0
	abi_call	schedule	# void schedule (void)
	j		.Ltif_loop

.Lexit_tif_loop_kernel:
#ifdef CONFIG_PREEMPTION
	_bbci.l		abi_saved0, TIF_NEED_RESCHED, .Lrestore_state

	/* Check current_thread_info->preempt_count */

	l32i		abi_tmp1, abi_tmp0, TI_PRE_COUNT
	bnez		abi_tmp1, .Lrestore_state
	abi_call	preempt_schedule_irq
#endif
	j		.Lrestore_state

.Lexit_tif_loop_user:
#ifdef CONFIG_CONTEXT_TRACKING
	abi_call	context_tracking_user_enter
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	_bbci.l		abi_saved0, TIF_DB_DISABLED, 1f
	abi_call	restore_dbreak
1:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
	l32i		abi_tmp0, a1, PT_DEPC
	bgeui		abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
	abi_call	check_tlb_sanity
#endif

.Lrestore_state:
#ifdef CONFIG_TRACE_IRQFLAGS
	extui		abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei		abi_tmp0, LOCKLEVEL, 1f
	abi_call	trace_hardirqs_on
1:
#endif
	/*
	 * Restore optional registers.
	 * abi_arg* are used as temporary registers here.
	 */

	load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i		abi_tmp0, a1, PT_SCOMPARE1
	wsr		abi_tmp0, scompare1
#endif
	wsr		abi_saved1, ps		/* disable interrupts */
	_bbci.l		abi_saved1, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

#if defined(USER_SUPPORT_WINDOWED)
	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, .Lclear_regs	# only have to restore current window?

	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

1:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 1b

	/* Clear unrestored registers (don't leak anything to user-land) */

.Lclear_regs:
	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */
2:
#else
	movi	a2, 1
#endif
#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */
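
	/* Worked example: WINDOWSTART = 001000 gives 001000 & 000111 = 0
	 * (only the current frame is live, no movsp needed), whereas
	 * 001010 & 001001 != 0 indicates a second live frame.
	 */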

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, windowstart
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_KERNEL_SIZE + 0
	s32i	a4, a1, PT_KERNEL_SIZE + 4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_KERNEL_SIZE + 8
	s32i	a4, a1, PT_KERNEL_SIZE + 12

	/* Common exception exit.
	 * We restore the special registers and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */
#else
	movi	a2, 1
#endif

common_exception_exit:

	/* Restore address registers. */

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */
#if XCHAL_HAVE_LOOPS
	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount
#endif

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was a double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, depc
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

	.literal_position

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	wsr	a2, ps

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16 - PT_KERNEL_SIZE	# assume kernel stack
3:
	l32i	a0, a3, DT_DEBUG_SAVE
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2

	/* Debug exception is handled as an exception, so interrupts will
	 * likely be enabled in the common exception handler. Disable
	 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
	 * meaning.
	 */
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
	GET_THREAD_INFO(a2, a1)
	l32i	a3, a2, TI_PRE_COUNT
	addi	a3, a3, 1
	s32i	a3, a2, TI_PRE_COUNT
#endif

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, _user_exception
	j	_kernel_exception

2:	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	j	3b

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Debug exception while in exception mode. This may happen when the
	 * window overflow/underflow handler or a fast exception handler hits
	 * a data breakpoint, in which case we save and disable all data
	 * breakpoints, single-step the faulting instruction and restore the
	 * data breakpoints.
	 */
1:
	bbci.l	a0, PS_UM_BIT, 1b	# jump if kernel mode

	rsr	a0, debugcause
	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak

	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	wsr	a0, SREG_DBREAKC + _index
	.set	_index, _index + 1
	.endr

	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
	wsr	a0, icountlevel

	l32i	a0, a3, DT_ICOUNT_SAVE
	xsr	a0, icount

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL

.Ldebug_save_dbreak:
	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	movi	a0, 0
	xsr	a0, SREG_DBREAKC + _index
	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	.set	_index, _index + 1
	.endr

	movi	a0, XCHAL_EXCM_LEVEL + 1
	xsr	a0, icountlevel
	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE

	movi	a0, 0xfffffffe
	xsr	a0, icount
	s32i	a0, a3, DT_ICOUNT_SAVE

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL
#else
	/* Debug exception while in exception mode. Should not happen. */
1:	j	1b	// FIXME!!
#endif

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to setup a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

	.literal_position

ENTRY(unrecoverable_exception)

#if XCHAL_HAVE_WINDOWED
	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync
#endif

	movi	a1, KERNEL_PS_WOE_MASK | LOCKLEVEL
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	abi_arg0, unrecoverable_text
	abi_call	panic

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

	__XTENSA_HANDLER
	.literal_position

#ifdef SUPPORT_WINDOWED
/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *
 * This algorithm was taken from Ross Morley's RTOS Porting Layer:
 *
 *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
 *
 * It leverages the existing window spill/fill routines and their support for
 * double exceptions. The 'movsp' instruction will only cause an exception if
 * the next window needs to be loaded. In fact this ALLOCA exception may be
 * replaced at some point by changing the hardware to do an underflow exception
 * of the proper size instead.
 *
 * This algorithm simply backs out the register changes started by the user
 * exception handler, makes it appear that we have started a window underflow
 * by rotating the window back and then setting the old window base (OWB) in
 * the 'ps' register with the rolled back window base. The 'movsp' instruction
 * will be re-executed and this time, since the next window frame is in the
 * active AR registers, it won't cause an exception.
 *
 * If the WindowUnderflow code gets a TLB miss, the page will get mapped
 * and the partial WindowUnderflow will be handled in the double exception
 * handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */
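
/* Note on the branch tests below: windowed call4/call8/call12 encode the
 * caller's window increment in the two top bits of the return address left
 * in a0 (01, 10 and 11 respectively), so after rotating back, the saved a0
 * is tested at bits 31 and 30 to pick the matching _WindowUnderflow4/8/12
 * routine.
 */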

ENTRY(fast_alloca)
	rsr	a0, windowbase
	rotw	-1
	rsr	a2, ps
	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
	xor	a3, a3, a4
	l32i	a4, a6, PT_AREG0
	l32i	a1, a6, PT_DEPC
	rsr	a6, depc
	wsr	a1, depc
	slli	a3, a3, PS_OWB_SHIFT
	xor	a2, a2, a3
	wsr	a2, ps
	rsync

	_bbci.l	a4, 31, 4f
	rotw	-1
	_bbci.l	a8, 30, 8f
	rotw	-1
	j	_WindowUnderflow12
8:	j	_WindowUnderflow8
4:	j	_WindowUnderflow4
ENDPROC(fast_alloca)
#endif

#ifdef CONFIG_USER_ABI_CALL0_PROBE
/*
 * fast illegal instruction handler.
 *
 * This is used to fix up user PS.WOE on the exception caused
 * by the first opcode related to the register window. If PS.WOE is
 * already set it goes directly to the common user exception handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_illegal_instruction_user)

	rsr	a0, ps
	bbsi.l	a0, PS_WOE_BIT, 1f
	s32i	a3, a2, PT_AREG3
	movi	a3, PS_WOE_MASK
	or	a0, a0, a3
	wsr	a0, ps
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	rsr	a2, depc
	rfe
1:
	call0	user_exception

ENDPROC(fast_illegal_instruction_user)
#endif

/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	call0	user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, depc		# restore a2, depc

	wsr	a0, excsave1
	call0	unrecoverable_exception

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	a3
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 */
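
/* A rough C model of the fast path below (illustrative sketch only):
 *
 *	switch (op) {
 *	case SYS_XTENSA_ATOMIC_SET:	old = *ptr; *ptr = val;		break;
 *	case SYS_XTENSA_ATOMIC_ADD:
 *	case SYS_XTENSA_ATOMIC_EXG_ADD:	old = *ptr; *ptr = old + val;	break;
 *	case SYS_XTENSA_ATOMIC_CMP_SWP:	if (*ptr == oldval) {
 *						*ptr = newval; return 1;
 *					} else	return 0;
 *	}
 *	return old;
 *
 * (note that in this fast path ATOMIC_ADD and ATOMIC_EXG_ADD both return
 * the original value)
 */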

	.literal_position

#ifdef CONFIG_FAST_SYSCALL_XTENSA

ENTRY(fast_syscall_xtensa)

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

EX(.Leac) l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
EX(.Leac) s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

EX(.Leac) l32i	a7, a3, 0		# orig
	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
EX(.Leac) s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	rfe

.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)

#else /* CONFIG_FAST_SYSCALL_XTENSA */

ENTRY(fast_syscall_xtensa)

	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_xtensa)

#endif /* CONFIG_FAST_SYSCALL_XTENSA */

/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

#if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \
		defined(USER_SUPPORT_WINDOWED)

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	xsr	a3, excsave1
	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM
	xsr	a3, excsave1		# restore a3 and excsave_1

	/* Save a3, a4 and SAR on stack. */

	rsr	a0, sar
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_SAR

	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */

	s32i	a4, a2, PT_AREG4
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8
	s32i	a11, a2, PT_AREG11
	s32i	a12, a2, PT_AREG12
	s32i	a15, a2, PT_AREG15

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a4 = yyxxxwww1.
	 */

	rsr	a0, windowbase
	rsr	a3, windowstart		# a3 = xxxwww1yy
	ssr	a0			# holds WB
	slli	a0, a3, WSBITS
	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1

	/* We are done if there are no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
	movi	a0, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a0		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, windowstart		# save shifted windowstart
	neg	a0, a3
	and	a3, a0, a3		# first bit set from right: 000010000

	ffs_ws	a0, a3			# a0: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
	ssr	a0			# save in SAR for later.

	rsr	a3, windowbase
	add	a3, a3, a0
	wsr	a3, windowbase
	rsync

	rsr	a3, windowstart
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending on what call was used (call4, call8, call12),
	 * we have to save 4, 8, or 12 registers.
	 */


.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20
	srli	a11, a3, 2		# shift windowstart by 2
	rotw	2
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a0, a5, -12
	s32e	a8, a0, -48
	mov	a8, a0

	s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a4, a13
	rotw	-1

	s32e	a4, a8, -16
	s32e	a5, a8, -12
	s32e	a6, a8, -8
	s32e	a7, a8, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Lexit:

	/* Done. Do the final rotation and set WS */

	rotw	1
	rsr	a3, windowbase
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, windowstart
.Lnospill:

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_SAR
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a4, a2, PT_AREG4
	l32i	a7, a2, PT_AREG7
	l32i	a8, a2, PT_AREG8
	l32i	a11, a2, PT_AREG11
	l32i	a12, a2, PT_AREG12
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers, so set up a dummy frame and kill the user application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	rsr	a3, excsave1
	l32i	a1, a3, EXC_TABLE_KSTK

	movi	a4, KERNEL_PS_WOE_MASK | LOCKLEVEL
	wsr	a4, ps
	rsync

	movi	abi_arg0, SIGSEGV
	abi_call	make_task_dead

	/* shouldn't return, so panic */

	wsr	a0, excsave1
	call0	unrecoverable_exception		# should not return
1:	j	1b


ENDPROC(fast_syscall_spill_registers)

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * Note that we only need to restore the bits in windowstart that have not
 * been spilled yet by the _spill_register routine. Luckily, a3 contains a
 * rotated windowstart with only those bits set for frames that haven't been
 * spilled yet. Because a3 is rotated such that bit 0 represents the register
 * frame for the current windowbase - 1, we need to rotate a3 left by the
 * value of the current windowbase + 1 and move it to windowstart.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

ENTRY(fast_syscall_spill_registers_fixup)

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 *
	 * Note: we use a3 to set the windowbase, so we take a special care
	 * of it, saving it in the original _spill_registers frame across
	 * the exception handler call.
	 */

	xsr	a3, excsave1	# get spill-mask
	slli	a3, a3, 1	# shift left by one
	addi	a3, a3, 1	# set the bit for the current window frame

	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	srli	a3, a3, 1
	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
	xsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
	xsr	a2, excsave1

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	exctable
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available
	 *  depc: exception address
	 *  excsave: exctable
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	rsr	a3, excsave1
	rsr	a0, exccause
	addx4	a0, a0, a3			# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
	jx	a0

ENDPROC(fast_syscall_spill_registers_fixup)

ENTRY(fast_syscall_spill_registers_fixup_return)

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	rsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
	movi	a3, fast_syscall_spill_registers_fixup
	s32i	a3, a2, EXC_TABLE_FIXUP
	rsr	a3, windowbase
	s32i	a3, a2, EXC_TABLE_PARAM
	l32i	a2, a2, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	rsr	a3, excsave1
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE

	rfde

ENDPROC(fast_syscall_spill_registers_fixup_return)

#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

ENTRY(fast_syscall_spill_registers)

	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_spill_registers)

#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:
	call0	unrecoverable_exception		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1 and a3. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
	 *                 | PAGE_DIRECTORY
	 */

	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired ways (7-9) to hold pmd translations.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows mapping the three most common regions to three different
	 * DTLBs:
	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *  2   -> way 8	shared libraries (2000.0000)
	 *  3   -> way 9	stack (3000.0000)
	 */
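
	/* Worked example (sketch): excvaddr bits 29..28 = 2 (an address in
	 * the 2000.0000 region) gives 2 * 3 = 6, then 6 >> 2 = 1, so its
	 * PGD entry is placed in way DTLB_WAY_PGD + 1.
	 */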

	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number

3:	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

4:	rsr	a3, excsave1
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a3, a2, PT_AREG3
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	bnez	a0, 8b

	/* Even more unlikely case active_mm == 0.
	 * We can get here with NMI in the middle of context_switch that
	 * touches vmalloc area.
	 */
	movi	a0, init_mm
	j	8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or the aliased cache flush functions got preemptively interrupted
	 * by another task. Re-establish temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, 2f
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, 2f

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, 2f

	addi	a1, a0, -TLBTEMP_SIZE
	bgeu	a1, a3, 2f

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	3b

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	4b


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE


2:	/* Invalid PGD, default exception handling */

	rsr	a1, depc
	s32i	a1, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_second_level_miss)

/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the dtlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */
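
/* A rough C model of the fast path below (illustrative sketch only):
 *
 *	pte = *pte_ptr;
 *	if (pte is present, cacheable and has _PAGE_WRITABLE_BIT set) {
 *		pte |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 *		*pte_ptr = pte;
 *		(write the updated pte into the DTLB)
 *	} else
 *		(fall back to the C fault handler)
 */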

ENTRY(fast_store_prohibited)

	/* Save a1 and a3. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a3)
	l32i	a0, a0, 0
	beqz	a0, 2f

	/*
	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
	 */

	_PTE_OFFSET(a0, a1, a3)
	l32i	a3, a0, 0		# read pteval
	movi	a1, _PAGE_CA_INVALID
	ball	a3, a1, 2f
	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a3, a3, a1
	rsr	a1, excvaddr
	s32i	a3, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a3, a0

	/* Exit critical section. */

	movi	a0, 0
	rsr	a3, excsave1
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a3, a2, PT_AREG3
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a3, depc	# still holds a2
	s32i	a3, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

	.text
/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */
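
/* The dispatch below is roughly (illustrative sketch):
 *
 *	regs->syscall = nr = regs->areg[2];
 *	if (nr < __NR_syscalls)
 *		res = sys_call_table[nr](regs->areg[6], regs->areg[3],
 *					 regs->areg[4], regs->areg[5],
 *					 regs->areg[8], regs->areg[9]);
 *	else
 *		res = -ENOSYS;
 *	regs->areg[2] = res;
 */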
	.literal_position

ENTRY(system_call)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry_default
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(12)

	s32i	a0, sp, 0
	s32i	abi_saved0, sp, 4
	s32i	abi_saved1, sp, 8
	mov	abi_saved0, a2
#else
#error Unsupported Xtensa ABI
#endif

	/* regs->syscall = regs->areg[2] */

	l32i	a7, abi_saved0, PT_AREG2
	s32i	a7, abi_saved0, PT_SYSCALL

	GET_THREAD_INFO(a4, a1)
	l32i	abi_saved1, a4, TI_FLAGS
	movi	a4, _TIF_WORK_MASK
	and	abi_saved1, abi_saved1, a4
	beqz	abi_saved1, 1f

	mov	abi_arg0, abi_saved0
	abi_call	do_syscall_trace_enter
	beqz	abi_rv, .Lsyscall_exit
	l32i	a7, abi_saved0, PT_SYSCALL

1:
	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table
	movi	a5, __NR_syscalls
	movi	abi_rv, -ENOSYS
	bgeu	a7, a5, 1f

	addx4	a4, a7, a4
	l32i	abi_tmp0, a4, 0

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	abi_arg0, abi_saved0, PT_AREG6
	l32i	abi_arg1, abi_saved0, PT_AREG3
	l32i	abi_arg2, abi_saved0, PT_AREG4
	l32i	abi_arg3, abi_saved0, PT_AREG5
	l32i	abi_arg4, abi_saved0, PT_AREG8
	l32i	abi_arg5, abi_saved0, PT_AREG9

	abi_callx	abi_tmp0

1:	/* regs->areg[2] = return_value */

	s32i	abi_rv, abi_saved0, PT_AREG2
	bnez	abi_saved1, 1f
.Lsyscall_exit:
#if defined(__XTENSA_WINDOWED_ABI__)
	abi_ret_default
#elif defined(__XTENSA_CALL0_ABI__)
	l32i	a0, sp, 0
	l32i	abi_saved0, sp, 4
	l32i	abi_saved1, sp, 8
	abi_ret(12)
#else
#error Unsupported Xtensa ABI
#endif

1:
	mov	abi_arg0, abi_saved0
	abi_call	do_syscall_trace_leave
	j	.Lsyscall_exit

ENDPROC(system_call)

/*
 * Spill live registers on the kernel stack macro.
 *
 * Entry condition: ps.woe is set, ps.excm is cleared
 * Exit condition: windowstart has single bit set
 * May clobber: a12, a13
 */
	.macro	spill_registers_kernel

#if XCHAL_NUM_AREGS > 16
	call12	1f
	_j	2f
	retw
	.align	4
1:
	_entry	a1, 48
	addi	a12, a0, 3
#if XCHAL_NUM_AREGS > 32
	.rept	(XCHAL_NUM_AREGS - 32) / 12
	_entry	a1, 48
	mov	a12, a0
	.endr
#endif
	_entry	a1, 16
#if XCHAL_NUM_AREGS % 12 == 0
	mov	a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
	mov	a12, a12
#elif XCHAL_NUM_AREGS % 12 == 8
	mov	a4, a4
#endif
	retw
2:
#else
	mov	a12, a12
#endif
	.endm

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                              a2                 a3
 */

ENTRY(_switch_to)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(16)

	s32i	a12, sp, 0
	s32i	a13, sp, 4
	s32i	a14, sp, 8
	s32i	a15, sp, 12
#else
#error Unsupported Xtensa ABI
#endif
	mov	a11, a3			# preserve 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

#if THREAD_RA > 1020 || THREAD_SP > 1020
	addi	a10, a2, TASK_THREAD
	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
#else
	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer
#endif

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	movi	a6, __stack_chk_guard
	l32i	a8, a3, TASK_STACK_CANARY
	s32i	a8, a6, 0
#endif

	/* Disable ints while we manipulate the stack pointer. */

	irq_save a14, a3
	rsync

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
	xsr	a3, cpenable
	s32i	a3, a4, THREAD_CPENABLE
#endif

#if XCHAL_HAVE_EXCLUSIVE
	l32i	a3, a5, THREAD_ATOMCTL8
	getex	a3
	s32i	a3, a4, THREAD_ATOMCTL8
#endif

	/* Flush register file. */

#if defined(__XTENSA_WINDOWED_ABI__)
	spill_registers_kernel
#endif

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	rsr	a3, excsave1		# exc_table
	addi	a7, a5, PT_REGS_OFFSET
	s32i	a7, a3, EXC_TABLE_KSTK

	/* restore context of the task 'next' */

	l32i	a0, a11, THREAD_RA	# restore return address
	l32i	a1, a11, THREAD_SP	# restore stack pointer

	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

	wsr	a14, ps
	rsync

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_ret(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	l32i	a12, sp, 0
	l32i	a13, sp, 4
	l32i	a14, sp, 8
	l32i	a15, sp, 12
	abi_ret(16)
#else
#error Unsupported Xtensa ABI
#endif

ENDPROC(_switch_to)

ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in abi_arg0 (return value from fake call frame)
	 */
	abi_call	schedule_tail

	mov		abi_arg0, a1
	abi_call	do_syscall_trace_leave
	j		common_exception_return

ENDPROC(ret_from_fork)

/*
 * Kernel thread creation helper
 * On entry, set up by copy_thread: abi_saved0 = thread_fn,
 * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev
 */
ENTRY(ret_from_kernel_thread)

	abi_call	schedule_tail
	mov		abi_arg0, abi_saved1
	abi_callx	abi_saved0
	j		common_exception_return

ENDPROC(ret_from_kernel_thread)
2150