/*
 * arch/xtensa/kernel/entry.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2005 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <xtensa/coreasm.h>

/* Unimplemented features. */

#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
#undef KERNEL_STACK_OVERFLOW_CHECK
#undef PREEMPTIBLE_KERNEL
#undef ALLOCA_EXCEPTION_IN_IRAM

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau    \bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi    \bit, \bit, WSBITS - 32 + 1	# uppermost bit set -> return 1
#else
	movi    \bit, WSBITS
#if WSBITS > 16
	_bltui  \mask, 0x10000, 99f
	addi    \bit, \bit, -16
	extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui  \mask, 0x100, 99f
	addi    \bit, \bit, -8
	srli    \mask, \mask, 8
#endif
99:	_bltui  \mask, 0x10, 99f
	addi    \bit, \bit, -4
	srli    \mask, \mask, 4
99:	_bltui  \mask, 0x4, 99f
	addi    \bit, \bit, -2
	srli    \mask, \mask, 2
99:	_bltui  \mask, 0x2, 99f
	addi    \bit, \bit, -1
99:

#endif
	.endm
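
/* A rough C model of the macro above (an illustrative sketch, not used by
 * the kernel; 'wsbits' stands in for WSBITS and 'mask' is assumed to have
 * exactly one bit set):
 *
 *	unsigned ffs_ws(unsigned mask, unsigned wsbits)
 *	{
 *		unsigned bit = wsbits;	// result if bit 0 is set
 *		while (mask >>= 1)	// each step moves the '1' down,
 *			bit--;		// counting from the left
 *		return bit;		// 1 for the uppermost bit
 *	}
 */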

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC,
 *   excsave has been restored, and
 *   the stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12.
 */

ENTRY(user_exception)

	/* Save a2, a3, and depc, restore excsave_1 and set SP. */

	xsr	a3, EXCSAVE_1
	rsr	a0, DEPC
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, SAR
	wsr	a2, ICOUNTLEVEL
	s32i	a3, a1, PT_SAR

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
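
	/* In C terms, the rotation computed below is (an illustrative sketch;
	 * 'ws' and 'wb' mirror WINDOWSTART and WINDOWBASE, assuming
	 * 0 < WSBITS < 32 and wb < WSBITS):
	 *
	 *	unsigned rotate_ws(unsigned ws, unsigned wb)
	 *	{
	 *		return ((ws >> wb) | (ws << (WSBITS - wb)))
	 *			& ((1U << WSBITS) - 1);
	 *	}
	 *
	 * so bit 0 of the result corresponds to the current window frame.
	 */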

	rsr	a2, WINDOWBASE
	rsr	a3, WINDOWSTART
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers

	/* Save only live registers. */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */
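
	/* A worked example of the encoding (illustrative numbers): if the
	 * rotated WINDOWSTART in a2 has 0b1001 in its lower 4 bits and ffs_ws
	 * returned 3 frames in a0, the stored WMASK is
	 *
	 *	(3 << 4) | (0b1001 & 0xf) = 0x39
	 */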

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, SAR			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, WINDOWSTART		# set corresponding WINDOWSTART bit
	wsr	a2, WINDOWBASE		# and restore WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */

2:
#if XCHAL_EXTRA_SA_SIZE

	/* For user exceptions, save the extra state into the user's TCB.
	 * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
	 */

	GET_CURRENT(a2,a1)
	addi	a2, a2, THREAD_CP_SAVE
	xchal_extra_store_funcbody
#endif

	/* Now, jump to the common exception handler. */

	j	common_exception


/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC,
 *   excsave has been restored, and
 *   the stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12.
 */

ENTRY(kernel_exception)

	/* Save a0, a2, a3, DEPC and set SP. */

	xsr	a3, EXCSAVE_1		# restore a3, excsave_1
	rsr	a0, DEPC		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, SAR
	wsr	a2, ICOUNTLEVEL
	s32i	a3, a1, PT_SAR

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, WINDOWBASE		# don't need to save these, we only
	rsr	a3, WINDOWSTART		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit

	/* Save only the live window-frame */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

1:

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save EXCVADDR, DEBUGCAUSE, and PC, and clear LCOUNT */

	rsr	a2, DEBUGCAUSE
	rsr	a3, EPC_1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	rsr	a3, EXCVADDR
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
	xsr	a2, LCOUNT
	s32i	a2, a1, PT_LCOUNT

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a0, EXCCAUSE
	movi	a3, 0
	rsr	a2, EXCSAVE_1
	s32i	a0, a1, PT_EXCCAUSE
	s32i	a3, a2, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid,
	 * so we can allow exceptions and interrupts (*) again.
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 *
	 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
	 *     (interrupts disabled) and if this exception is not an interrupt.
	 */
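
	/* In C terms (an illustrative sketch; assuming EXCCAUSE value 4 is
	 * the level-1 interrupt cause, as 'addi a0, a0, -4' below implies):
	 *
	 *	new_ps = PS_WOE_MASK |
	 *		 ((exccause == 4) ? 1 : (old_ps & 1));
	 */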

	rsr	a3, PS
	addi	a0, a0, -4
	movi	a2, 1
	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
	movi	a2, PS_WOE_MASK
	or	a3, a3, a2
	rsr	a0, EXCCAUSE
	xsr	a3, PS

	s32i	a3, a1, PT_PS		# save ps

	/* Save LBEG, LEND */

	rsr	a2, LBEG
	rsr	a3, LEND
	s32i	a2, a1, PT_LBEG
	s32i	a3, a1, PT_LEND

	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	movi	a4, exc_table
	mov	a6, a1			# pass stack frame
	mov	a7, a0			# pass EXCCAUSE
	addx4	a4, a0, a4
	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler

	/* Call the second-level handler */

	callx4	a4

	/* Jump here for exception exit */

common_exception_return:

	/* Jump if we are returning from kernel exceptions. */

1:	l32i	a3, a1, PT_PS
	_bbsi.l	a3, PS_UM_SHIFT, 2f
	j	kernel_exception_exit

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 */

2:	wsr	a3, PS		/* disable interrupts */

	/* Check for signals (keep interrupts disabled while we read TI_FLAGS)
	 * Note: PS.INTLEVEL = 0, PS.EXCM = 1
	 */

	GET_THREAD_INFO(a2,a1)
	l32i	a4, a2, TI_FLAGS

	/* Enable interrupts again.
	 * Note: When we get here, we certainly have handled any interrupts.
	 *       (Hint: There is only one user exception frame on stack)
	 */

	movi	a3, PS_WOE_MASK

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbci.l	a4, TIF_SIGPENDING, 4f

#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
#endif

	/* Reenable interrupts and call do_signal() */

	wsr	a3, PS
	movi	a4, do_signal	# int do_signal(struct pt_regs*, sigset_t*)
	mov	a6, a1
	movi	a7, 0
	callx4	a4
	j	1b

3:	/* Reenable interrupts and reschedule */

	wsr	a3, PS
	movi	a4, schedule	# void schedule (void)
	callx4	a4
	j	1b

	/* Restore the state of the task and return from the exception. */


	/* If we are returning from a user exception, and the process
	 * to run next has PT_SINGLESTEP set, we want to set up
	 * ICOUNT and ICOUNTLEVEL to step one instruction.
	 * PT_SINGLESTEP is set by sys_ptrace (ptrace.c)
	 */

4:	/* a2 holds GET_CURRENT(a2,a1)  */

	l32i	a3, a2, TI_TASK
	l32i	a3, a3, TASK_PTRACE
	bbci.l	a3, PT_SINGLESTEP_BIT, 1f # jump if single-step flag is not set

	movi	a3, -2			# PT_SINGLESTEP flag is set,
	movi	a4, 1			# icountlevel of 1 means it won't
	wsr	a3, ICOUNT		# start counting until after rfe
	wsr	a4, ICOUNTLEVEL		# so set up icount & icountlevel.
	isync

1:

#if XCHAL_EXTRA_SA_SIZE

	/* For user exceptions, restore the extra state from the user's TCB. */

	/* Note: a2 still contains GET_CURRENT(a2,a1) */
	addi	a2, a2, THREAD_CP_SAVE
	xchal_extra_load_funcbody

	/* We must assume that xchal_extra_store_funcbody destroys
	 * registers a2..a15.  FIXME, this list can eventually be
	 * reduced once real register requirements of the macro are
	 * finalized. */

#endif /* XCHAL_EXTRA_SA_SIZE */


	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, DEPC		# use DEPC as temp storage
	wsr	a3, WINDOWSTART		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, WINDOWBASE		# switch to user's saved WB
	rsync
	rsr	a1, DEPC		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?

	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land) */

1:	rsr	a0, WINDOWBASE
	rsr	a3, SAR
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */

2:	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

	/* Disable interrupts (a3 holds PT_PS) */

	wsr	a3, PS

#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

	/*
	 * Note: We've just returned from a call4, so we have
	 * at least 4 additional regs.
	 */

	/* Check current_thread_info->preempt_count */

	GET_THREAD_INFO(a2)
	l32i	a3, a2, TI_PREEMPT
	bnez	a3, 1f

	l32i	a2, a2, TI_FLAGS

1:

#endif

#endif

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */
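
	/* The same test in C (an illustrative sketch): a value 'ws' with
	 * exactly one bit set satisfies
	 *
	 *	(ws & (ws - 1)) == 0	// given ws != 0, which holds here
	 */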

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, WINDOWSTART
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_SIZE+0
	s32i	a4, a1, PT_SIZE+4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_SIZE+8
	s32i	a4, a1, PT_SIZE+12

	/* Common exception exit.
	 * We restore the special registers and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	_bbsi.l	a2, 1, 1f
	l32i	a4,  a1, PT_AREG4
	l32i	a5,  a1, PT_AREG5
	l32i	a6,  a1, PT_AREG6
	l32i	a7,  a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8,  a1, PT_AREG8
	l32i	a9,  a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, EPC_1
	wsr	a3, SAR

	/* Restore LBEG, LEND, LCOUNT */

	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, LBEG
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, LEND
	wsr	a2, LCOUNT

	/* Check if it was double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, DEPC
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

	rsr	a0, EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_SHIFT, 1f	# exception mode

	/* Set EPC_1 and EXCCAUSE */

	wsr	a2, DEPC		# save a2 temporarily
	rsr	a2, EPC + XCHAL_DEBUGLEVEL
	wsr	a2, EPC_1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, EXCCAUSE

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_SHIFT
	or	a2, a0, a2
	movi	a0, debug_exception	# restore a0, reset debug jump vector
	wsr	a2, PS
	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_SHIFT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a0, DEPC
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_kernel_exception

2:	rsr	a2, EXCSAVE_1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC
	xsr	a0, DEPC
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_user_exception

	/* Debug exception while in exception mode. */
1:	j	1b	// FIXME!!


/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

	movi	a0, 1
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a1, PS_WOE_MASK | 1
	wsr	a1, PS
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a4, panic
	movi	a6, unrecoverable_text

	callx4	a4

1:	j	1b


/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */
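
/* For the "movsp a1, as" case, the effect is roughly (an illustrative C
 * sketch; the real code below uses L32E/S32E so the copy happens with the
 * user's ring privileges, which plain C cannot express):
 *
 *	memcpy((char *)new_sp - 16, (char *)old_sp - 16, 16);
 *	sp = new_sp;
 *
 * i.e. the four-word register-spill save area just below the old stack
 * pointer moves below the new one.
 */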

#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

	/* We shouldn't be in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

	rsr	a0, DEPC		# get a2
	s32i	a4, a2, PT_AREG4	# save a4 and
	s32i	a0, a2, PT_AREG2	# a2 to stack

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore a3, excsave_1 */

	xsr	a3, EXCSAVE_1		# make sure excsave_1 is valid for dbl.
	rsr	a4, EPC_1		# get exception address
	s32i	a3, a2, PT_AREG3	# save a3 to stack

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	/* Note: l8ui not allowed in IRAM/IROM!! */
	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
#endif
	movi	a3, .Lmovsp_src
	_EXTUI_MOVSP_SRC(a0)		# extract source register number
	addx8	a3, a0, a3
	jx	a3

.Lunhandled_double:
	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

	.align 8
.Lmovsp_src:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a3, a5;			_j 1f;	.align 8
	mov	a3, a6;			_j 1f;	.align 8
	mov	a3, a7;			_j 1f;	.align 8
	mov	a3, a8;			_j 1f;	.align 8
	mov	a3, a9;			_j 1f;	.align 8
	mov	a3, a10;		_j 1f;	.align 8
	mov	a3, a11;		_j 1f;	.align 8
	mov	a3, a12;		_j 1f;	.align 8
	mov	a3, a13;		_j 1f;	.align 8
	mov	a3, a14;		_j 1f;	.align 8
	mov	a3, a15;		_j 1f;	.align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
#endif
	addi	a4, a4, 3		# step over movsp
	_EXTUI_MOVSP_DST(a0)		# extract destination register
	wsr	a4, EPC_1		# save new epc_1

	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump

	/* Move the save area. This implies the use of the L32E
	 * and S32E instructions, because this move must be done with
	 * the user's PS.RING privilege levels, not with ring 0
	 * (kernel's) privileges currently active with PS.EXCM
	 * set. Note that we have still registered a fixup routine with the
	 * double exception vector in case a double exception occurs.
	 */

	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

	l32e	a0, a1, -16
	l32e	a4, a1, -12
	s32e	a0, a3, -16
	s32e	a4, a3, -12
	l32e	a0, a1, -8
	l32e	a4, a1, -4
	s32e	a0, a3, -8
	s32e	a4, a3, -4

	/* Restore stack-pointer and all the other saved registers. */

	mov	a1, a3

	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/*  MOVSP <at>,<as>  was invoked with <at> != a1.
	 *  Because the stack pointer is not being modified,
	 *  we should be able to just modify the pointer
	 *  without moving any save area.
	 *  The processor only traps these occurrences if the
	 *  caller window isn't live, so unfortunately we can't
	 *  use this as an alternate trap mechanism.
	 *  So we just do the move.  This requires that we
	 *  resolve the destination register, not just the source,
	 *  so there's some extra work.
	 *  (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
	 */

	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
1:	movi	a4, .Lmovsp_dst
	addx8	a4, a0, a4
	jx	a4

	.align 8
.Lmovsp_dst:
	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a1, a3;			_j 1f;	.align 8
	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a5, a3;			_j 1f;	.align 8
	mov	a6, a3;			_j 1f;	.align 8
	mov	a7, a3;			_j 1f;	.align 8
	mov	a8, a3;			_j 1f;	.align 8
	mov	a9, a3;			_j 1f;	.align 8
	mov	a10, a3;		_j 1f;	.align 8
	mov	a11, a3;		_j 1f;	.align 8
	mov	a12, a3;		_j 1f;	.align 8
	mov	a13, a3;		_j 1f;	.align 8
	mov	a14, a3;		_j 1f;	.align 8
	mov	a15, a3;		_j 1f;	.align 8

1:	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe


/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 */

ENTRY(fast_syscall_kernel)

	/* Skip syscall. */

	rsr	a0, EPC_1
	addi	a0, a0, 3
	wsr	a0, EPC_1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers

	addi	a0, a0, -__NR_sysxtensa
	_beqz	a0, fast_syscall_sysxtensa

	j	kernel_exception


ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, EPC_1
	addi	a0, a0, 3
	wsr	a0, EPC_1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers

	addi	a0, a0, -__NR_sysxtensa
	_beqz	a0, fast_syscall_sysxtensa

	j	user_exception

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, DEPC		# restore a2, depc
	rsr	a3, EXCSAVE_1

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0



/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (XTENSA_ATOMIC_SET, ptr, val, unused);
 * int sysxtensa (XTENSA_ATOMIC_ADD, ptr, val, unused);
 * int sysxtensa (XTENSA_ATOMIC_EXG_ADD, ptr, val, unused);
 * int sysxtensa (XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 * a2                    a6              a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *       statements and continues from there
 *
 * Usage:	TRY	l32i	a0, a1, 0
 *		<other code>
 *	 done:	rfe
 *	 CATCH	<set return code>
 *		j done
 */

#define TRY								\
	.section __ex_table, "a";					\
	.word	66f, 67f;						\
	.text;								\
66:

#define CATCH								\
67:

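/* C-level semantics of the operations implemented below (an illustrative
 * sketch; the user-space access check, the __ex_table fixup, and the fact
 * that ATOMIC_ADD and ATOMIC_EXG_ADD share one code path are elided):
 *
 *	int cmp_swp(int *p, int expected, int newv)
 *	{
 *		if (*p != expected)
 *			return 0;
 *		*p = newv;
 *		return 1;
 *	}
 *
 *	int exg_add(int *p, int v) { int old = *p; *p = old + v; return old; }
 *	int set(int *p, int v)     { int old = *p; *p = v;       return old; }
 */
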
ENTRY(fast_syscall_sysxtensa)

	_beqz	a6, 1f
	_blti	a6, SYSXTENSA_COUNT, 2f

1:	j	user_exception

2:	xsr	a3, EXCSAVE_1		# restore a3, excsave1
	s32i	a7, a2, PT_AREG7

	movi	a7, 4			# sizeof(unsigned int)
	access_ok a0, a3, a7, a2, .Leac

	_beqi	a6, SYSXTENSA_ATOMIC_SET, .Lset
	_beqi	a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
	_beqi	a6, SYSXTENSA_ATOMIC_ADD, .Ladd

	/* Fall through for SYSXTENSA_ATOMIC_CMP_SWP */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a7, a3, 0		# read old value
	bne	a7, a4, 1f		# different from expected? jump
	s32i	a5, a3, 0		# matches: store the new value
	movi	a7, 1			# and return 1
	j	.Lret

1:	movi	a7, 0			# mismatch: return 0
	j	.Lret

.Ladd:	/* Atomic add */
.Lexg:	/* Atomic (exchange) add */

TRY	l32i	a7, a3, 0
	add	a4, a4, a7
	s32i	a4, a3, 0
	j	.Lret

.Lset:	/* Atomic set */

TRY	l32i	a7, a3, 0		# read old value as return value
	s32i	a4, a3, 0		# write new value

.Lret:	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7
	l32i	a3, a0, PT_AREG3
	l32i	a0, a0, PT_AREG0
	rfe

CATCH
.Leac:	movi	a7, -EFAULT
	j	.Lret



/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 * Note: We don't need to save a2 in depc (return value)
 */

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, WINDOWBASE
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, SAR
	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
	s32i	a0, a2, PT_AREG4	# store SAR to PT_AREG4
	s32i	a3, a2, PT_AREG3

	/* The spill routine might clobber a7, a11, and a15. */

	s32i	a7, a2, PT_AREG5
	s32i	a11, a2, PT_AREG6
	s32i	a15, a2, PT_AREG7

	call0	_spill_registers	# destroys a3, DEPC, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG4
	l32i	a0, a2, PT_AREG0
	wsr	a3, SAR
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a7, a2, PT_AREG5
	l32i	a11, a2, PT_AREG6
	l32i	a15, a2, PT_AREG7

	movi	a2, 0
	rfe

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

fast_syscall_spill_registers_fixup:

	rsr	a2, WINDOWBASE	# get current windowbase (a2 is saved)
	xsr	a0, DEPC	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, EXCSAVE_1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, WINDOWSTART	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in excsave_1)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	a3
	 */

	wsr	a3, WINDOWBASE
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available, saved in EXCSAVE_1
	 *  depc: exception address
	 *  excsave: a3
	 * Note: This frame might be the same as above.
	 */

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	/* Restore the registers we saved as a precaution.
	 * We have the value of the 'right' a3.
	 */

	l32i	a7, a2, PT_AREG5
	l32i	a11, a2, PT_AREG6
	l32i	a15, a2, PT_AREG7
#endif

	/* Set up the stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	movi	a3, exc_table
	rsr	a0, EXCCAUSE
	addx4	a0, a0, a3			# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0

fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, DEPC		# exception address

	/* Restore fixup handler. */

	xsr	a3, EXCSAVE_1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, WINDOWBASE
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	/* Save registers again that might be clobbered. */

	s32i	a7, a2, PT_AREG5
	s32i	a11, a2, PT_AREG6
	s32i	a15, a2, PT_AREG7
#endif

	/* Load WB at the time the exception occurred. */

	rsr	a3, SAR			# WB is still in SAR
	neg	a3, a3
	wsr	a3, WINDOWBASE
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, EXCSAVE_1

	rfde


/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses DEPC, a3 and SAR.
 *  - the last 'valid' register of each frame is clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */

ENTRY(_spill_registers)

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a2 = yyxxxwww1.
	 */

	wsr	a2, DEPC		# preserve a2
	rsr	a2, WINDOWBASE
	rsr	a3, WINDOWSTART
	ssr	a2			# holds WB
	slli	a2, a3, WSBITS
	or	a3, a3, a2		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3

	/* We are done if there is no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-2	# a3 = 0yyxxxwww
	movi	a2, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a2		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, WINDOWSTART		# save shifted windowstart
	neg	a2, a3
	and	a3, a2, a3		# first bit set from right: 000010000

	ffs_ws	a2, a3			# a2: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a2, a3, a2		# WSBITS-a2:number of 0-bits from right
	ssr	a2			# save in SAR for later.

	rsr	a3, WINDOWBASE
	add	a3, a3, a2
	rsr	a2, DEPC		# restore a2
	wsr	a3, WINDOWBASE
	rsync

	rsr	a3, WINDOWSTART
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending on what call was used (call4, call8, call12),
	 * we have to save 4, 8, or 12 registers.
	 */
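
	/* Dispatch sketch in C (illustrative): the WINDOWSTART bits just
	 * above the current frame encode the caller's call size:
	 *
	 *	if (ws & 2)		// bit 1: caller used call4
	 *		save 4 regs;
	 *	else if (ws & 4)	// bit 2: caller used call8
	 *		save 8 regs;
	 *	else			// bit 3 must be set: call12
	 *		save 12 regs;
	 */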

	_bbsi.l	a3, 1, .Lc4
	_bbsi.l	a3, 2, .Lc8

	/* Special case: we have a call12-frame starting at a4. */

	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to .Lc12 first)

	s32e	a4, a1, -16	# a1 is valid with an empty spill area
	l32e	a4, a5, -12
	s32e	a8, a4, -48
	mov	a8, a4
	l32e	a4, a1, -16
	j	.Lc12c

.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20

	srli	a11, a3, 2		# shift windowbase by 2
	rotw	2
	_bnei	a3, 1, .Lloop

.Lexit: /* Done. Do the final rotation, set WS, and return. */

	rotw	1
	rsr	a3, WINDOWBASE
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, WINDOWSTART

.Lnospill:
	jx	a0

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a2, a5, -12
	s32e	a8, a2, -48
	mov	a8, a2

.Lc12c: s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stack pointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a5, a13
	rotw	-1

	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers. If we are in user space, we kill the application;
	 * however, this condition is unrecoverable in kernel space.
	 */

	rsr	a0, PS
	_bbci.l	a0, PS_UM_SHIFT, 1f

	/* User space: Set up a dummy frame and kill the application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, EXCSAVE_1

	movi	a4, PS_WOE_MASK | 1
	wsr	a4, PS
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

1:	/* Kernel space: PANIC! */

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search the Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a1)
	l32i	a0, a0, 0		# read pmdval
	//beqi	a0, _PAGE_USER, 2f
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 * 	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 * 	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
	 */

	movi	a1, -PAGE_OFFSET
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	rsr	a1, PTEVADDR
	srli	a1, a1, PAGE_SHIFT
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a1, a1, DTLB_WAY_PGTABLE	# ... + way_number

	wdtlb	a0, a1
	dsync

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a2, a2, PT_DEPC
	xsr	a3, EXCSAVE_1

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, DEPC
	rfe

	/* Return from double exception. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* Invalid PGD, default exception handling */

	rsr	a1, DEPC
	xsr	a3, EXCSAVE_1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_SHIFT, 1f
	j	_kernel_exception
1:	j	_user_exception


/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */
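
/* Roughly, in C (an illustrative sketch; the helper names mirror the
 * generic page-table macros, update_dtlb_entry() is a hypothetical name,
 * and the cache-flush detail is elided):
 *
 *	pte_t *ptep = pte_offset(pmd_offset(pgd_offset(mm, addr), addr), addr);
 *	pte_t pte = *ptep;
 *	if ((pte_val(pte) & (_PAGE_VALID | _PAGE_RW))
 *			!= (_PAGE_VALID | _PAGE_RW))
 *		goto slow_path;		// let the C fault handler decide
 *	pte_val(pte) |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE;
 *	*ptep = pte;
 *	update_dtlb_entry(addr, pte);	// hypothetical helper
 */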

ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0
	//beqi	a0, _PAGE_USER, 2f	# FIXME use _PAGE_INVALID
	beqz	a0, 2f

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	movi	a1, _PAGE_VALID | _PAGE_RW
	bnall	a4, a1, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
	or	a4, a4, a1
	rsr	a1, EXCVADDR
	s32i	a4, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	beqz	a0, 1f
	idtlb	a0		// FIXME do we need this?
	wdtlb	a4, a0
1:

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, EXCSAVE_1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, DEPC
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle the fault in C */

	rsr	a4, DEPC	# still holds a2
	xsr	a3, EXCSAVE_1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_SHIFT, 1f
	j	_kernel_exception
1:	j	_user_exception


#if XCHAL_EXTRA_SA_SIZE

#warning fast_coprocessor untested

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_coprocessor_double)
	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

ENTRY(fast_coprocessor)

	/* Fatal if we are in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double

	/* Save some registers: a1, a3, a4, SAR */

	xsr	a3, EXCSAVE_1
	s32i	a3, a2, PT_AREG3
	rsr	a3, SAR
	s32i	a4, a2, PT_AREG4
	s32i	a1, a2, PT_AREG1
	s32i	a5, a1, PT_AREG5
	s32i	a3, a2, PT_SAR
	mov	a1, a2

	/* Currently, the HAL macros only guarantee saving a0 and a1.
	 * These can and will be refined in the future, but for now,
	 * just save the remaining registers of a2...a15.
	 */
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a0, EXCCAUSE
	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit */

	movi	a4, 1
	ssl	a3			# SAR: 32 - coprocessor_number
	rsr	a5, CPENABLE
	sll	a4, a4
	or	a4, a5, a4
	wsr	a4, CPENABLE
	rsync
	movi	a5, coprocessor_info	# list of owner and offset into cp_save
	addx8	a0, a4, a5		# entry for CP

	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use

	/* Now compare the current task with the owner of the coprocessor.
	 * If they are the same, there is no reason to save or restore any
	 * coprocessor state. Having already enabled the coprocessor,
	 * branch ahead to return.
	 */
	GET_CURRENT(a5,a1)
	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
	beq	a4, a5, .Ldone

	/* Find location to dump current coprocessor state:
	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number,
	 *	 a4 current owner of coprocessor.
	 */
	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a5

	/* Store current coprocessor states. (a5 still has CP number) */

	xchal_cpi_store_funcbody

	/* The macro might have destroyed a3 (coprocessor number), but
	 * SAR still has 32 - coprocessor_number!
	 */
	movi	a3, 32
	rsr	a4, SAR
	sub	a3, a3, a4

.Lload:	/* A new task now owns the coprocessor. Save its TCB pointer into
	 * the coprocessor owner table.
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number.
	 */
	GET_CURRENT(a4,a1)
	s32i	a4, a0, 0

	/* Find location from where to restore the current coprocessor state.*/

	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a4

	xchal_cpi_load_funcbody

	/* We must assume that the xchal_cpi_store_funcbody macro destroyed
	 * registers a2..a15.
	 */

.Ldone:	l32i	a15, a1, PT_AREG15
	l32i	a14, a1, PT_AREG14
	l32i	a13, a1, PT_AREG13
	l32i	a12, a1, PT_AREG12
	l32i	a11, a1, PT_AREG11
	l32i	a10, a1, PT_AREG10
	l32i	a9, a1, PT_AREG9
	l32i	a8, a1, PT_AREG8
	l32i	a7, a1, PT_AREG7
	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1

	rfe

#endif /* XCHAL_EXTRA_SA_SIZE */

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                              a2                 a3
 */

ENTRY(_switch_to)

	entry	a1, 16

	mov	a4, a3			# preserve a3

	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer

	/* Disable ints while we manipulate the stack pointer; spill regs. */

	movi	a5, PS_EXCM_MASK | LOCKLEVEL
	xsr	a5, PS
	rsr	a3, EXCSAVE_1
	rsync
	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */

	call0	_spill_registers

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	l32i	a0, a4, TASK_THREAD_INFO
	rsr	a3, EXCSAVE_1		# exc_table
	movi	a1, 0
	addi	a0, a0, PT_REGS_OFFSET
	s32i	a1, a3, EXC_TABLE_FIXUP
	s32i	a0, a3, EXC_TABLE_KSTK

	/* restore context of the task that 'next' addresses */

	l32i	a0, a4, THREAD_RA	/* restore return address */
	l32i	a1, a4, THREAD_SP	/* restore stack pointer */

	wsr	a5, PS
	rsync

	retw


ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */
	movi	a4, schedule_tail
	callx4	a4

	movi	a4, do_syscall_trace
	callx4	a4

	j	common_exception_return



/*
 * Table of syscalls
 */

.data
.align  4
.global sys_call_table
sys_call_table:

#define SYSCALL(call, narg) .word call
#include "syscalls.h"

/*
 * Number of arguments of each syscall
 */

.global sys_narg_table
sys_narg_table:

#undef SYSCALL
#define SYSCALL(call, narg) .byte narg
#include "syscalls.h"

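/* Both tables above are generated from the same SYSCALL() entries in
 * "syscalls.h" by redefining the macro between the two includes (the
 * classic X-macro trick). For example, a hypothetical entry
 *
 *	SYSCALL(sys_read, 3)
 *
 * expands to ".word sys_read" in sys_call_table and to ".byte 3" in
 * sys_narg_table, keeping the two tables in sync by construction.
 */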
1997