xref: /linux/arch/microblaze/kernel/entry.S (revision 27258e448eb301cf89e351df87aa8cb916653bf2)
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002	NEC Corporation
8 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
34/* The size of a state save frame. */
35#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)
36
37/* The offset of the struct pt_regs in a `state save frame' on the stack. */
38#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */
39
40#define C_ENTRY(name)	.globl name; .align 4; name
41
42/*
43 * Various ways of setting and clearing BIP in flags reg.
44 * This is mucky, but necessary using microblaze version that
45 * allows msr ops to write to BIP
46 */
47#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
/*
 * CPU provides the msrset/msrclr instructions, so individual MSR bits
 * can be flipped in one instruction.  NOTE: every macro below clobbers
 * r11 (msrset/msrclr deposit the pre-modification MSR value there).
 */
48	.macro	clear_bip
49	msrclr	r11, MSR_BIP
50	nop
51	.endm
52
53	.macro	set_bip
54	msrset	r11, MSR_BIP
55	nop
56	.endm
57
58	.macro	clear_eip
59	msrclr	r11, MSR_EIP
60	nop
61	.endm
62
63	.macro	set_ee
64	msrset	r11, MSR_EE
65	nop
66	.endm
67
68	.macro	disable_irq
69	msrclr	r11, MSR_IE
70	nop
71	.endm
72
73	.macro	enable_irq
74	msrset	r11, MSR_IE
75	nop
76	.endm
77
	/* Select user-MMU context: UMS set, VMS cleared. */
78	.macro	set_ums
79	msrset	r11, MSR_UMS
80	nop
81	msrclr	r11, MSR_VMS
82	nop
83	.endm
84
	/* Select virtual mode: VMS set, UMS cleared. */
85	.macro	set_vms
86	msrclr	r11, MSR_UMS
87	nop
88	msrset	r11, MSR_VMS
89	nop
90	.endm
91
	/* Leave virtual and user mode (back to real/kernel addressing). */
92	.macro	clear_vms_ums
93	msrclr	r11, MSR_VMS
94	nop
95	msrclr	r11, MSR_UMS
96	nop
97	.endm
98#else
/*
 * No msrset/msrclr support: emulate each bit operation with a
 * read-modify-write of rmsr.  Still clobbers r11, and the sequence
 * is not atomic (unlike the msrset/msrclr variants above).
 */
99	.macro	clear_bip
100	mfs	r11, rmsr
101	nop
102	andi	r11, r11, ~MSR_BIP
103	mts	rmsr, r11
104	nop
105	.endm
106
107	.macro	set_bip
108	mfs	r11, rmsr
109	nop
110	ori	r11, r11, MSR_BIP
111	mts	rmsr, r11
112	nop
113	.endm
114
115	.macro	clear_eip
116	mfs	r11, rmsr
117	nop
118	andi	r11, r11, ~MSR_EIP
119	mts	rmsr, r11
120	nop
121	.endm
122
123	.macro	set_ee
124	mfs	r11, rmsr
125	nop
126	ori	r11, r11, MSR_EE
127	mts	rmsr, r11
128	nop
129	.endm
130
131	.macro	disable_irq
132	mfs	r11, rmsr
133	nop
134	andi	r11, r11, ~MSR_IE
135	mts	rmsr, r11
136	nop
137	.endm
138
139	.macro	enable_irq
140	mfs	r11, rmsr
141	nop
142	ori	r11, r11, MSR_IE
143	mts	rmsr, r11
144	nop
145	.endm
146
	/* NOTE(review): this RMW variant sets VMS and clears UMS — opposite
	 * bit pattern to the msrset/msrclr set_ums above; looks like a latent
	 * inconsistency carried in the original — confirm against hardware use. */
147	.macro set_ums
148	mfs	r11, rmsr
149	nop
150	ori	r11, r11, MSR_VMS
151	andni	r11, r11, MSR_UMS
152	mts	rmsr, r11
153	nop
154	.endm
155
156	.macro	set_vms
157	mfs	r11, rmsr
158	nop
159	ori	r11, r11, MSR_VMS
160	andni	r11, r11, MSR_UMS
161	mts	rmsr, r11
162	nop
163	.endm
164
165	.macro	clear_vms_ums
166	mfs	r11, rmsr
167	nop
168	andni	r11, r11, (MSR_VMS|MSR_UMS)
169	mts	rmsr,r11
170	nop
171	.endm
172#endif
173
174/* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
177 */
178
179/* turn on virtual protected mode save
 * (rted returns to the local label with the updated MSR, so the mode
 * switch takes effect at the instruction after the macro) */
180#define VM_ON		\
181	set_ums;		\
182	rted	r0, 2f;	\
1832: nop;
184
185/* turn off virtual protected mode save and user mode save
 * (return target is converted with TOPHYS since we land in real mode) */
186#define VM_OFF			\
187	clear_vms_ums;			\
188	rted	r0, TOPHYS(1f);	\
1891: nop;
190
/* Store most general-purpose registers (r2, r5-r15, r18-r31) plus MSR
 * into the pt_regs frame at r1+PTO.  r1/r3/r4/r17 are stored (or not)
 * by each caller as appropriate.  Clobbers r11 (used to read MSR). */
191#define SAVE_REGS \
192	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
193	swi	r5, r1, PTO+PT_R5;					\
194	swi	r6, r1, PTO+PT_R6;					\
195	swi	r7, r1, PTO+PT_R7;					\
196	swi	r8, r1, PTO+PT_R8;					\
197	swi	r9, r1, PTO+PT_R9;					\
198	swi	r10, r1, PTO+PT_R10;					\
199	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
200	swi	r12, r1, PTO+PT_R12;					\
201	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
202	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
203	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
204	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
205	swi	r19, r1, PTO+PT_R19;					\
206	swi	r20, r1, PTO+PT_R20;					\
207	swi	r21, r1, PTO+PT_R21;					\
208	swi	r22, r1, PTO+PT_R22;					\
209	swi	r23, r1, PTO+PT_R23;					\
210	swi	r24, r1, PTO+PT_R24;					\
211	swi	r25, r1, PTO+PT_R25;					\
212	swi	r26, r1, PTO+PT_R26;					\
213	swi	r27, r1, PTO+PT_R27;					\
214	swi	r28, r1, PTO+PT_R28;					\
215	swi	r29, r1, PTO+PT_R29;					\
216	swi	r30, r1, PTO+PT_R30;					\
217	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
218	mfs	r11, rmsr;		/* save MSR */			\
219	nop;								\
220	swi	r11, r1, PTO+PT_MSR;
221
/* Inverse of SAVE_REGS: reload MSR first (via r11, which is then
 * overwritten by its own saved value), then r2, r5-r15, r18-r31 from
 * the pt_regs frame at r1+PTO.  r3/r4 are reloaded by callers. */
222#define RESTORE_REGS \
223	lwi	r11, r1, PTO+PT_MSR;					\
224	mts	rmsr , r11;						\
225	nop;								\
226	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
227	lwi	r5, r1, PTO+PT_R5;					\
228	lwi	r6, r1, PTO+PT_R6;					\
229	lwi	r7, r1, PTO+PT_R7;					\
230	lwi	r8, r1, PTO+PT_R8;					\
231	lwi	r9, r1, PTO+PT_R9;					\
232	lwi	r10, r1, PTO+PT_R10;					\
233	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
234	lwi	r12, r1, PTO+PT_R12;					\
235	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
236	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
237	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
238	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
239	lwi	r19, r1, PTO+PT_R19;					\
240	lwi	r20, r1, PTO+PT_R20;					\
241	lwi	r21, r1, PTO+PT_R21;					\
242	lwi	r22, r1, PTO+PT_R22;					\
243	lwi	r23, r1, PTO+PT_R23;					\
244	lwi	r24, r1, PTO+PT_R24;					\
245	lwi	r25, r1, PTO+PT_R25;					\
246	lwi	r26, r1, PTO+PT_R26;					\
247	lwi	r27, r1, PTO+PT_R27;					\
248	lwi	r28, r1, PTO+PT_R28;					\
249	lwi	r29, r1, PTO+PT_R29;					\
250	lwi	r30, r1, PTO+PT_R30;					\
251	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
252
253.text
254
255/*
256 * User trap.
257 *
258 * System calls are handled here.
259 *
260 * Syscall protocol:
261 * Syscall number in r12, args in r5-r10
262 * Return value in r3
263 *
264 * Trap entered via brki instruction, so BIP bit is set, and interrupts
265 * are masked. This is nice, means we don't have to CLI before state save
266 */
267C_ENTRY(_user_exception):
268	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
269	addi	r14, r14, 4	/* return address is 4 byte after call */
270	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */
271
272	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
273	beqi	r11, 1f;		/* Jump ahead if coming from user */
274/* Kernel-mode state save. */
275	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
276	tophys(r1,r11);
277	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
278	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
279
280	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
281	SAVE_REGS
282
283	addi	r11, r0, 1; 		/* Was in kernel-mode. */
284	swi	r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
285	brid	2f;
286	nop;				/* Fill delay slot */
287
288/* User-mode state save.  */
2891:
290	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* restore r11 */
291	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
292	tophys(r1,r1);
293	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
294/* calculate kernel stack pointer from task struct 8k */
295	addik	r1, r1, THREAD_SIZE;
296	tophys(r1,r1);
297
298	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
299	SAVE_REGS
300
301	swi	r0, r1, PTO+PT_MODE;			/* Was in user-mode. */
302	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
303	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
304	addi	r11, r0, 1;
305	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
3062:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	/* get saved current */
307	/* Save away the syscall number.  */
308	swi	r12, r1, PTO+PT_R0;
309	tovirt(r1,r1)
310
311	la	r15, r0, ret_from_trap-8
312/* where the trap should return need -8 to adjust for rtsd r15, 8*/
313/* Jump to the appropriate function for the system call number in r12
314 * (r12 is not preserved), or return an error if r12 is not valid. The LP
315 * register should point to the location where
316 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
317	/* See if the system call number is valid.  */
318	addi	r11, r12, -__NR_syscalls;
319	bgei	r11,1f;
320	/* Figure out which function to use for this system call.  */
321	/* Note Microblaze barrel shift is optional, so don't rely on it */
322	add	r12, r12, r12;			/* convert num -> ptr */
323	add	r12, r12, r12;
324
325	/* Track syscall usage: bump a per-syscall counter kept in r0_ram */
326	lwi	r3, r12, 0x400 + TOPHYS(r0_ram)
327	addi	r3, r3, 1
328	swi	r3, r12, 0x400 + TOPHYS(r0_ram)
329
330	lwi	r12, r12, TOPHYS(sys_call_table); /* Function ptr */
331	/* Make the system call.  to r12*/
332	set_vms;
333	rtid	r12, 0;
334	nop;
335	/* The syscall number is invalid, return an error.  */
3361:	VM_ON;	/* RETURN() expects virtual mode*/
337	addi	r3, r0, -ENOSYS;
338	rtsd	r15,8;		/* looks like a normal subroutine return */
339	or 	r0, r0, r0
340
341
342/* Entry point used to return from a syscall/trap.  */
343/* We re-enable BIP bit before state restore */
344C_ENTRY(ret_from_trap):
345	set_bip;			/*  Ints masked for state restore*/
346	lwi	r11, r1, PTO+PT_MODE;
347/* See if returning to kernel mode, if so, skip resched &c.  */
348	bnei	r11, 2f;
349
350	/* We're returning to user mode, so check for various conditions that
351	 * trigger rescheduling. */
352	/* Get current task ptr into r11 */
353	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
354	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
355	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
356	andi	r11, r11, _TIF_NEED_RESCHED;
357	beqi	r11, 5f;
358
	/* r3/r4 hold the syscall return value; preserve across schedule() */
359	swi	r3, r1, PTO + PT_R3; /* store syscall result */
360	swi	r4, r1, PTO + PT_R4;
361	bralid	r15, schedule;	/* Call scheduler */
362	nop;				/* delay slot */
363	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
364	lwi	r4, r1, PTO + PT_R4;
365
366	/* Maybe handle a signal */
3675:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
368	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
369	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
370	andi	r11, r11, _TIF_SIGPENDING;
371	beqi	r11, 1f;		/* No signals pending -> skip handling */
372
373	swi	r3, r1, PTO + PT_R3; /* store syscall result */
374	swi	r4, r1, PTO + PT_R4;
375	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
376	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
377	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
378	bralid	r15, do_signal;	/* Handle any signals */
379	nop;
380	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
381	lwi	r4, r1, PTO + PT_R4;
382
383/* Finally, return to user state.  */
3841:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
385	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
386	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
387	VM_OFF;
388	tophys(r1,r1);
389	RESTORE_REGS;
390	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
391	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
392	bri	6f;
393
394/* Return to kernel state.  */
3952:	VM_OFF;
396	tophys(r1,r1);
397	RESTORE_REGS;
398	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
399	tovirt(r1,r1);
4006:
401TRAP_return:		/* Make global symbol for debugging */
402	rtbd	r14, 0;	/* Instructions to return from an IRQ */
403	nop;
404
405
406/* These syscalls need access to the struct pt_regs on the stack, so we
407   implement them in assembly (they're basically all wrappers anyway).  */
408
/*
 * fork() wrapper: build the do_fork() argument list from the trap
 * frame and tail-call do_fork().  Flags = SIGCHLD, child SP reuses
 * the parent's saved SP, pt_regs passed as the parent context.
 * FIX: arg-3 line read "add r8. r0, r0" (period instead of comma
 * after r8), which does not assemble.
 */
409C_ENTRY(sys_fork_wrapper):
410	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
411	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
412	la	r7, r1, PTO			/* Arg 2: parent context */
413	add	r8, r0, r0			/* Arg 3: (unused) */
414	add	r9, r0, r0;			/* Arg 4: (unused) */
415	add	r10, r0, r0;			/* Arg 5: (unused) */
416	brid	do_fork		/* Do real work (tail-call) */
417	nop;
418
419/* This is the initial entry point for a new child thread, with an appropriate
420   stack in place that makes it look like the child is in the middle of a
421   syscall.  This function is actually `returned to' from switch_thread
422   (copy_thread makes ret_from_fork the return address in each new thread's
423   saved context).  */
424C_ENTRY(ret_from_fork):
425	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
426	add	r3, r5, r0;	/* switch_thread returns the prev task */
427				/* ( in the delay slot ) */
428	add	r3, r0, r0;	/* Child's fork call should return 0. */
429	brid	ret_from_trap;	/* Do normal trap return */
430	nop;
431
/* vfork(): tail-call microblaze_vfork with pt_regs as the argument
 * (la in the delay slot executes before the branch takes effect). */
432C_ENTRY(sys_vfork):
433	brid	microblaze_vfork	/* Do real work (tail-call) */
434	la	r5, r1, PTO
435
/* clone(): like fork, but flags/child-SP come from the caller
 * (r5/r6 already hold user args 0 and 1). */
436C_ENTRY(sys_clone):
437	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
438	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
4391:	la	r7, r1, PTO;			/* Arg 2: parent context */
440	add	r8, r0, r0;			/* Arg 3: (unused) */
441	add	r9, r0, r0;			/* Arg 4: (unused) */
442	add	r10, r0, r0;			/* Arg 5: (unused) */
443	brid	do_fork		/* Do real work (tail-call) */
444	nop;
445
/* execve(): user args in r5-r7; pass pt_regs as the 4th argument. */
446C_ENTRY(sys_execve):
447	la	r8, r1, PTO;		/* add user context as 4th arg */
448	brid	microblaze_execve;	/* Do real work (tail-call).*/
449	nop;
450
/* rt_sigsuspend(): save r3/r4 around the C call, pass pt_regs as the
 * 3rd argument, then join the normal trap-return path. */
451C_ENTRY(sys_rt_sigsuspend_wrapper):
452	swi	r3, r1, PTO+PT_R3; /* save r3, r4 (restored after the call) */
453	swi	r4, r1, PTO+PT_R4;
454	la	r7, r1, PTO;		/* add user context as 3rd arg */
455	brlid	r15, sys_rt_sigsuspend;	/* Do real work.*/
456	nop;
457	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
458	lwi	r4, r1, PTO+PT_R4;
459	bri ret_from_trap /* fall through will not work here due to align */
460	nop;
461
/* rt_sigreturn(): save r3/r4 around the C call, pass pt_regs as the
 * 1st argument, then join the normal trap-return path. */
462C_ENTRY(sys_rt_sigreturn_wrapper):
463	swi	r3, r1, PTO+PT_R3; /* save r3, r4 (restored after the call) */
464	swi	r4, r1, PTO+PT_R4;
465	la	r5, r1, PTO;		/* add user context as 1st arg */
466	brlid	r15, sys_rt_sigreturn	/* Do real work */
467	nop;
468	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
469	lwi	r4, r1, PTO+PT_R4;
470	bri ret_from_trap /* fall through will not work here due to align */
471	nop;
472
473/*
474 * HW EXCEPTION routine start
475 */
476
/* Common HW-exception entry bookkeeping: normalize MSR state (BIP set,
 * EIP clear, IRQs on, EE on), pick the proper kernel stack depending on
 * whether we trapped from kernel or user mode, and build a full pt_regs
 * frame — including r3/r4 and the exception PC taken from r17. */
477#define SAVE_STATE	\
478	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
479	set_bip;	/*equalize initial state for all possible entries*/\
480	clear_eip;							\
481	enable_irq;							\
482	set_ee;								\
483	/* See if already in kernel mode.*/				\
484	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
485	beqi	r11, 1f;		/* Jump ahead if coming from user */\
486	/* Kernel-mode state save.  */					\
487	/* Reload kernel stack-ptr. */					\
488	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
489	tophys(r1,r11);							\
490	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
491	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
492	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
493	/* store return registers separately because			\
494	 * this macro is used by other exception handlers */		\
495	swi	r3, r1, PTO + PT_R3;					\
496	swi	r4, r1, PTO + PT_R4;					\
497	SAVE_REGS							\
498	/* PC, before IRQ/trap - this is one instruction above */	\
499	swi	r17, r1, PTO+PT_PC;					\
500									\
501	addi	r11, r0, 1; 		/* Was in kernel-mode.  */	\
502	swi	r11, r1, PTO+PT_MODE; 	 				\
503	brid	2f;							\
504	nop;				/* Fill delay slot */		\
5051:	/* User-mode state save.  */					\
506	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
507	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
508	tophys(r1,r1);							\
509	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
510	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
511	tophys(r1,r1);							\
512									\
513	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
514	/* store return registers separately because this macro		\
515	 * is used by other exception handlers */			\
516	swi	r3, r1, PTO + PT_R3; 					\
517	swi	r4, r1, PTO + PT_R4;					\
518	SAVE_REGS							\
519	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
520	swi	r17, r1, PTO+PT_PC;					\
521									\
522	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */		\
523	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
524	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
525	addi	r11, r0, 1;						\
526	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
5272:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
528	/* Save away the syscall number.  */				\
529	swi	r0, r1, PTO+PT_R0;					\
530	tovirt(r1,r1)
531
/* Generic HW exception: save state, then call full_exception(regs,
 * ESR, FSR) in virtual mode; returns via ret_from_exc. */
532C_ENTRY(full_exception_trap):
533	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
534	/* adjust exception address for privileged instruction
535	 * for finding where is it */
536	addik	r17, r17, -4
537	SAVE_STATE /* Save registers */
538	/* FIXME this can be store directly in PT_ESR reg.
539	 * I tested it but there is a fault */
540	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
541	la	r15, r0, ret_from_exc - 8
542	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
543	mfs	r6, resr		/* parameter ESR */
544	nop
545	mfs	r7, rfsr;		/* save FSR */
546	nop
547	la	r12, r0, full_exception
548	set_vms;
549	rtbd	r12, 0;
550	nop;
551
552/*
553 * Unaligned data trap.
554 *
555 * Unaligned data trap last on 4k page is handled here.
556 *
557 * Trap entered via exception, so EE bit is set, and interrupts
558 * are masked.  This is nice, means we don't have to CLI before state save
559 *
560 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
561 */
562C_ENTRY(unaligned_data_trap):
563	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
564	SAVE_STATE		/* Save registers.*/
565	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
566	la	r15, r0, ret_from_exc-8
567	mfs	r3, resr		/* ESR */
568	nop
569	mfs	r4, rear		/* EAR */
570	nop
571	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
572	la	r12, r0, _unaligned_data_exception
573	set_vms;
574	rtbd	r12, 0;	/* interrupts enabled */
575	nop;
576
577/*
578 * Page fault traps.
579 *
580 * If the real exception handler (from hw_exception_handler.S) didn't find
581 * the mapping for the process, then we're thrown here to handle such situation.
582 *
583 * Trap entered via exceptions, so EE bit is set, and interrupts
584 * are masked.  This is nice, means we don't have to CLI before state save
585 *
586 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
587 * will bail out to this point if they can't resolve the lightweight TLB fault.
588 *
589 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
590 * void do_page_fault(struct pt_regs *regs,
591 *				unsigned long address,
592 *				unsigned long error_code)
593 */
594/* data and instruction trap - which one applies is resolved in fault.c */
595C_ENTRY(page_fault_data_trap):
596	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
597	SAVE_STATE		/* Save registers.*/
598	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
599	la	r15, r0, ret_from_exc-8
600	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
601	mfs	r6, rear		/* parameter unsigned long address */
602	nop
603	mfs	r7, resr		/* parameter unsigned long error_code */
604	nop
605	la	r12, r0, do_page_fault
606	set_vms;
607	rtbd	r12, 0;	/* interrupts enabled */
608	nop;
609
/* Instruction-side page fault: like page_fault_data_trap but with a
 * zero error_code (no ESR for instruction fetch faults). */
610C_ENTRY(page_fault_instr_trap):
611	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
612	SAVE_STATE		/* Save registers.*/
613	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
614	la	r15, r0, ret_from_exc-8
615	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
616	mfs	r6, rear		/* parameter unsigned long address */
617	nop
618	ori	r7, r0, 0		/* parameter unsigned long error_code */
619	la	r12, r0, do_page_fault
620	set_vms;
621	rtbd	r12, 0;	/* interrupts enabled */
622	nop;
623
624/* Entry point used to return from an exception.  */
625C_ENTRY(ret_from_exc):
626	set_bip;			/*  Ints masked for state restore*/
627	lwi	r11, r1, PTO+PT_MODE;
628	bnei	r11, 2f;		/* See if returning to kernel mode, */
629					/* ... if so, skip resched &c.  */
630
631	/* We're returning to user mode, so check for various conditions that
632	   trigger rescheduling. */
633	/* Get current task ptr into r11 */
634	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
635	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
636	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
637	andi	r11, r11, _TIF_NEED_RESCHED;
638	beqi	r11, 5f;
639
640/* Call the scheduler before returning from a syscall/trap. */
641	bralid	r15, schedule;	/* Call scheduler */
642	nop;				/* delay slot */
643
644	/* Maybe handle a signal */
6455:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
646	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
647	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
648	andi	r11, r11, _TIF_SIGPENDING;
649	beqi	r11, 1f;		/* No signals pending -> skip handling */
650
651	/*
652	 * Handle a signal return; Pending signals should be in r18.
653	 *
654	 * Not all registers are saved by the normal trap/interrupt entry
655	 * points (for instance, call-saved registers (because the normal
656	 * C-compiler calling sequence in the kernel makes sure they're
657	 * preserved), and call-clobbered registers in the case of
658	 * traps), but signal handlers may want to examine or change the
659	 * complete register state.  Here we save anything not saved by
660	 * the normal entry sequence, so that it may be safely restored
661	 * (in a possibly modified form) after do_signal returns.
662	 * store return registers separately because this macro is used
663	 * by other exception handlers */
664	swi	r3, r1, PTO + PT_R3;
665	swi	r4, r1, PTO + PT_R4;
666	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
667	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
668	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
669	bralid	r15, do_signal;	/* Handle any signals */
670	nop;
671	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
672	lwi	r4, r1, PTO+PT_R4;
673
674/* Finally, return to user state.  */
6751:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
676	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
677	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
678	VM_OFF;
679	tophys(r1,r1);
680
681	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
682	lwi	r4, r1, PTO+PT_R4;
683	RESTORE_REGS;
684	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
685
686	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
687	bri	6f;
688/* Return to kernel state.  */
6892:	VM_OFF;
690	tophys(r1,r1);
691	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
692	lwi	r4, r1, PTO+PT_R4;
693	RESTORE_REGS;
694	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
695
696	tovirt(r1,r1);
6976:
698EXC_return:		/* Make global symbol for debugging */
699	rtbd	r14, 0;	/* Instructions to return from an IRQ */
700	nop;
701
702/*
703 * HW EXCEPTION routine end
704 */
705
706/*
707 * Hardware maskable interrupts.
708 *
709 * The stack-pointer (r1) should have already been saved to the memory
710 * location PER_CPU(ENTRY_SP).
711 */
712C_ENTRY(_interrupt):
713/* MS: we are in physical address */
714/* Save registers, switch to proper stack, convert SP to virtual.*/
715	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
716	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
717	/* MS: See if already in kernel mode. */
718	lwi	r11, r0, TOPHYS(PER_CPU(KM));
719	beqi	r11, 1f; /* MS: Jump ahead if coming from user */
720
721/* Kernel-mode state save. */
722	or	r11, r1, r0
723	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
724/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
725	swi	r11, r1, (PT_R1 - PT_SIZE);
726/* MS: restore r11 because of saving in SAVE_REGS */
727	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
728	/* save registers */
729/* MS: Make room on the stack -> activation record */
730	addik	r1, r1, -STATE_SAVE_SIZE;
731/* MS: store return registers separately because
732 * this macro is used by other exception handlers */
733	swi	r3, r1, PTO + PT_R3;
734	swi	r4, r1, PTO + PT_R4;
735	SAVE_REGS
736	/* MS: store mode */
737	addi	r11, r0, 1; /* MS: Was in kernel-mode. */
738	swi	r11, r1, PTO + PT_MODE; /* MS: and save it */
739	brid	2f;
740	nop; /* MS: Fill delay slot */
741
7421:
743/* User-mode state save. */
744/* MS: restore r11 -> FIXME move before SAVE_REG */
745	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
746 /* MS: get the saved current */
747	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
748	tophys(r1,r1);
749	lwi	r1, r1, TS_THREAD_INFO;
750	addik	r1, r1, THREAD_SIZE;	/* kernel stack = thread_info + THREAD_SIZE */
751	tophys(r1,r1);
752	/* save registers */
753	addik	r1, r1, -STATE_SAVE_SIZE;
754	swi	r3, r1, PTO+PT_R3;
755	swi	r4, r1, PTO+PT_R4;
756	SAVE_REGS
757	/* calculate mode */
758	swi	r0, r1, PTO + PT_MODE;	/* mode 0 = came from user */
759	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
760	swi	r11, r1, PTO+PT_R1;	/* store user SP */
761	/* setup kernel mode to KM */
762	addi	r11, r0, 1;
763	swi	r11, r0, TOPHYS(PER_CPU(KM));
764
7652:
766	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
767	swi	r0, r1, PTO + PT_R0;
768	tovirt(r1,r1)
769	la	r5, r1, PTO;	/* Arg 1 for do_IRQ: struct pt_regs *regs */
770	set_vms;
771	la	r11, r0, do_IRQ;
772	la	r15, r0, irq_call;	/* preset LP so do_IRQ returns to irq_call+8 */
773irq_call:rtbd	r11, 0;
774	nop;
775
776/* MS: we are in virtual mode */
777ret_from_irq:
778	lwi	r11, r1, PTO + PT_MODE;
779	bnei	r11, 2f;	/* non-zero mode -> came from kernel, skip resched */
780
781	add	r11, r0, CURRENT_TASK;
782	lwi	r11, r11, TS_THREAD_INFO;
783	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
784	andi	r11, r11, _TIF_NEED_RESCHED;
785	beqi	r11, 5f
786	bralid	r15, schedule;
787	nop; /* delay slot */
788
789    /* Maybe handle a signal */
7905:	add	r11, r0, CURRENT_TASK;
791	lwi	r11, r11, TS_THREAD_INFO; /* MS: get thread info */
792	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
793	andi	r11, r11, _TIF_SIGPENDING;
794	beqid	r11, no_intr_resched
795/* Handle a signal return; Pending signals should be in r18. */
796	addi	r7, r0, 0; /* Arg 3: int in_syscall (also fills beqid delay slot) */
797	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
798	bralid	r15, do_signal;	/* Handle any signals */
799	add	r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */
800
801/* Finally, return to user state. */
802no_intr_resched:
803    /* Disable interrupts, we are now committed to the state restore */
804	disable_irq
805	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
806	add	r11, r0, CURRENT_TASK;
807	swi	r11, r0, PER_CPU(CURRENT_SAVE);
808	VM_OFF;
809	tophys(r1,r1);
810	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
811	lwi	r4, r1, PTO + PT_R4;
812	RESTORE_REGS
813	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
814	lwi	r1, r1, PT_R1 - PT_SIZE;	/* restore user SP */
815	bri	6f;
816/* MS: Return to kernel state. */
8172:	VM_OFF /* MS: turn off MMU */
818	tophys(r1,r1)
819	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
820	lwi	r4, r1, PTO + PT_R4;
821	RESTORE_REGS
822	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
823	tovirt(r1,r1);
8246:
825IRQ_return: /* MS: Make global symbol for debugging */
826	rtid	r14, 0
827	nop
828
829/*
830 * `Debug' trap
831 *  We enter dbtrap in "BIP" (breakpoint) mode.
832 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
833 *  original dbtrap.
834 *  however, wait to save state first
835 */
836C_ENTRY(_debug_exception):
837	/* BIP bit is set on entry, no interrupts can occur */
838	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
839
840	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
841	set_bip;	/*equalize initial state for all possible entries*/
842	clear_eip;
843	enable_irq;
844	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
845	beqi	r11, 1f;		/* Jump ahead if coming from user */
846	/* Kernel-mode state save.  */
847	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
848	tophys(r1,r11);
849	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
850	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
851
852	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
853	swi	r3, r1, PTO + PT_R3;
854	swi	r4, r1, PTO + PT_R4;
855	SAVE_REGS;
856
857	addi	r11, r0, 1; 		/* Was in kernel-mode.  */
858	swi	r11, r1, PTO + PT_MODE;
859	brid	2f;
860	nop;				/* Fill delay slot */
8611:      /* User-mode state save.  */
862	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
863	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
864	tophys(r1,r1);
865	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
866	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
867	tophys(r1,r1);
868
869	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
870	swi	r3, r1, PTO + PT_R3;
871	swi	r4, r1, PTO + PT_R4;
872	SAVE_REGS;
873
874	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */
875	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
876	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
877	addi	r11, r0, 1;
878	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
8792:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
880	/* Save away the syscall number.  */
881	swi	r0, r1, PTO+PT_R0;
882	tovirt(r1,r1)
883
	/* Deliver SIGTRAP to current via send_sig(SIGTRAP, current, 0) */
884	addi	r5, r0, SIGTRAP		     /* Arg 1: the trap signal */
885	add	r6, r0, CURRENT_TASK; /* Arg 2: current task ptr (into r6) */
886	addk	r7, r0, r0		     /* 3rd param zero */
887
888	set_vms;
889	la	r11, r0, send_sig;
890	la	r15, r0, dbtrap_call;	/* preset LP so send_sig returns below */
891dbtrap_call:	rtbd	r11, 0;
892	nop;
893
894	set_bip;			/*  Ints masked for state restore*/
895	lwi	r11, r1, PTO+PT_MODE;
896	bnei	r11, 2f;
897
898	/* Get current task ptr into r11 */
899	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
900	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
901	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
902	andi	r11, r11, _TIF_NEED_RESCHED;
903	beqi	r11, 5f;
904
905/* Call the scheduler before returning from a syscall/trap. */
906
907	bralid	r15, schedule;	/* Call scheduler */
908	nop;				/* delay slot */
909	/* XXX Is PT_DTRACE handling needed here? */
910	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */
911
912	/* Maybe handle a signal */
9135:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
914	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
915	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
916	andi	r11, r11, _TIF_SIGPENDING;
917	beqi	r11, 1f;		/* No signals pending -> skip handling */
918
919/* Handle a signal return; Pending signals should be in r18.  */
920	/* Not all registers are saved by the normal trap/interrupt entry
921	   points (for instance, call-saved registers (because the normal
922	   C-compiler calling sequence in the kernel makes sure they're
923	   preserved), and call-clobbered registers in the case of
924	   traps), but signal handlers may want to examine or change the
925	   complete register state.  Here we save anything not saved by
926	   the normal entry sequence, so that it may be safely restored
927	   (in a possibly modified form) after do_signal returns.  */
928
929	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
930	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
931	addi  r7, r0, 0;	/* Arg 3: int in_syscall */
932	bralid	r15, do_signal;	/* Handle any signals */
933	nop;
934

936/* Finally, return to user state.  */
9371:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
938	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
939	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
940	VM_OFF;
941	tophys(r1,r1);
942
943	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
944	lwi	r4, r1, PTO+PT_R4;
945	RESTORE_REGS
946	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
947

949	lwi	r1, r1, PT_R1 - PT_SIZE;
950					/* Restore user stack pointer. */
951	bri	6f;
952
953/* Return to kernel state.  */
9542:	VM_OFF;
955	tophys(r1,r1);
956	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
957	lwi	r4, r1, PTO+PT_R4;
958	RESTORE_REGS
959	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
960
961	tovirt(r1,r1);
9626:
963DBTRAP_return:		/* Make global symbol for debugging */
964	rtbd	r14, 0;	/* Instructions to return from an IRQ */
965	nop;
966
967
968
/*
 * Context switch.  Roughly:
 *	struct task_struct *_switch_to(struct thread_info *prev,  // r5
 *				       struct thread_info *next)  // r6
 * Saves prev's callee-saved/dedicated registers and MSR/EAR/ESR/FSR
 * into prev->cpu_context, loads next's, updates r31 (CURRENT_TASK)
 * and PER_CPU(CURRENT_SAVE), and returns the previous task in r3.
 * r11/r12 are used as scratch.
 */
969ENTRY(_switch_to)
970	/* prepare return value */
971	addk	r3, r0, r31
972
973	/* save registers in cpu_context */
974	/* use r11 and r12, volatile registers, as temp register */
975	/* give start of cpu_context for previous process */
976	addik	r11, r5, TI_CPU_CONTEXT
977	swi	r1, r11, CC_R1
978	swi	r2, r11, CC_R2
979	/* skip volatile registers.
980	 * they are saved on stack when we jumped to _switch_to() */
981	/* dedicated registers */
982	swi	r13, r11, CC_R13
983	swi	r14, r11, CC_R14
984	swi	r15, r11, CC_R15
985	swi	r16, r11, CC_R16
986	swi	r17, r11, CC_R17
987	swi	r18, r11, CC_R18
988	/* save non-volatile registers */
989	swi	r19, r11, CC_R19
990	swi	r20, r11, CC_R20
991	swi	r21, r11, CC_R21
992	swi	r22, r11, CC_R22
993	swi	r23, r11, CC_R23
994	swi	r24, r11, CC_R24
995	swi	r25, r11, CC_R25
996	swi	r26, r11, CC_R26
997	swi	r27, r11, CC_R27
998	swi	r28, r11, CC_R28
999	swi	r29, r11, CC_R29
1000	swi	r30, r11, CC_R30
1001	/* special purpose registers */
1002	mfs	r12, rmsr
1003	nop
1004	swi	r12, r11, CC_MSR
1005	mfs	r12, rear
1006	nop
1007	swi	r12, r11, CC_EAR
1008	mfs	r12, resr
1009	nop
1010	swi	r12, r11, CC_ESR
1011	mfs	r12, rfsr
1012	nop
1013	swi	r12, r11, CC_FSR
1014
1015	/* update r31, the current */
1016	lwi	r31, r6, TI_TASK/* give me pointer to task which will be next */
1017	/* stored it to current_save too */
1018	swi	r31, r0, PER_CPU(CURRENT_SAVE)
1019
1020	/* get new process' cpu context and restore */
1021	/* give me start where start context of next task */
1022	addik	r11, r6, TI_CPU_CONTEXT
1023
1024	/* non-volatile registers */
1025	lwi	r30, r11, CC_R30
1026	lwi	r29, r11, CC_R29
1027	lwi	r28, r11, CC_R28
1028	lwi	r27, r11, CC_R27
1029	lwi	r26, r11, CC_R26
1030	lwi	r25, r11, CC_R25
1031	lwi	r24, r11, CC_R24
1032	lwi	r23, r11, CC_R23
1033	lwi	r22, r11, CC_R22
1034	lwi	r21, r11, CC_R21
1035	lwi	r20, r11, CC_R20
1036	lwi	r19, r11, CC_R19
1037	/* dedicated registers */
1038	lwi	r18, r11, CC_R18
1039	lwi	r17, r11, CC_R17
1040	lwi	r16, r11, CC_R16
1041	lwi	r15, r11, CC_R15
1042	lwi	r14, r11, CC_R14
1043	lwi	r13, r11, CC_R13
1044	/* skip volatile registers */
1045	lwi	r2, r11, CC_R2
1046	lwi	r1, r11, CC_R1
1047
1048	/* special purpose registers */
1049	lwi	r12, r11, CC_FSR
1050	mts	rfsr, r12
1051	nop
1052	lwi	r12, r11, CC_MSR
1053	mts	rmsr, r12
1054	nop
1055
	/* return into the restored r15 (next task's saved link register) */
1056	rtsd	r15, 8
1057	nop
1058
/* Reset: jump to the FS-boot entry at physical 0x70. */
1059ENTRY(_reset)
1060	brai	0x70; /* Jump back to FS-boot */
1061
/* NMI/break handler: stash MSR and ESR into fixed r0_ram slots
 * (0x250/0x254) for post-mortem inspection, then spin forever. */
1062ENTRY(_break)
1063	mfs	r5, rmsr
1064	nop
1065	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
1066	mfs	r5, resr
1067	nop
1068	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
1069	bri	0
1070
1071	/* These are compiled and loaded into high memory, then
1072	 * copied into place in mach_early_setup */
1073	.section	.init.ivt, "ax"
1074	.org	0x0
1075	/* this is very important - here is the reset vector */
1076	/* in current MMU branch you don't care what is here - it is
1077	 * used from bootloader site - but this is correct for FS-BOOT */
1078	brai	0x70
1079	nop
1080	brai	TOPHYS(_user_exception); /* syscall handler */
1081	brai	TOPHYS(_interrupt);	/* Interrupt handler */
1082	brai	TOPHYS(_break);		/* nmi trap handler */
1083	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
1084
	/* debug vector lives at fixed offset 0x60 in the vector table */
1085	.org	0x60
1086	brai	TOPHYS(_debug_exception);	/* debug trap handler*/
1087
/* Syscall dispatch table (read-only data), indexed by syscall number
 * in _user_exception above. */
1088.section .rodata,"a"
1089#include "syscall_table.S"
1090
1091syscall_table_size=(.-sys_call_table)
1092
1093