xref: /linux/arch/microblaze/kernel/entry.S (revision 913df4453f85f1fe79b35ecf3c9a0c0b707d22a2)
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002	NEC Corporation
8 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
/* The size of a state save frame: pt_regs plus the C argument spill area. */
#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Emit a global, 4-byte-aligned label for a C-callable entry point. */
#define C_ENTRY(name)	.globl name; .align 4; name
41
42/*
43 * Various ways of setting and clearing BIP in flags reg.
44 * This is mucky, but necessary using microblaze version that
45 * allows msr ops to write to BIP
46 */
47#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
48	.macro	clear_bip
49	msrclr	r11, MSR_BIP
50	nop
51	.endm
52
53	.macro	set_bip
54	msrset	r11, MSR_BIP
55	nop
56	.endm
57
58	.macro	clear_eip
59	msrclr	r11, MSR_EIP
60	nop
61	.endm
62
63	.macro	set_ee
64	msrset	r11, MSR_EE
65	nop
66	.endm
67
68	.macro	disable_irq
69	msrclr	r11, MSR_IE
70	nop
71	.endm
72
73	.macro	enable_irq
74	msrset	r11, MSR_IE
75	nop
76	.endm
77
78	.macro	set_ums
79	msrset	r11, MSR_UMS
80	nop
81	msrclr	r11, MSR_VMS
82	nop
83	.endm
84
85	.macro	set_vms
86	msrclr	r11, MSR_UMS
87	nop
88	msrset	r11, MSR_VMS
89	nop
90	.endm
91
92	.macro	clear_vms_ums
93	msrclr	r11, MSR_VMS
94	nop
95	msrclr	r11, MSR_UMS
96	nop
97	.endm
98#else
99	.macro	clear_bip
100	mfs	r11, rmsr
101	nop
102	andi	r11, r11, ~MSR_BIP
103	mts	rmsr, r11
104	nop
105	.endm
106
107	.macro	set_bip
108	mfs	r11, rmsr
109	nop
110	ori	r11, r11, MSR_BIP
111	mts	rmsr, r11
112	nop
113	.endm
114
115	.macro	clear_eip
116	mfs	r11, rmsr
117	nop
118	andi	r11, r11, ~MSR_EIP
119	mts	rmsr, r11
120	nop
121	.endm
122
123	.macro	set_ee
124	mfs	r11, rmsr
125	nop
126	ori	r11, r11, MSR_EE
127	mts	rmsr, r11
128	nop
129	.endm
130
131	.macro	disable_irq
132	mfs	r11, rmsr
133	nop
134	andi	r11, r11, ~MSR_IE
135	mts	rmsr, r11
136	nop
137	.endm
138
139	.macro	enable_irq
140	mfs	r11, rmsr
141	nop
142	ori	r11, r11, MSR_IE
143	mts	rmsr, r11
144	nop
145	.endm
146
147	.macro set_ums
148	mfs	r11, rmsr
149	nop
150	ori	r11, r11, MSR_VMS
151	andni	r11, r11, MSR_UMS
152	mts	rmsr, r11
153	nop
154	.endm
155
156	.macro	set_vms
157	mfs	r11, rmsr
158	nop
159	ori	r11, r11, MSR_VMS
160	andni	r11, r11, MSR_UMS
161	mts	rmsr, r11
162	nop
163	.endm
164
165	.macro	clear_vms_ums
166	mfs	r11, rmsr
167	nop
168	andni	r11, r11, (MSR_VMS|MSR_UMS)
169	mts	rmsr,r11
170	nop
171	.endm
172#endif
173
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save
 * (rted makes the new UMS/VMS settings take effect when it "returns"
 * to the local label in its delay-slot sequence) */
#define VM_ON		\
	set_ums;		\
	rted	r0, 2f;	\
2: nop;

/* turn off virtual protected mode save and user mode save
 * (continues at the physical address of the local label) */
#define VM_OFF			\
	clear_vms_ums;			\
	rted	r0, TOPHYS(1f);	\
1: nop;
190
/* SAVE_REGS: store r2, r5-r13, r15, r18-r31 and MSR into the pt_regs frame
 * at r1+PTO; r14 (the pre-trap PC) is stored into PT_PC.  r1 must already
 * point at the state-save frame.  Clobbers r11 at the end (used to read
 * MSR) — callers that need the entry-time r11 must have reloaded it into
 * r11 before invoking this macro so the stored PT_R11 value is correct. */
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;
221
/* RESTORE_REGS: inverse of SAVE_REGS — reload MSR first (via r11), then
 * the saved GPRs from the pt_regs frame at r1+PTO (r14 comes back from
 * PT_PC).  Note r3/r4 are NOT restored here; paths that need them reload
 * them separately before invoking this macro. */
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr , r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
252
253.text
254
255/*
256 * User trap.
257 *
258 * System calls are handled here.
259 *
260 * Syscall protocol:
261 * Syscall number in r12, args in r5-r10
262 * Return value in r3
263 *
264 * Trap entered via brki instruction, so BIP bit is set, and interrupts
265 * are masked. This is nice, means we don't have to CLI before state save
266 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */

	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1; 		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save.  */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;			/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	/* get saved current */
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	add	r11, r0, CURRENT_TASK	 /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	/* Tracer active: pre-set -ENOSYS as the result, then let
	 * do_syscall_trace_enter() inspect/modify the registers. */
	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11,5f;
	/* NOTE(review): bgei is a signed test, so a negative syscall number
	 * falls through and indexes the table with a negative offset —
	 * confirm r12 can never be negative at this point. */
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;

	/* Trace syscalls: bump a per-syscall-number counter kept in r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error.  */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15,8;		/* looks like a normal subroutine return */
	or 	r0, r0, r0
371
372
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c.  */
	bnei	r11, 2f;

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	# FIXME: Restructure all these flag checks.
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	/* Tracer active: preserve the syscall result in pt_regs around
	 * the do_syscall_trace_leave() call (pt_regs pointer in r5). */
	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4
	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
	lwi	r3, r1, PTO + PT_R3
	lwi	r4, r1, PTO + PT_R4
1:

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	swi	r3, r1, PTO + PT_R3; /* store syscall result */
	swi	r4, r1, PTO + PT_R4;
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> skip do_signal */

	swi	r3, r1, PTO + PT_R3; /* store syscall result */
	swi	r4, r1, PTO + PT_R4;
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
452
453
454/* These syscalls need access to the struct pt_regs on the stack, so we
455   implement them in assembly (they're basically all wrappers anyway).  */
456
/* sys_fork wrapper: build do_fork(SIGCHLD, parent_sp, regs, 0, 0, 0)
 * from the trap frame and tail-call do_fork (return goes straight back
 * to ret_from_trap via r15). */
C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;
466
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context).  */
C_ENTRY(ret_from_fork):
	/* Running in the new child's context; r5 holds the previous task
	 * handed over by switch_thread. */
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;
479
/* vfork: tail-call the C helper, passing the trap frame. */
C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	la	r5, r1, PTO		/* delay slot: r5 = struct pt_regs * */
483
C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
1:	la	r7, r1, PTO;			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;
493
/* execve: user args are already in r5-r7; append the trap frame. */
C_ENTRY(sys_execve):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	nop;
498
C_ENTRY(sys_rt_sigsuspend_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 around the call */
	swi	r4, r1, PTO+PT_R4;
	la	r7, r1, PTO;		/* add user context as 3rd arg */
	brlid	r15, sys_rt_sigsuspend;	/* Do real work.*/
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;
509
C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 around the call */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;
520
521/*
522 * HW EXCEPTION rutine start
523 */
524
/* SAVE_STATE: common state-save sequence for the HW exception entries
 * below.  Like the syscall entry path, but additionally preserves r3/r4
 * and records r17 (the exception return address) as PT_PC.  Every caller
 * stores r1 to PER_CPU(ENTRY_SP) immediately before invoking this. */
#define SAVE_STATE	\
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip;	/*equalize initial state for all possible entries*/\
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode.*/				\
	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
	beqi	r11, 1f;		/* Jump ahead if coming from user */\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because			\
	 * this macro is shared by several exception handlers */	\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1; 		/* Was in kernel-mode.  */	\
	swi	r11, r1, PTO+PT_MODE; 	 				\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save.  */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because this macro		\
	 * is shared by several exception handlers */			\
	swi	r3, r1, PTO + PT_R3; 					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
	addi	r11, r0, 1;						\
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	/* Save away the syscall number.  */				\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)
579
C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME: ESR could be stored directly into the PT_ESR slot;
	 * tested, but it faulted */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
	mfs	r6, resr		 /* parameter: ESR */
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;		/* enter full_exception() in virtual mode */
	nop;
601
602/*
603 * Unaligned data trap.
604 *
605 * Unaligned data trap last on 4k page is handled here.
606 *
607 * Trap entered via exception, so EE bit is set, and interrupts
608 * are masked.  This is nice, means we don't have to CLI before state save
609 *
610 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
611 */
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	/* r3/r4 were preserved by SAVE_STATE, so they are free for args */
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
626
627/*
628 * Page fault traps.
629 *
630 * If the real exception handler (from hw_exception_handler.S) didn't find
631 * the mapping for the process, then we're thrown here to handle such situation.
632 *
633 * Trap entered via exceptions, so EE bit is set, and interrupts
634 * are masked.  This is nice, means we don't have to CLI before state save
635 *
636 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
637 * will bail out to this point if they can't resolve the lightweight TLB fault.
638 *
639 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
640 * void do_page_fault(struct pt_regs *regs,
641 *				unsigned long address,
642 *				unsigned long error_code)
643 */
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
659
C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code:
					 * 0 marks an instruction fault */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
673
/* Entry point used to return from an exception.  */
C_ENTRY(ret_from_exc):
	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> skip do_signal */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns.
	 * store return registers separately because this path is shared
	 * by several exception handlers */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
751
752/*
753 * HW EXCEPTION rutine end
754 */
755
756/*
757 * Hardware maskable interrupts.
758 *
759 * The stack-pointer (r1) should have already been saved to the memory
760 * location PER_CPU(ENTRY_SP).
761 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(KM));
	beqi	r11, 1f; /* MS: Jump ahead if coming from user */

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
	swi	r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
/* MS: store return registers separately because
 * this path is shared with the exception handlers */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1; /* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE; /* MS: and save it */
	brid	2f;
	nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REG */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;	/* MS: top of the kernel stack */
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r3, r1, PTO+PT_R3;
	swi	r4, r1, PTO+PT_R4;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;	/* MS: 0 == came from user mode */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;	/* MS: store user SP */
	/* setup kernel mode to KM */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));

2:
	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;		/* MS: Arg 1: struct pt_regs * */
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;	/* MS: return address = irq_call */
irq_call:rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* MS: kernel mode -> skip resched */

	add	r11, r0, CURRENT_TASK;
	lwi	r11, r11, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

    /* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK;
	lwi	r11, r11, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched /* MS: no signals pending -> skip */
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall (in beqid delay slot) */
	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
    /* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
	add	r11, r0, CURRENT_TASK;
	swi	r11, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* MS: restore user stack pointer */
	bri	6f;
/* MS: Return to kernel state. */
2:	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
878
879/*
880 * `Debug' trap
881 *  We enter dbtrap in "BIP" (breakpoint) mode.
882 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
883 *  original dbtrap.
884 *  however, wait to save state first
885 */
886C_ENTRY(_debug_exception):
887	/* BIP bit is set on entry, no interrupts can occur */
888	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
889
890	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
891	set_bip;	/*equalize initial state for all possible entries*/
892	clear_eip;
893	enable_irq;
894	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
895	beqi	r11, 1f;		/* Jump ahead if coming from user */
896	/* Kernel-mode state save.  */
897	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
898	tophys(r1,r11);
899	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
900	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
901
902	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
903	swi	r3, r1, PTO + PT_R3;
904	swi	r4, r1, PTO + PT_R4;
905	SAVE_REGS;
906
907	addi	r11, r0, 1; 		/* Was in kernel-mode.  */
908	swi	r11, r1, PTO + PT_MODE;
909	brid	2f;
910	nop;				/* Fill delay slot */
9111:      /* User-mode state save.  */
912	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
913	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
914	tophys(r1,r1);
915	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
916	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
917	tophys(r1,r1);
918
919	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
920	swi	r3, r1, PTO + PT_R3;
921	swi	r4, r1, PTO + PT_R4;
922	SAVE_REGS;
923
924	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */
925	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
926	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
927	addi	r11, r0, 1;
928	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
9292:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
930	/* Save away the syscall number.  */
931	swi	r0, r1, PTO+PT_R0;
932	tovirt(r1,r1)
933
934	addi	r5, r0, SIGTRAP		     /* send the trap signal */
935	add	r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
936	addk	r7, r0, r0		     /* 3rd param zero */
937
938	set_vms;
939	la	r11, r0, send_sig;
940	la	r15, r0, dbtrap_call;
941dbtrap_call:	rtbd	r11, 0;
942	nop;
943
944	set_bip;			/*  Ints masked for state restore*/
945	lwi	r11, r1, PTO+PT_MODE;
946	bnei	r11, 2f;
947
948	/* Get current task ptr into r11 */
949	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
950	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
951	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
952	andi	r11, r11, _TIF_NEED_RESCHED;
953	beqi	r11, 5f;
954
955/* Call the scheduler before returning from a syscall/trap. */
956
957	bralid	r15, schedule;	/* Call scheduler */
958	nop;				/* delay slot */
959	/* XXX Is PT_DTRACE handling needed here? */
960	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */
961
962	/* Maybe handle a signal */
9635:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
964	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
965	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
966	andi	r11, r11, _TIF_SIGPENDING;
967	beqi	r11, 1f;		/* Signals to handle, handle them */
968
969/* Handle a signal return; Pending signals should be in r18.  */
970	/* Not all registers are saved by the normal trap/interrupt entry
971	   points (for instance, call-saved registers (because the normal
972	   C-compiler calling sequence in the kernel makes sure they're
973	   preserved), and call-clobbered registers in the case of
974	   traps), but signal handlers may want to examine or change the
975	   complete register state.  Here we save anything not saved by
976	   the normal entry sequence, so that it may be safely restored
977	   (in a possibly modified form) after do_signal returns.  */
978
979	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
980	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
981	addi  r7, r0, 0;	/* Arg 3: int in_syscall */
982	bralid	r15, do_signal;	/* Handle any signals */
983	nop;
984
985
986/* Finally, return to user state.  */
9871:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
988	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
989	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
990	VM_OFF;
991	tophys(r1,r1);
992
993	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
994	lwi	r4, r1, PTO+PT_R4;
995	RESTORE_REGS
996	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
997
998
999	lwi	r1, r1, PT_R1 - PT_SIZE;
1000					/* Restore user stack pointer. */
1001	bri	6f;
1002
1003/* Return to kernel state.  */
10042:	VM_OFF;
1005	tophys(r1,r1);
1006	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
1007	lwi	r4, r1, PTO+PT_R4;
1008	RESTORE_REGS
1009	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
1010
1011	tovirt(r1,r1);
10126:
1013DBTRAP_return:		/* Make global symbol for debugging */
1014	rtbd	r14, 0;	/* Instructions to return from an IRQ */
1015	nop;
1016
1017
1018
/*
 * struct task_struct *_switch_to(struct thread_info *prev,
 *				   struct thread_info *next);
 *
 * Hardware context switch.
 * In:   r5 = thread_info of the outgoing task
 *       r6 = thread_info of the incoming task
 * Out:  r3 = pointer to the previous task_struct (taken from r31, which
 *       holds the current task by this file's convention)
 * Only the callee-saved and dedicated registers are stored in
 * prev->cpu_context; the volatile registers were already saved on the
 * stack before we were called.
 */
ENTRY(_switch_to)
	/* prepare return value: r31 = current (outgoing) task */
	addk	r3, r0, r31

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* r11 = &prev->cpu_context, base for all CC_* stores below */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1		/* kernel stack pointer */
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers (r14-r17 are the MicroBlaze return-address
	 * registers, r18 the reserved temporary -- TODO confirm per ABI) */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile (callee-saved) registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers; each mfs is followed by a nop to
	 * cover the special-register read hazard (file-wide convention) */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31, the current task pointer */
	lwi	r31, r6, TI_TASK	/* task which will run next */
	/* store it to current_save too, so trap entry code finds it */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore it */
	/* r11 = &next->cpu_context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1		/* switch to new task's kernel stack */

	/* special purpose registers.
	 * NOTE(review): only FSR and MSR are written back here; the
	 * CC_EAR/CC_ESR values saved above are never restored (EAR/ESR
	 * are rewritten by hardware on the next exception anyway --
	 * confirm that is the intent).
	 * MSR is restored last, presumably because it may re-enable
	 * interrupts for the incoming context. */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	/* return into the new task: r15 is its saved link register;
	 * "+8" skips the branch instruction and its delay slot */
	rtsd	r15, 8
	nop				/* delay slot */
1108
/*
 * _reset - software reset path.
 * Branches to absolute address 0x70, the FS-BOOT (first-stage
 * bootloader) entry point, matching the reset vector in .init.ivt.
 */
ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */
1111
/*
 * _break - hardware break / NMI trap handler (wired into .init.ivt).
 * Dumps MSR and ESR into fixed scratch slots inside r0_ram for
 * post-mortem inspection, then spins forever.
 * NOTE(review): the 0x250/0x254 offsets look like ad-hoc free slots in
 * r0_ram -- confirm against the r0_ram layout before reusing them.
 */
ENTRY(_break)
	mfs	r5, rmsr		/* nop covers the mfs read hazard */
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)	/* saved MSR */
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)	/* saved ESR */
	bri	0			/* PC-relative 0: loop here forever */
1120
	/* Interrupt/exception vector table.
	 * These are compiled and loaded into high memory, then
	 * copied into place (low memory) in mach_early_setup.
	 * NOTE(review): each far `brai` is expected to assemble to an
	 * imm+brai pair (8 bytes), giving the hardware's fixed vector
	 * pitch: 0x00 reset, 0x08 user exception/syscall, 0x10 interrupt,
	 * 0x18 break/NMI, 0x20 HW exception, 0x60 debug -- confirm. */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
1137
/* System-call dispatch table, placed in read-only data. */
.section .rodata,"a"
#include "syscall_table.S"	/* provides sys_call_table -- presumably one
				 * word per syscall number; verify in the
				 * included file */

/* total size in bytes of sys_call_table, from its label to here */
syscall_table_size=(.-sys_call_table)
1142
1143