xref: /linux/arch/microblaze/kernel/entry.S (revision e27ecdd94d81e5bc3d1f68591701db5adb342f0d)
1/*
2 * Low-level system-call handling, trap handlers and context-switching
3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002	NEC Corporation
8 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
13 *
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
16 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
/* The size of a state save frame: the pt_regs area plus the
 * argument-spill space kept below it for callees. */
#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Declare a global, 4-byte-aligned entry symbol callable from C. */
#define C_ENTRY(name)	.globl name; .align 4; name
41
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary using microblaze version that
 * allows msr ops to write to BIP
 *
 * All of these macros clobber r11 (used as scratch for the MSR value).
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	/* NOTE(review): this variant sets UMS and clears VMS, while the
	 * non-MSR variant below sets VMS and clears UMS (identical to
	 * set_vms).  The two branches disagree — confirm which is the
	 * intended behaviour for VM_ON. */
	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm

	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS
	nop
	msrclr	r11, MSR_UMS
	nop
	.endm
#else
/* No msrset/msrclr instructions: emulate with mfs/mts read-modify-write. */
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	/* NOTE(review): sets VMS / clears UMS — same body as set_vms, but
	 * opposite to the MSR-instruction variant of set_ums above; verify. */
	.macro set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	nop
	.endm
#endif
173
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 *
 * Both macros use rted so the new MSR mode bits take effect atomically
 * with the branch to the local label.
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;		\
	rted	r0, 2f;	\
2: nop;

/* turn off virtual protected mode save and user mode save*/
#define VM_OFF			\
	clear_vms_ums;			\
	rted	r0, TOPHYS(1f);	\
1: nop;
190
/* Store the general register set into the pt_regs frame at r1+PTO.
 * r14 (trap-time PC) goes to PT_PC; MSR is captured last via r11,
 * so callers must have reloaded the trap-time r11 beforehand
 * (its copy at PT_R11 is stored before r11 is clobbered here). */
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;
221
/* Reload MSR and the general register set from the pt_regs frame at
 * r1+PTO.  Inverse of SAVE_REGS; r14 receives the return PC and r3/r4
 * (return values) are intentionally NOT restored here. */
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr , r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
252
253.text
254
/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * Entered in real (physical) mode; switches to the kernel stack if we
 * arrived from user mode, saves full state, then dispatches through
 * sys_call_table in virtual mode with ret_from_trap as return address.
 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */

	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1; 		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */

/* User-mode state save.  */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;			/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	/* get saved current */
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

	la	r15, r0, ret_from_trap-8
/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid.  */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11,1f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;

	/* Trace syscalls: bump the per-syscall-number counter kept in r0_ram */
	lwi	r3, r12, 0x400 + TOPHYS(r0_ram)
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + TOPHYS(r0_ram)

	lwi	r12, r12, TOPHYS(sys_call_table); /* Function ptr */
	/* Make the system call.  to r12*/
	set_vms;
	rtid	r12, 0;
	nop;
	/* The syscall number is invalid, return an error.  */
1:	VM_ON;	/* RETURN() expects virtual mode*/
	addi	r3, r0, -ENOSYS;
	rtsd	r15,8;		/* looks like a normal subroutine return */
	or 	r0, r0, r0
340
341
/* Entry point used to return from a syscall/trap.  */
/* We re-enable BIP bit before state restore */
/* On the user-return path this checks TIF_NEED_RESCHED and
 * TIF_SIGPENDING before dropping back to user mode; the kernel-return
 * path (PT_MODE != 0) skips both. Syscall result lives in r3. */
C_ENTRY(ret_from_trap):
	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c.  */
	bnei	r11, 2f;

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	swi	r3, r1, PTO + PT_R3; /* store syscall result */
	swi	r4, r1, PTO + PT_R4;
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending: skip to return */

	swi	r3, r1, PTO + PT_R3; /* store syscall result */
	swi	r4, r1, PTO + PT_R4;
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK;	/* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
403	nop;
404
405
406/* These syscalls need access to the struct pt_regs on the stack, so we
407   implement them in assembly (they're basically all wrappers anyway).  */
408
/* sys_fork wrapper: build the do_fork(flags, sp, regs, ...) argument
 * list from the saved trap frame and tail-call do_fork.
 * Fork uses the parent's stack pointer and a SIGCHLD exit signal. */
C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;
418
/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look as if the child is in the
   middle of a syscall.  This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;
431
/* sys_vfork wrapper: pass the saved pt_regs as arg 1 and tail-call. */
C_ENTRY(sys_vfork_wrapper):
	la	r5, r1, PTO
	brid	sys_vfork	/* Do real work (tail-call) */
	nop
436
/* sys_clone wrapper: default a NULL child SP to the parent's saved SP,
 * supply the pt_regs pointer, and tail-call do_fork. */
C_ENTRY(sys_clone_wrapper):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
1:	la	r7, r1, PTO;			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;
446
/* sys_execve wrapper: append the saved pt_regs as the 4th argument. */
C_ENTRY(sys_execve_wrapper):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	sys_execve;	/* Do real work (tail-call).*/
	nop;
451
/* sys_sigsuspend wrapper: preserve r3/r4 in the frame around the call,
 * pass pt_regs as 2nd arg, then return via ret_from_trap. */
C_ENTRY(sys_sigsuspend_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers to the frame */
	swi	r4, r1, PTO+PT_R4;
	la	r6, r1, PTO;		/* add user context as 2nd arg */
	bralid	r15, sys_sigsuspend; /* Do real work.*/
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;
462
/* sys_rt_sigsuspend wrapper: preserve r3/r4 around the call, pass
 * pt_regs as 3rd arg, then return via ret_from_trap. */
C_ENTRY(sys_rt_sigsuspend_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers to the frame */
	swi	r4, r1, PTO+PT_R4;
	la	r7, r1, PTO;		/* add user context as 3rd arg */
	brlid	r15, sys_rt_sigsuspend;	/* Do real work.*/
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;
473
474
/* sys_sigreturn wrapper: preserve r3/r4 around the call, pass pt_regs
 * as 1st arg, then return via ret_from_trap. */
C_ENTRY(sys_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers to the frame */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_sigreturn;	/* Do real work.*/
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;
485
/* sys_rt_sigreturn wrapper: preserve r3/r4 around the call, pass
 * pt_regs as 1st arg, then return via ret_from_trap. */
C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers to the frame */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;
496
/*
 * HW EXCEPTION routine start
 */
500
/* Common HW-exception entry: save the full CPU state into a pt_regs
 * frame on the kernel stack, switching stacks if we came from user
 * mode.  r11 is stashed in the r0_ram scratch area first; r17 holds
 * the exception-time PC and overwrites the PT_PC slot SAVE_REGS
 * filled from r14.  Exits with r1 = virtual pointer to the frame. */
#define SAVE_STATE	\
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip;	/*equalize initial state for all possible entries*/\
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode.*/				\
	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
	beqi	r11, 1f;		/* Jump ahead if coming from user */\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because			\
	 * this macro is used by several exception paths */		\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1; 		/* Was in kernel-mode.  */	\
	swi	r11, r1, PTO+PT_MODE; 	 				\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save.  */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because this macro		\
	 * is used by several exception paths */			\
	swi	r3, r1, PTO + PT_R3; 					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
	addi	r11, r0, 1;						\
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	/* Save away the syscall number.  */				\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)
555
/* General HW exception entry: save state, then dispatch to the C
 * handler full_exception(regs, ESR, FSR) in virtual mode, returning
 * through ret_from_exc. */
C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME this can be store directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;
	nop;
575
/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 *
 * Dispatches to _unaligned_data_exception(ESR in r3, EAR in r4,
 * pt_regs in r7) and returns through ret_from_exc.
 */
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
600
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one occurred is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
633
/* Instruction-side page fault: like page_fault_data_trap but passes a
 * zero error_code to do_page_fault. */
C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;
647
/* Entry point used to return from an exception.  */
/* Mirrors ret_from_trap but also restores the r3/r4 pair that the
 * exception entry saved separately into the frame. */
C_ENTRY(ret_from_exc):
	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending: skip to return */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns.
	 * store return registers separately because this macro is used
	 * by several exception paths */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
725
/*
 * HW EXCEPTION routine end
 */
729
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 *
 * Saves state (switching to the kernel stack if we came from user
 * mode), calls do_IRQ(regs) in virtual mode, then on the user-return
 * path checks need-resched and pending signals before restoring.
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(KM));
	beqi	r11, 1f; /* MS: Jump ahead if coming from user */

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP - position PT_R1 to next stack frame 4 *1 - 152*/
	swi	r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
/* MS: store return registers separately because
 * this macro is used by several exception paths */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1; /* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE; /* MS: and save it */
	brid	2f;
	nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REG */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r3, r1, PTO+PT_R3;
	swi	r4, r1, PTO+PT_R4;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	/* setup kernel mode to KM */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));

2:
	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;
irq_call:rtbd	r11, 0;
	nop;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	add	r11, r0, CURRENT_TASK;
	lwi	r11, r11, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

    /* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK;
	lwi	r11, r11, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched /* MS: no signal pending -> plain return */
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
    /* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
	add	r11, r0, CURRENT_TASK;
	swi	r11, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
852
/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.
 *  however, wait to save state first
 *
 *  Saves full state (open-coded, like SAVE_STATE but without the
 *  r17/PT_PC overwrite), then delivers SIGTRAP to the current task via
 *  send_sig and returns through the usual resched/signal checks.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
	set_bip;	/*equalize initial state for all possible entries*/
	clear_eip;
	enable_irq;
	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
	/* Kernel-mode state save.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	addi	r11, r0, 1; 		/* Was in kernel-mode.  */
	swi	r11, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:      /* User-mode state save.  */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	/* Save away the syscall number.  */
	swi	r0, r1, PTO+PT_R0;
	tovirt(r1,r1)

	addi	r5, r0, SIGTRAP		     /* send the trap signal */
	add	r6, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	addk	r7, r0, r0		     /* 3rd param zero */

	set_vms;
	la	r11, r0, send_sig;
	la	r15, r0, dbtrap_call;
dbtrap_call:	rtbd	r11, 0;
	nop;

	set_bip;			/*  Ints masked for state restore*/
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;

	/* Get current task ptr into r11 */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */

	/* Maybe handle a signal */
5:	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	lwi	r11, r11, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending: skip to return */

/* Handle a signal return; Pending signals should be in r18.  */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns.  */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
	addi  r7, r0, 0;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	nop;


/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	add	r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
	swi	r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */


	lwi	r1, r1, PT_R1 - PT_SIZE;
					/* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
990
991
992
/*
 * _switch_to — switch CPU context from one task to the next.
 *
 * Register interface (as used by the code below):
 *   r5  = thread_info pointer of the outgoing (previous) task
 *   r6  = thread_info pointer of the incoming (next) task
 *   r3  = return value: the outgoing current task (taken from r31)
 *
 * The non-volatile and dedicated registers are saved into the previous
 * task's cpu_context (at r5 + TI_CPU_CONTEXT) and reloaded from the next
 * task's cpu_context (at r6 + TI_CPU_CONTEXT).  Volatile registers are
 * not touched here; per the comments below they were already saved on
 * the stack by the caller.
 */
993ENTRY(_switch_to)
994	/* prepare return value: r3 = outgoing current task (r31) */
995	addk	r3, r0, r31
996
997	/* save registers in cpu_context */
998	/* use r11 and r12, volatile registers, as temp register */
999	/* r11 = start of cpu_context for the previous (outgoing) task */
1000	addik	r11, r5, TI_CPU_CONTEXT
1001	swi	r1, r11, CC_R1
1002	swi	r2, r11, CC_R2
1003	/* skip volatile registers.
1004	 * they are saved on stack when we jumped to _switch_to() */
1005	/* dedicated registers */
1006	swi	r13, r11, CC_R13
1007	swi	r14, r11, CC_R14
1008	swi	r15, r11, CC_R15
1009	swi	r16, r11, CC_R16
1010	swi	r17, r11, CC_R17
1011	swi	r18, r11, CC_R18
1012	/* save non-volatile registers */
1013	swi	r19, r11, CC_R19
1014	swi	r20, r11, CC_R20
1015	swi	r21, r11, CC_R21
1016	swi	r22, r11, CC_R22
1017	swi	r23, r11, CC_R23
1018	swi	r24, r11, CC_R24
1019	swi	r25, r11, CC_R25
1020	swi	r26, r11, CC_R26
1021	swi	r27, r11, CC_R27
1022	swi	r28, r11, CC_R28
1023	swi	r29, r11, CC_R29
1024	swi	r30, r11, CC_R30
1025	/* special purpose registers.
1026	 * each mfs is followed by a nop — presumably a pipeline hazard
1027	 * slot required after SPR reads on this core; keep the pairing. */
1028	mfs	r12, rmsr
1029	nop
1030	swi	r12, r11, CC_MSR
1031	mfs	r12, rear
1032	nop
1033	swi	r12, r11, CC_EAR
1034	mfs	r12, resr
1035	nop
1036	swi	r12, r11, CC_ESR
1037	mfs	r12, rfsr
1038	nop
1039	swi	r12, r11, CC_FSR
1040	/* NOTE(review): EAR and ESR are saved here but never written back
1041	 * in the restore sequence below — only FSR and MSR are restored. */
1042
1043	/* update r31, the current-task register */
1044	lwi	r31, r6, TI_TASK/* give me pointer to task which will be next */
1045	/* store it to the per-CPU CURRENT_SAVE slot too, so exception
1046	 * entry paths pick up the new current task */
1047	swi	r31, r0, PER_CPU(CURRENT_SAVE)
1048
1049	/* get new process' cpu context and restore */
1050	/* r11 = start of cpu_context of the next (incoming) task */
1051	addik	r11, r6, TI_CPU_CONTEXT
1052
1053	/* non-volatile registers */
1054	lwi	r30, r11, CC_R30
1055	lwi	r29, r11, CC_R29
1056	lwi	r28, r11, CC_R28
1057	lwi	r27, r11, CC_R27
1058	lwi	r26, r11, CC_R26
1059	lwi	r25, r11, CC_R25
1060	lwi	r24, r11, CC_R24
1061	lwi	r23, r11, CC_R23
1062	lwi	r22, r11, CC_R22
1063	lwi	r21, r11, CC_R21
1064	lwi	r20, r11, CC_R20
1065	lwi	r19, r11, CC_R19
1066	/* dedicated registers */
1067	lwi	r18, r11, CC_R18
1068	lwi	r17, r11, CC_R17
1069	lwi	r16, r11, CC_R16
1070	lwi	r15, r11, CC_R15
1071	lwi	r14, r11, CC_R14
1072	lwi	r13, r11, CC_R13
1073	/* skip volatile registers */
1074	lwi	r2, r11, CC_R2
1075	lwi	r1, r11, CC_R1
1076
1077	/* special purpose registers: restore FSR first, MSR last (MSR
1078	 * restore may re-enable interrupts for the incoming task) */
1079	lwi	r12, r11, CC_FSR
1080	mts	rfsr, r12
1081	nop
1082	lwi	r12, r11, CC_MSR
1083	mts	rmsr, r12
1084	nop
1085
1086	/* return to the incoming task's saved r15 (standard subroutine
1087	 * return: rtsd r15, 8 with the nop filling the delay slot) */
1088	rtsd	r15, 8
1089	nop
1082
/*
 * _reset — software reset entry: jump to physical address 0x70,
 * which (per the vector-table comment below) is the FS-BOOT
 * bootloader re-entry point.  Absolute branch; never returns.
 */
1083ENTRY(_reset)
1084	brai	0x70; /* Jump back to FS-boot */
1085
/*
 * _break — non-maskable break/trap handler of last resort.
 * Dumps MSR and ESR into fixed scratch slots in r0_ram (physical
 * offsets 0x250 and 0x254) so they can be inspected post-mortem,
 * then halts: `bri 0` is a PC-relative branch with offset 0, i.e.
 * a branch-to-self spin — the CPU never leaves this handler.
 */
1086ENTRY(_break)
1087	mfs	r5, rmsr
1088	nop				/* hazard slot after SPR read */
1089	swi	r5, r0, 0x250 + TOPHYS(r0_ram)	/* scratch slot: saved MSR */
1090	mfs	r5, resr
1091	nop				/* hazard slot after SPR read */
1092	swi	r5, r0, 0x254 + TOPHYS(r0_ram)	/* scratch slot: saved ESR */
1093	bri	0			/* spin forever */
1094
	/* Interrupt/exception vector table.
	 * These are compiled and loaded into high memory, then
	 * copied into place (low memory) in mach_early_setup.
	 * Layout is position-dependent: each `brai` stub must land at
	 * the hardware vector offset for its event (reset at 0x0,
	 * debug at 0x60 via the explicit .org; the intermediate stubs
	 * presumably fall at the architected vector offsets in between
	 * — confirm against the MicroBlaze vector map). */
1097	.section	.init.ivt, "ax"
1098	.org	0x0
1099	/* this is very important - here is the reset vector */
1100	/* in the current MMU branch you don't care what is here - it is
1101	 * used from the bootloader side - but this is correct for FS-BOOT */
1102	brai	0x70
1103	nop
1104	brai	TOPHYS(_user_exception); /* syscall handler */
1105	brai	TOPHYS(_interrupt);	/* Interrupt handler */
1106	brai	TOPHYS(_break);		/* nmi trap handler */
1107	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
1108
1109	.org	0x60
1110	brai	TOPHYS(_debug_exception);	/* debug trap handler*/
1111
/* System-call dispatch table.  The table entries themselves live in
 * syscall_table.S (which defines sys_call_table); it is placed in
 * read-only data since it is never modified at runtime. */
1112.section .rodata,"a"
1113#include "syscall_table.S"
1114
1115/* Size of the table in bytes, for bounds-checking syscall numbers. */
1116syscall_table_size=(.-sys_call_table)
1116
1117