/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002	NEC Corporation
 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG
/* The size of a state save frame. */
#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 bytes: the space for args */
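
/*
 * A state save frame therefore looks like this (low to high address):
 * r1 -> [STATE_SAVE_ARG_SPACE bytes of argument space][struct pt_regs]
 * so pt_regs fields are addressed as r1 + PTO + PT_xxx throughout.
 */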

#define C_ENTRY(name)	.globl name; .align 4; name
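/* Note: the trailing colon is supplied at each use site, so that
 * 'C_ENTRY(foo):' expands to '.globl foo; .align 4; foo:' */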

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary on MicroBlaze versions that
 * allow msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	clear_bip
	msrclr	r11, MSR_BIP
	nop
	.endm

	.macro	set_bip
	msrset	r11, MSR_BIP
	nop
	.endm

	.macro	clear_eip
	msrclr	r11, MSR_EIP
	nop
	.endm

	.macro	set_ee
	msrset	r11, MSR_EE
	nop
	.endm

	.macro	disable_irq
	msrclr	r11, MSR_IE
	nop
	.endm

	.macro	enable_irq
	msrset	r11, MSR_IE
	nop
	.endm

	.macro	set_ums
	msrset	r11, MSR_UMS
	nop
	msrclr	r11, MSR_VMS
	nop
	.endm
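/* FIXME: this msr variant of set_ums sets UMS and clears VMS, while the
 * non-msr variant below sets VMS and clears UMS; VM_ON ("turn on virtual
 * mode") appears to want the latter behaviour. */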

	.macro	set_vms
	msrclr	r11, MSR_UMS
	nop
	msrset	r11, MSR_VMS
	nop
	.endm

	.macro	clear_vms_ums
	msrclr	r11, MSR_VMS
	nop
	msrclr	r11, MSR_UMS
	nop
	.endm
#else
	.macro	clear_bip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	nop
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	nop
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	nop
	.endm

	.macro	set_ums
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	nop
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	nop
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	nop
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr, r11
	nop
	.endm
#endif
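/* Without msrclr/msrset each helper above is a read-modify-write of rmsr
 * through r11, which is why the call helpers below document r11 as
 * clobbered. */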

/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF
 */

/* turn on virtual protected mode save */
#define VM_ON		\
	set_ums;		\
	rted	r0, 2f;	\
2: nop;

/* turn off virtual protected mode save and user mode save */
#define VM_OFF			\
	clear_vms_ums;			\
	rted	r0, TOPHYS(1f);	\
1: nop;
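/* rted (like rtid/rtbd) copies the MSR UMS/VMS "save" bits into UM/VM as
 * it branches, so the mode programmed by set_ums/clear_vms_ums takes
 * effect exactly at the target label. */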

#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	nop;								\
	swi	r11, r1, PTO+PT_MSR;
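/* Note: SAVE_REGS deliberately skips r0 (constant zero), r1 (stack
 * pointer, stored separately as PT_R1), r3/r4 (return values, stored by
 * the individual entry paths) and r16/r17 (break/exception PCs, handled
 * by the entries that use them). */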

#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr, r11;						\
	nop;								\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */

.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* Save r11 */

	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
/* Kernel-mode state save. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
	SAVE_REGS

	addi	r11, r0, 1;		/* Was in kernel-mode. */
	swi	r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
	brid	2f;
	nop;				/* Fill delay slot */
/* User-mode state save.  */
1:
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11);	/* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
/* calculate kernel stack pointer from task struct (THREAD_SIZE is 8k) */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS

	swi	r0, r1, PTO+PT_MODE;			/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)
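/* r0 is hardwired to zero, so the PT_R0 slot is free; it is reused to
 * stash the syscall number across the trace/dispatch code below. */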

	# Step into virtual mode.
	set_vms;
	addik	r11, r0, 3f
	rtid	r11, 0
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return. */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;
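/* sys_call_table entries are 4 bytes wide; the two adds above compute
 * r12 * 4 without relying on the (optional) barrel shifter. */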

#ifdef DEBUG
	/* Trace syscalls and store the counts in r0_ram */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* the handler returns via 'rtsd r15, 8', so bias r15 by -8 */
	la	r15, r0, ret_from_trap-8
	bra	r12
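/* rtsd r15, 8 returns to r15 + 8; with r15 = ret_from_trap - 8 every
 * handler's normal 'rtsd r15, 8' epilogue lands exactly on
 * ret_from_trap. The same -8 trick is used for ret_from_exc below. */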

	/* The syscall number is invalid, return an error.  */
5:
	addi	r3, r0, -ENOSYS;
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	or	r0, r0, r0	/* NOP in the delay slot */


/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c.  */
	bnei	r11, 2f;

	swi	r3, r1, PTO + PT_R3
	swi	r4, r1, PTO + PT_R4

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending, skip */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */
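/* Note: the instruction after bralid sits in its delay slot and executes
 * before do_signal is entered, so Arg 2 is set up despite appearing
 * after the call. */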

/* Finally, return to user state.  */
1:
	lwi	r3, r1, PTO + PT_R3; /* restore syscall result */
	lwi	r4, r1, PTO + PT_R4;

	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;


/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway).  */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	la	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;
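/* brid is a plain branch, not branch-and-link: r15 still holds
 * ret_from_trap - 8 from the dispatch above, so do_fork's own
 * 'rtsd r15, 8' returns straight to ret_from_trap. */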

/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall.  This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
	brid	ret_from_trap;	/* Do normal trap return */
	nop;

C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	la	r5, r1, PTO

C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO+PT_R1;	/* If so, use parent's stack ptr */
1:	la	r7, r1, PTO;			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	nop;

C_ENTRY(sys_execve):
	la	r8, r1, PTO;		/* add user context as 4th arg */
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	nop;

C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* save r3, r4 registers */
	swi	r4, r1, PTO+PT_R4;
	la	r5, r1, PTO;		/* add user context as 1st arg */
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	nop;
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri	ret_from_trap /* fall through will not work here due to align */
	nop;

/*
 * HW EXCEPTION routine start
 */

#define SAVE_STATE	\
	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */	\
	set_bip;	/* equalize initial state for all possible entries */\
	clear_eip;							\
	enable_irq;							\
	set_ee;								\
	/* See if already in kernel mode.*/				\
	lwi	r11, r0, TOPHYS(PER_CPU(KM));				\
	beqi	r11, 1f;		/* Jump ahead if coming from user */\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	tophys(r1,r11);							\
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */	\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because			\
	 * this macro is used for other exceptions */			\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above */	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	addi	r11, r0, 1;		/* Was in kernel-mode.  */	\
	swi	r11, r1, PTO+PT_MODE;					\
	brid	2f;							\
	nop;				/* Fill delay slot */		\
1:	/* User-mode state save.  */					\
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */\
	tophys(r1,r1);							\
									\
	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */\
	/* store return registers separately because this macro	\
	 * is used for other exceptions */				\
	swi	r3, r1, PTO + PT_R3;					\
	swi	r4, r1, PTO + PT_R4;					\
	SAVE_REGS							\
	/* PC, before IRQ/trap - this is one instruction above FIXME*/	\
	swi	r17, r1, PTO+PT_PC;					\
									\
	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */		\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
	addi	r11, r0, 1;						\
	swi	r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));	\
	/* Save away the syscall number.  */				\
	swi	r0, r1, PTO+PT_R0;					\
	tovirt(r1,r1)
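/* Unlike the syscall path, hardware exceptions record r17 (set by the
 * CPU to the exception return address) as PT_PC, and r3/r4 are saved
 * explicitly since SAVE_REGS skips them. */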

C_ENTRY(full_exception_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	/* adjust exception address for privileged instruction
	 * to find out where it is */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* FIXME this could be stored directly in the PT_ESR reg.
	 * It was tested, but there is a fault */
	/* the handler returns via 'rtsd r15, 8', so bias r15 by -8 */
	la	r15, r0, ret_from_exc - 8
	la	r5, r1, PTO		 /* parameter struct pt_regs * regs */
	mfs	r6, resr
	nop
	mfs	r7, rfsr;		/* save FSR */
	nop
	mts	rfsr, r0;	/* Clear sticky fsr */
	nop
	la	r12, r0, full_exception
	set_vms;
	rtbd	r12, 0;
	nop;
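/* Standard pattern for entering a C handler from an exception: set_vms
 * programs VMS=1/UMS=0, then rtbd branches to the handler, switching the
 * CPU to virtual mode (and clearing BIP) as it jumps. */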

/*
 * Unaligned data trap.
 *
 * An unaligned data access that lands at the end of a 4k page is
 * handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* the handler returns via 'rtsd r15, 8', so bias r15 by -8 */
	la	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	nop
	mfs	r4, rear		/* EAR */
	nop
	la	r7, r1, PTO		/* parameter struct pt_regs * regs */
	la	r12, r0, _unaligned_data_exception
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle the fault.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it was is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* the handler returns via 'rtsd r15, 8', so bias r15 by -8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	mfs	r7, resr		/* parameter unsigned long error_code */
	nop
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

C_ENTRY(page_fault_instr_trap):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	SAVE_STATE		/* Save registers.*/
	/* the handler returns via 'rtsd r15, 8', so bias r15 by -8 */
	la	r15, r0, ret_from_exc-8
	la	r5, r1, PTO		/* parameter struct pt_regs * regs */
	mfs	r6, rear		/* parameter unsigned long address */
	nop
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	la	r12, r0, do_page_fault
	set_vms;
	rtbd	r12, 0;	/* interrupts enabled */
	nop;

/* Entry point used to return from an exception.  */
C_ENTRY(ret_from_exc):
	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending, skip */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns.
	 */
	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an exception */
	nop;

/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	swi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: See if already in kernel mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(KM));
	beqi	r11, 1f; /* MS: Jump ahead if coming from user */

/* Kernel-mode state save. */
	or	r11, r1, r0
	tophys(r1,r11); /* MS: I have in r1 physical address where stack is */
/* MS: Save original SP at position PT_R1 of the next stack frame */
	swi	r11, r1, (PT_R1 - PT_SIZE);
/* MS: restore r11 because of saving in SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
/* MS: store return registers separately because
 * this macro is used for other exceptions */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS
	/* MS: store mode */
	addi	r11, r0, 1; /* MS: Was in kernel-mode. */
	swi	r11, r1, PTO + PT_MODE; /* MS: and save it */
	brid	2f;
	nop; /* MS: Fill delay slot */

1:
/* User-mode state save. */
/* MS: restore r11 -> FIXME move before SAVE_REGS */
	lwi	r11, r0, TOPHYS(PER_CPU(R11_SAVE));
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	swi	r3, r1, PTO+PT_R3;
	swi	r4, r1, PTO+PT_R4;
	SAVE_REGS
	/* MS: mode 0 - was in user-mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	/* setup kernel mode to KM */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));

2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	swi	r0, r1, PTO + PT_R0;
	tovirt(r1,r1)
	la	r5, r1, PTO;
	set_vms;
	la	r11, r0, do_IRQ;
	la	r15, r0, irq_call;
irq_call:rtbd	r11, 0;
	nop;
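/* Trick: r15 is pointed at the irq_call label itself, so when do_IRQ
 * returns with 'rtsd r15, 8' it lands at irq_call + 8, i.e. on
 * ret_from_irq just below. */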

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched /* MS: no signals pending, skip */
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	la	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off, that's why we call preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	lwi	r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
	lwi	r4, r1, PTO + PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
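/* rtid re-enables interrupts (MSR[IE]) as it returns, which is why the
 * disable_irq before the state restore above is safe. */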

/*
 * `Debug' trap
 *  We enter dbtrap in "BIP" (breakpoint) mode.
 *  So we exit the breakpoint mode with an 'rtbd' and proceed with the
 *  original dbtrap.
 *  However, wait to save state first.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	swi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* Save r11 */
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	enable_irq;
	lwi	r11, r0, TOPHYS(PER_CPU(KM));/* See if already in kernel mode.*/
	beqi	r11, 1f;		/* Jump ahead if coming from user */
	/* Kernel-mode state save.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	tophys(r1,r11);
	swi	r11, r1, (PT_R1-PT_SIZE); /* Save original SP. */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	addi	r11, r0, 1;		/* Was in kernel-mode.  */
	swi	r11, r1, PTO + PT_MODE;
	brid	2f;
	nop;				/* Fill delay slot */
1:	/* User-mode state save.  */
	lwi	r11, r0, TOPHYS(r0_ram + PTO + PT_R11); /* restore r11 */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	swi	r3, r1, PTO + PT_R3;
	swi	r4, r1, PTO + PT_R4;
	SAVE_REGS;

	swi	r0, r1, PTO+PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
	addi	r11, r0, 1;
	swi	r11, r0, TOPHYS(PER_CPU(KM));	/* Now we're in kernel-mode.  */
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number.  */
	swi	r0, r1, PTO+PT_R0;
	tovirt(r1,r1)

	addi	r5, r0, SIGTRAP		     /* Arg 1: send the trap signal */
	add	r6, r0, CURRENT_TASK;	     /* Arg 2: current task ptr */
	addk	r7, r0, r0		     /* Arg 3: zero */

	set_vms;
	la	r11, r0, send_sig;
	la	r15, r0, dbtrap_call;
dbtrap_call:	rtbd	r11, 0;
	nop;
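/* Same trick as irq_call above: send_sig returns via 'rtsd r15, 8' to
 * dbtrap_call + 8, i.e. to the set_bip sequence below. */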

	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PTO+PT_MODE;
	bnei	r11, 2f;

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */
	/* XXX Is PT_DTRACE handling needed here? */
	/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here.  */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* No signals pending, skip */

/* Handle a signal return; Pending signals should be in r18.  */
	/* Not all registers are saved by the normal trap/interrupt entry
	   points (for instance, call-saved registers (because the normal
	   C-compiler calling sequence in the kernel makes sure they're
	   preserved), and call-clobbered registers in the case of
	   traps), but signal handlers may want to examine or change the
	   complete register state.  Here we save anything not saved by
	   the normal entry sequence, so that it may be safely restored
	   (in a possibly modified form) after do_signal returns.  */

	la	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */


/* Finally, return to user state.  */
1:	swi	r0, r0, PER_CPU(KM);	/* Now officially in user state. */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state.  */
2:	VM_OFF;
	tophys(r1,r1);
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
DBTRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from a debug trap */
	nop;

ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on the stack when we jump to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	nop
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	nop
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	nop
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	nop
	swi	r12, r11, CC_FSR

	/* update r31 (current task): pointer to the task which will run next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* get the start of the next task's cpu_context */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	nop
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	nop

	rtsd	r15, 8
	nop
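/* r15 was just restored from the new task's cpu_context, so this
 * 'rtsd r15, 8' returns into the new task, typically to the point where
 * it last called _switch_to (or to ret_from_fork for a new thread). */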

ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

ENTRY(_break)
	mfs	r5, rmsr
	nop
	swi	r5, r0, 0x250 + TOPHYS(r0_ram)
	mfs	r5, resr
	nop
	swi	r5, r0, 0x254 + TOPHYS(r0_ram)
	bri	0	/* spin here forever */

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_break);		/* nmi trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.org	0x60
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
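/* Vector layout (8 bytes per slot): 0x00 reset, 0x08 user exception
 * (syscall), 0x10 interrupt, 0x18 break/NMI, 0x20 HW exception, and
 * 0x60 debug (placed by the .org above). */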

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)