/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
 * Licensed under the GNU GPL.
 * thanks to Philipp Rumpf, Mike Shaver and various others
 * sorry about the wall, puffin..
 */

#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/psw.h>
#include <asm/thread_info.h>

#include <asm/assembly.h>
#include <asm/processor.h>

	/* We fill the empty parts of the gateway page with
	 * something that will kill the kernel or a
	 * userspace application.
	 */
#define KILL_INSN	break	0,0

#ifdef CONFIG_64BIT
	.level          2.0w
#else
	.level		1.1
#endif

	.text

	.import syscall_exit,code
	.import syscall_exit_rfi,code
	.export linux_gateway_page

	/* Linux gateway page is aliased to virtual page 0 in the kernel
	 * address space. Since it is a gateway page it cannot be
	 * dereferenced, so null pointers will still fault. We start
	 * the actual entry point at 0x100. We put break instructions
	 * at the beginning of the page to trap null indirect function
	 * pointers.
	 */
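
	/* For reference: userspace enters the gateway with a sequence
	 * like the following (a sketch of the usual convention; the
	 * real callers live in glibc):
	 *
	 *	ble	0x100(%sr2, %r0)	; branch to 0x100 in gateway space
	 *	ldi	__NR_getpid, %r20	; syscall number, in the delay slot
	 *
	 * Arguments go in r26-r21, the return value comes back in r28,
	 * and ble leaves the return address in r31.
	 */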

	.align ASM_PAGE_SIZE
linux_gateway_page:

        /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
	.rept 44
	KILL_INSN
	.endr

	/* ADDRESS 0xb0 to 0xb4, lws uses 1 insn for entry */
	/* Light-weight-syscall entry must always be located at 0xb0 */
	/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (2)

lws_entry:
	/* Unconditional branch to lws_start, located on the
	   same gateway page */
	b,n	lws_start
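
	/* Userspace reaches this entry with something like (a sketch):
	 *
	 *	ble	0xb0(%sr2, %r0)		; branch to lws_entry
	 *	ldi	0, %r20			; lws number, in the delay slot
	 */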

	/* Fill from 0xb4 to 0xe0 */
	.rept 11
	KILL_INSN
	.endr

	/* This function MUST be located at 0xe0 for glibc's threading
	mechanism to work. DO NOT MOVE THIS CODE EVER! */
set_thread_pointer:
	gate	.+8, %r0		/* increase privilege */
	depi	3, 31, 2, %r31		/* Ensure we return into user mode. */
	be	0(%sr7,%r31)		/* return to user space */
	mtctl	%r26, %cr27		/* move arg0 to the control register */
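
	/* Userspace would set its thread pointer with something like
	 * (a sketch; the value to install travels in r26):
	 *
	 *	ble	0xe0(%sr2, %r0)
	 *	copy	%r3, %r26		; new thread pointer value
	 *
	 * and can read it back directly with "mfctl %cr27, reg", since
	 * cr27 is one of the control registers readable from user mode.
	 */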

	/* Increase the chance of trapping if random jumps occur to this
	address, fill from 0xf0 to 0x100 */
	.rept 4
	KILL_INSN
	.endr

/* This address must remain fixed at 0x100 for glibc's syscalls to work */
	.align 256
linux_gateway_entry:
	gate	.+8, %r0			/* become privileged */
	mtsp	%r0,%sr4			/* get kernel space into sr4 */
	mtsp	%r0,%sr5			/* get kernel space into sr5 */
	mtsp	%r0,%sr6			/* get kernel space into sr6 */
	mfsp    %sr7,%r1                        /* save user sr7 */
	mtsp    %r1,%sr3                        /* and store it in sr3 */

#ifdef CONFIG_64BIT
	/* for now we can *always* set the W bit on entry to the syscall
	 * since we don't support wide userland processes.  We could
	 * also save the current SM other than in r0 and restore it on
	 * exit from the syscall, and also use that value to know
	 * whether to do narrow or wide syscalls. -PB
	 */
	ssm	PSW_SM_W, %r1
	extrd,u	%r1,PSW_W_BIT,1,%r1
	/* sp must be aligned on 4, so deposit the W bit setting into
	 * the bottom of sp temporarily */
	or,ev	%r1,%r30,%r30
	/* or,ev nullifies the branch below when the W bit is zero, so
	 * only narrow processes fall through to the clearing code. */
	b,n	1f
	/* The top halves of the argument registers must be cleared on
	 * syscall entry from a narrow executable.
	 */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
	depdi	0, 31, 32, %r23
	depdi	0, 31, 32, %r22
	depdi	0, 31, 32, %r21
1:
#endif
	mfctl   %cr30,%r1
	/* Three-XOR swap: exchange %r1 (the thread_info pointer from
	 * cr30) and %r30 (the user sp) without a scratch register. */
	xor     %r1,%r30,%r30                   /* ye olde xor trick */
	xor     %r1,%r30,%r1
	xor     %r1,%r30,%r30

	ldo     THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30  /* set up kernel stack */

	/* N.B.: It is critical that we don't set sr7 to 0 until r30
	 *       contains a valid kernel stack pointer. It is also
	 *       critical that we don't start using the kernel stack
	 *       until after sr7 has been set to 0.
	 */

	mtsp	%r0,%sr7			/* get kernel space into sr7 */
	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
	mfctl	%cr30,%r1			/* get task ptr in %r1 */
	LDREG	TI_TASK(%r1),%r1

	/* Save some registers for sigcontext and potential task
	   switch (see entry.S for the details of which ones are
	   saved/restored).  TASK_PT_PSW is zeroed so we can see whether
	   a process is on a syscall or not.  For an interrupt the real
	   PSW value is stored.  This is needed for gdb and sys_ptrace. */
	STREG	%r0,  TASK_PT_PSW(%r1)
	STREG	%r2,  TASK_PT_GR2(%r1)		/* preserve rp */
	STREG	%r19, TASK_PT_GR19(%r1)

	LDREGM	-FRAME_SIZE(%r30), %r2		/* get user's sp back */
#ifdef CONFIG_64BIT
	extrd,u	%r2,63,1,%r19			/* W hidden in bottom bit */
#if 0
	xor	%r19,%r2,%r2			/* clear bottom bit */
	depd,z	%r19,1,1,%r19
	std	%r19,TASK_PT_PSW(%r1)
#endif
#endif
	STREG	%r2,  TASK_PT_GR30(%r1)		/* ... and save it */

	STREG	%r20, TASK_PT_GR20(%r1)		/* Syscall number */
	STREG	%r21, TASK_PT_GR21(%r1)
	STREG	%r22, TASK_PT_GR22(%r1)
	STREG	%r23, TASK_PT_GR23(%r1)		/* 4th argument */
	STREG	%r24, TASK_PT_GR24(%r1)		/* 3rd argument */
	STREG	%r25, TASK_PT_GR25(%r1)		/* 2nd argument */
	STREG	%r26, TASK_PT_GR26(%r1)	 	/* 1st argument */
	STREG	%r27, TASK_PT_GR27(%r1)		/* user dp */
	STREG   %r28, TASK_PT_GR28(%r1)         /* return value 0 */
	STREG   %r28, TASK_PT_ORIG_R28(%r1)     /* return value 0 (saved for signals) */
	STREG	%r29, TASK_PT_GR29(%r1)		/* return value 1 */
	STREG	%r31, TASK_PT_GR31(%r1)		/* preserve syscall return ptr */

	ldo	TASK_PT_FR0(%r1), %r27		/* save fpregs from the kernel */
	save_fp	%r27				/* or potential task switch  */

	mfctl	%cr11, %r27			/* i.e. SAR */
	STREG	%r27, TASK_PT_SAR(%r1)

	loadgp

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
	copy	%r19,%r2			/* W bit back to r2 */
#else
	/* In wide mode the first 8 args are passed in registers, so
	 * nothing needs saving on the stack there; the narrow calling
	 * convention wants the 5th and 6th arguments on the stack. */
	stw     %r22, -52(%r30)                 /* 5th argument */
	stw     %r21, -56(%r30)                 /* 6th argument */
#endif

	/* Are we being ptraced? */
	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	LDREG	TASK_PTRACE(%r1), %r1
	bb,<,n	%r1,31,.Ltracesys

	/* Note!  We cannot use the syscall table that is mapped
	nearby since the gateway page is mapped execute-only. */

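	/* In the 64-bit block below, "or,=" nullifies the following
	 * instruction when %r2 (the W bit saved above) is zero: a
	 * narrow process skips both adjustments and picks up
	 * sys_call_table, while a wide process ends up with
	 * sys_call_table64. */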
#ifdef CONFIG_64BIT
	ldil	L%sys_call_table, %r1
	or,=	%r2,%r2,%r2
	addil	L%(sys_call_table64-sys_call_table), %r1
	ldo	R%sys_call_table(%r1), %r19
	or,=	%r2,%r2,%r2
	ldo	R%sys_call_table64(%r1), %r19
#else
	ldil	L%sys_call_table, %r1
	ldo     R%sys_call_table(%r1), %r19
#endif
	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0	/* in range? */
	b,n	.Lsyscall_nosys				/* no: fail */

	LDREGX  %r20(%r19), %r19			/* fetch the handler */

	/* If this is a sys_rt_sigreturn call, and the signal was received
	 * when not in_syscall, then we want to return via syscall_exit_rfi,
	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
	 * trampoline code in signal.c).
	 */
	ldi	__NR_rt_sigreturn,%r2
	comb,=	%r2,%r20,.Lrt_sigreturn
.Lin_syscall:
	ldil	L%syscall_exit,%r2
	be      0(%sr7,%r19)
	ldo	R%syscall_exit(%r2),%r2
.Lrt_sigreturn:
	comib,<> 0,%r25,.Lin_syscall
	ldil	L%syscall_exit_rfi,%r2
	be      0(%sr7,%r19)
	ldo	R%syscall_exit_rfi(%r2),%r2

	/* Note!  Because we are not running where we were linked, any
	calls to functions external to this file must be indirect.  To
	be safe, we apply the opposite rule to functions within this
	file, with local labels given to them to ensure correctness. */

.Lsyscall_nosys:
syscall_nosys:
	ldil	L%syscall_exit,%r1
	be	R%syscall_exit(%sr7,%r1)
	ldo	-ENOSYS(%r0),%r28		   /* set errno */


/* Warning! This trace code is a virtual duplicate of the code above so be
 * sure to maintain both! */
.Ltracesys:
tracesys:
	/* Need to save more registers so the debugger can see where we
	 * are.  This saves only the lower 8 bits of PSW, so that the C
	 * bit is still clear on syscalls, and the D bit is set if this
	 * full register save path has been executed.  We check the D
	 * bit on syscall_return_rfi to determine which registers to
	 * restore.  An interrupt results in a full PSW saved with the
	 * C bit set; a non-traced syscall entry results in C and D clear
	 * in the saved PSW.
	 */
	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
	LDREG	TI_TASK(%r1), %r1
	ssm	0,%r2
	STREG	%r2,TASK_PT_PSW(%r1)		/* Lower 8 bits only!! */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)
	mfsp	%sr2,%r2
	STREG	%r2,TASK_PT_SR2(%r1)
	mfsp	%sr3,%r2
	STREG	%r2,TASK_PT_SR3(%r1)
	STREG	%r2,TASK_PT_SR4(%r1)
	STREG	%r2,TASK_PT_SR5(%r1)
	STREG	%r2,TASK_PT_SR6(%r1)
	STREG	%r2,TASK_PT_SR7(%r1)
	STREG	%r2,TASK_PT_IASQ0(%r1)
	STREG	%r2,TASK_PT_IASQ1(%r1)
	LDREG	TASK_PT_GR31(%r1),%r2
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	ldo	TASK_REGS(%r1),%r2
	/* reg_save %r2 */
	STREG	%r3,PT_GR3(%r2)
	STREG	%r4,PT_GR4(%r2)
	STREG	%r5,PT_GR5(%r2)
	STREG	%r6,PT_GR6(%r2)
	STREG	%r7,PT_GR7(%r2)
	STREG	%r8,PT_GR8(%r2)
	STREG	%r9,PT_GR9(%r2)
	STREG	%r10,PT_GR10(%r2)
	STREG	%r11,PT_GR11(%r2)
	STREG	%r12,PT_GR12(%r2)
	STREG	%r13,PT_GR13(%r2)
	STREG	%r14,PT_GR14(%r2)
	STREG	%r15,PT_GR15(%r2)
	STREG	%r16,PT_GR16(%r2)
	STREG	%r17,PT_GR17(%r2)
	STREG	%r18,PT_GR18(%r2)
	/* Finished saving things for the debugger */

	ldil	L%syscall_trace,%r1
	ldil	L%tracesys_next,%r2
	be	R%syscall_trace(%sr7,%r1)
	ldo	R%tracesys_next(%r2),%r2

tracesys_next:
	ldil	L%sys_call_table,%r1
	ldo     R%sys_call_table(%r1), %r19

	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
	LDREG	TI_TASK(%r1), %r1
	LDREG   TASK_PT_GR20(%r1), %r20
	LDREG   TASK_PT_GR26(%r1), %r26		/* Restore the user's args */
	LDREG   TASK_PT_GR25(%r1), %r25
	LDREG   TASK_PT_GR24(%r1), %r24
	LDREG   TASK_PT_GR23(%r1), %r23
#ifdef CONFIG_64BIT
	LDREG   TASK_PT_GR22(%r1), %r22
	LDREG   TASK_PT_GR21(%r1), %r21
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0	/* in range? */
	b,n	.Lsyscall_nosys				/* no: fail */

	LDREGX  %r20(%r19), %r19			/* fetch the handler */

	/* If this is a sys_rt_sigreturn call, and the signal was received
	 * when not in_syscall, then we want to return via syscall_exit_rfi,
	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
	 * trampoline code in signal.c).
	 */
	ldi	__NR_rt_sigreturn,%r2
	comb,=	%r2,%r20,.Ltrace_rt_sigreturn
.Ltrace_in_syscall:
	ldil	L%tracesys_exit,%r2
	be      0(%sr7,%r19)
	ldo	R%tracesys_exit(%r2),%r2

	/* Do *not* call this function on the gateway page, because it
	makes a direct call to syscall_trace. */

tracesys_exit:
	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
	LDREG	TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	bl	syscall_trace, %r2
	STREG   %r28,TASK_PT_GR28(%r1)          /* save return value now */
	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
	LDREG	TI_TASK(%r1), %r1
	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return val. */

	ldil	L%syscall_exit,%r1
	be,n	R%syscall_exit(%sr7,%r1)

.Ltrace_rt_sigreturn:
	comib,<> 0,%r25,.Ltrace_in_syscall
	ldil	L%tracesys_sigexit,%r2
	be      0(%sr7,%r19)
	ldo	R%tracesys_sigexit(%r2),%r2

tracesys_sigexit:
	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
	LDREG	TI_TASK(%r1), %r1		/* thread_info->task (offset 0) */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	bl	syscall_trace, %r2
	nop

	ldil	L%syscall_exit_rfi,%r1
	be,n	R%syscall_exit_rfi(%sr7,%r1)


	/*********************************************************
		Light-weight-syscall code

		r20 - lws number
		r26,r25,r24,r23,r22 - Input registers
		r28 - Function return register
		r21 - Error code.

		Scratch: Any of the above that aren't being
		currently used, including r1.

		Return pointer: r31 (Not usable)

		Error codes returned by entry path:

		ENOSYS - r20 was an invalid LWS number.

	*********************************************************/
lws_start:
	/* Gate and ensure we return to userspace */
	gate	.+8, %r0
	depi	3, 31, 2, %r31	/* Ensure we return to userspace */

#ifdef CONFIG_64BIT
	/* FIXME: If we are a 64-bit kernel just
	 *        turn this on unconditionally.
	 */
	ssm	PSW_SM_W, %r1
	extrd,u	%r1,PSW_W_BIT,1,%r1
	/* sp must be aligned on 4, so deposit the W bit setting into
	 * the bottom of sp temporarily */
	or,ev	%r1,%r30,%r30

	/* Clip LWS number to a 32-bit value always */
	depdi	0, 31, 32, %r20
#endif

        /* Is the lws entry number valid? */
	comiclr,>>=	__NR_lws_entries, %r20, %r0
	b,n	lws_exit_nosys

	/* WARNING: Trashing sr2 and sr3 */
	mfsp	%sr7,%r1			/* get userspace into sr3 */
	mtsp	%r1,%sr3
	mtsp	%r0,%sr2			/* get kernel space into sr2 */

	/* Load table start */
	ldil	L%lws_table, %r1
	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
	LDREGX	%r20(%sr2,r28), %r21	/* Scratch use of r21 */

	/* Jump to lws, lws table pointers already relocated */
	be,n	0(%sr2,%r21)

lws_exit_nosys:
	ldo	-ENOSYS(%r0),%r21		   /* set errno */
	/* Fall through: Return to userspace */

lws_exit:
#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n	0(%sr3, %r31)



	/***************************************************
		Implementing CAS as an atomic operation:

		%r26 - Address to examine
		%r25 - Old value to check (old)
		%r24 - New value to set (new)
		%r28 - Return prev through this register.
		%r21 - Kernel error code

		If debugging is DISabled:

		%r21 has the following meanings:

		EAGAIN - CAS is busy, ldcw failed, try again.
		EFAULT - Read or write failed.

		If debugging is enabled:

		EDEADLOCK - CAS called recursively.
		EAGAIN && r28 == 1 - CAS is busy. Lock contended.
		EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
		EFAULT - Read or write failed.

		Scratch: r20, r28, r1

	****************************************************/
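
	/* As a sketch, a userspace 32-bit CAS call following the
	 * convention above would look like:
	 *
	 *	copy	addr, %r26
	 *	copy	old, %r25
	 *	copy	new, %r24
	 *	ble	0xb0(%sr2, %r0)
	 *	ldi	0, %r20			; lws number 0 = 32-bit CAS
	 *
	 * On return %r28 holds the previous value and %r21 the error
	 * code, 0 on success.
	 */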

	/* Do not enable LWS debugging */
#define ENABLE_LWS_DEBUG 0

	/* ELF64 Process entry path */
lws_compare_and_swap64:
#ifdef CONFIG_64BIT
	b,n	lws_compare_and_swap
#else
	/* If we are not a 64-bit kernel, then we don't
	 * implement having 64-bit input registers
	 */
	b,n	lws_exit_nosys
#endif

	/* ELF32 Process entry path */
lws_compare_and_swap32:
#ifdef CONFIG_64BIT
	/* Clip all the input registers */
	depdi	0, 31, 32, %r26
	depdi	0, 31, 32, %r25
	depdi	0, 31, 32, %r24
#endif

lws_compare_and_swap:
#ifdef CONFIG_SMP
	/* Load start of lock table */
	ldil	L%lws_lock_start, %r20
	ldo	R%lws_lock_start(%r20), %r28

	/* Extract four bits from r26 and hash lock (Bits 4-7) */
	extru  %r26, 27, 4, %r20

	/* Find the lock to use: the hash is one of 0 to 15;
	   multiply it by 16 (keeping the locks 16-byte aligned)
	   and add it to the lock table base. */
	shlw	%r20, 4, %r20
	add	%r20, %r28, %r20
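
	/* In C terms, roughly (a sketch):
	 *	lock = lws_lock_start + ((addr >> 4) & 0xf) * 16;
	 * i.e. sixteen 16-byte locks, hashed on address bits 4-7. */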

# if ENABLE_LWS_DEBUG
	/*
		DEBUG, check for deadlock!
		If the thread register values are the same
		then we were the one that locked it last and
		this is a recursive call that will deadlock.
		We *must* give up this call and fail.
	*/
	ldw	4(%sr2,%r20), %r28			/* Load thread register */
	/* WARNING: If cr27 cycles to the same value we have problems */
	mfctl	%cr27, %r21				/* Get current thread register */
	cmpb,<>,n	%r21, %r28, cas_lock		/* Called recursively? */
	b	lws_exit				/* Return error! */
	ldo	-EDEADLOCK(%r0), %r21
cas_lock:
	cmpb,=,n	%r0, %r28, cas_nocontend	/* Is nobody using it? */
	ldo	1(%r0), %r28				/* 1st case */
	b	lws_exit				/* Contended... */
	ldo	-EAGAIN(%r0), %r21			/* Spin in userspace */
cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */

	LDCW	0(%sr2,%r20), %r28			/* Try to acquire the lock */
	cmpb,<>,n	%r0, %r28, cas_action		/* Did we get it? */
cas_wouldblock:
	ldo	2(%r0), %r28				/* 2nd case */
	b	lws_exit				/* Contended... */
	ldo	-EAGAIN(%r0), %r21			/* Spin in userspace */
#endif
/* CONFIG_SMP */

	/*
		prev = *addr;
		if ( prev == old )
		  *addr = new;
		return prev;
	*/

	/* NOTES:
		This all works because intr_do_signal
		and schedule both check the return iasq
		and see that we are on the kernel page,
		so this process is never scheduled off
		nor is it ever sent any signal of any sort,
		and thus it is wholly atomic from userspace's
		perspective.
	*/
cas_action:
#if defined(CONFIG_SMP) && ENABLE_LWS_DEBUG
	/* DEBUG */
	mfctl	%cr27, %r1
	stw	%r1, 4(%sr2,%r20)
#endif
	/* The load and store could fail */
1:	ldw	0(%sr3,%r26), %r28
	sub,<>	%r28, %r25, %r0		/* skip the store if prev != old */
2:	stw	%r24, 0(%sr3,%r26)
#ifdef CONFIG_SMP
	/* Free lock */
	stw	%r20, 0(%sr2,%r20)
# if ENABLE_LWS_DEBUG
	/* Clear thread register indicator */
	stw	%r0, 4(%sr2,%r20)
# endif
#endif
	/* Return to userspace, set no error */
	b	lws_exit
	copy	%r0, %r21

3:
	/* Error occurred on the load or store */
#ifdef CONFIG_SMP
	/* Free lock */
	stw	%r20, 0(%sr2,%r20)
# if ENABLE_LWS_DEBUG
	stw	%r0, 4(%sr2,%r20)
# endif
#endif
	b	lws_exit
	ldo	-EFAULT(%r0),%r21	/* set errno */
	nop
	nop
	nop
	nop

	/* Two exception table entries, one for the load and one for
	   the store.  Each is a pair of gateway-page-relative words:
	   the faulting address followed by the fixup address, 3b,
	   which returns -EFAULT.  Each entry must be relocated. */
	.section __ex_table,"aw"
#ifdef CONFIG_64BIT
	/* Pad the address calculation */
	.word	0,(2b - linux_gateway_page)
	.word	0,(3b - linux_gateway_page)
#else
	.word	(2b - linux_gateway_page)
	.word	(3b - linux_gateway_page)
#endif
	.previous

	.section __ex_table,"aw"
#ifdef CONFIG_64BIT
	/* Pad the address calculation */
	.word	0,(1b - linux_gateway_page)
	.word	0,(3b - linux_gateway_page)
#else
	.word	(1b - linux_gateway_page)
	.word	(3b - linux_gateway_page)
#endif
	.previous

end_compare_and_swap:

	/* Make sure nothing else is placed on this page */
	.align ASM_PAGE_SIZE
	.export end_linux_gateway_page
end_linux_gateway_page:

	/* Relocate symbols assuming linux_gateway_page is mapped
	   to virtual address 0x0 */
#ifdef CONFIG_64BIT
	/* FIXME: The code will always be on the gateway page
		  and thus within the first 4k, but the
		  assembler seems to think that the final
		  subtraction result is only a word in
		  length, so we pad the value.
	*/
#define LWS_ENTRY(_name_) .word 0,(lws_##_name_ - linux_gateway_page)
#else
#define LWS_ENTRY(_name_) .word  (lws_##_name_ - linux_gateway_page)
#endif

	.section .rodata,"a"

	.align ASM_PAGE_SIZE
	/* Light-weight-syscall table */
	/* Start of lws table. */
	.export lws_table
.Llws_table:
lws_table:
	LWS_ENTRY(compare_and_swap32)	/* 0 - ELF32 Atomic compare and swap */
	LWS_ENTRY(compare_and_swap64)	/* 1 - ELF64 Atomic compare and swap */
	/* End of lws table */

	.align ASM_PAGE_SIZE
	.export sys_call_table
.Lsys_call_table:
sys_call_table:
#include "syscall_table.S"

#ifdef CONFIG_64BIT
	.align ASM_PAGE_SIZE
	.export sys_call_table64
.Lsys_call_table64:
sys_call_table64:
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
#endif

#ifdef CONFIG_SMP
	/*
		All light-weight-syscall atomic operations
		will use this set of locks
	*/
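	/*
		Each lock word starts out as 1 (free).  ldcw atomically
		reads a word and writes 0, so a nonzero result means the
		lock was acquired; storing any nonzero word back (the CAS
		code stores the lock address itself) releases it.
	*/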
	.section .data
	.align 4096
	.export lws_lock_start
.Llws_lock_start:
lws_lock_start:
	/* lws locks */
	.align 16
	.rept 16
	/* Keep locks aligned at 16-bytes */
	.word 1
	.word 0
	.word 0
	.word 0
	.endr
	.previous
#endif
/* CONFIG_SMP for lws_lock_start */

.end