xref: /linux/arch/parisc/kernel/syscall.S (revision c8d430db8eec7d4fd13a6bea27b7086a54eda6da)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * System call entry code / Linux gateway page
5 * Copyright (c) Matthew Wilcox 1999 <willy@infradead.org>
6 * Licensed under the GNU GPL.
7 * thanks to Philipp Rumpf, Mike Shaver and various others
8 * sorry about the wall, puffin..
9 */
10
11/*
12How does the Linux gateway page on PA-RISC work?
13------------------------------------------------
14The Linux gateway page on PA-RISC is "special".
15It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
16terminology it's Execute, promote to PL0) in the page map.  So anything
17executing on this page executes with kernel level privilege (there's more to it
18than that: to have this happen, you also have to use a branch with a ,gate
19completer to activate the privilege promotion).  The upshot is that everything
20that runs on the gateway page runs at kernel privilege but with the current
21user process address space (although you have access to kernel space via %sr2).
22For the 0x100 syscall entry, we redo the space registers to point to the kernel
23address space (preserving the user address space in %sr3), move to wide mode if
24required, save the user registers and branch into the kernel syscall entry
25point.  For all the other functions, we execute at kernel privilege but don't
26flip address spaces. The basic upshot of this is that these code snippets are
27executed atomically (because the kernel can't be pre-empted) and they may
28perform architecturally forbidden (to PL3) operations (like setting control
29registers).
30*/
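
/*
 * Illustrative sketch (added for clarity, not part of the build): userspace
 * normally reaches the 0x100 entry point below through a plain external
 * branch; the gate instruction at the entry performs the actual privilege
 * promotion.  A getpid(2) stub looks roughly like this (a sketch, not a
 * verbatim copy of any particular libc):
 *
 *	ble	0x100(%sr2, %r0)	; enter the gateway page, return address in %r31
 *	ldi	__NR_getpid, %r20	; syscall number in %r20 (branch delay slot)
 *	; on return, %r28 holds the result; values in [-4095, -1] are conventionally -errno
 */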
31
32
33#include <asm/asm-offsets.h>
34#include <asm/unistd.h>
35#include <asm/errno.h>
36#include <asm/page.h>
37#include <asm/psw.h>
38#include <asm/thread_info.h>
39#include <asm/assembly.h>
40#include <asm/processor.h>
41#include <asm/cache.h>
42#include <asm/spinlock_types.h>
43
44#include <linux/linkage.h>
45
46	/* We fill the empty parts of the gateway page with
47 	 * something that will kill the kernel or a
48 	 * userspace application.
49	 */
50#define KILL_INSN	break	0,0
51
52	.level          PA_ASM_LEVEL
53
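	/* The two macros below mirror the C helpers pagefault_disable() and
	 * pagefault_enable(): they increment or decrement the current task's
	 * pagefault_disabled counter (reached through the task pointer held in
	 * %cr30, at the asm-offsets generated TASK_PAGEFAULT_DISABLED offset),
	 * so a fault inside an LWS critical section takes the exception fixup
	 * path instead of sleeping.  (Descriptive note, added for clarity.)
	 */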
54	.macro	lws_pagefault_disable reg1,reg2
55	mfctl	%cr30, \reg2
56	ldo	TASK_PAGEFAULT_DISABLED(\reg2), \reg2
57	ldw	0(%sr2,\reg2), \reg1
58	ldo	1(\reg1), \reg1
59	stw	\reg1, 0(%sr2,\reg2)
60	.endm
61
62	.macro	lws_pagefault_enable reg1,reg2
63	mfctl	%cr30, \reg2
64	ldo	TASK_PAGEFAULT_DISABLED(\reg2), \reg2
65	ldw	0(%sr2,\reg2), \reg1
66	ldo	-1(\reg1), \reg1
67	stw	\reg1, 0(%sr2,\reg2)
68	.endm
69
70	/* raise exception if spinlock content is not zero or
71	 * __ARCH_SPIN_LOCK_UNLOCKED_VAL */
72	.macro	spinlock_check spin_val,tmpreg
73#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
74	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmpreg
75	andcm,=	\spin_val, \tmpreg, %r0
76	.word	SPINLOCK_BREAK_INSN
77#endif
78	.endm
79
80	.text
81
82	.import syscall_exit,code
83	.import syscall_exit_rfi,code
84
85	/* Linux gateway page is aliased to virtual page 0 in the kernel
86	 * address space. Since it is a gateway page it cannot be
87	 * dereferenced, so null pointers will still fault. We start
88	 * the actual entry point at 0x100. We put break instructions
89	 * at the beginning of the page to trap null indirect function
90	 * pointers.
91	 */
92
93	.align PAGE_SIZE
94ENTRY(linux_gateway_page)
95
96        /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
97	.rept 44
98	KILL_INSN
99	.endr
100
101	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
102	/* Light-weight-syscall entry must always be located at 0xb0 */
103	/* WARNING: Keep this number updated with table size changes */
104#define __NR_lws_entries (5)
105
106lws_entry:
107	gate	lws_start, %r0		/* increase privilege */
108	depi	PRIV_USER, 31, 2, %r31	/* Ensure we return into user mode. */
109
110	/* Fill from 0xb8 to 0xe0 */
111	.rept 10
112	KILL_INSN
113	.endr
114
115	/* This function MUST be located at 0xe0 for glibc's threading
116	mechanism to work. DO NOT MOVE THIS CODE EVER! */
117set_thread_pointer:
118	gate	.+8, %r0		/* increase privilege */
119	depi	PRIV_USER, 31, 2, %r31	/* Ensure we return into user mode. */
120	be	0(%sr7,%r31)		/* return to user space */
121	mtctl	%r26, %cr27		/* move arg0 to the control register */
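	/* Illustrative sketch (added for clarity): a threading library would
	 * typically install and later read the thread pointer roughly as
	 * follows; this is a sketch, not a verbatim libc sequence, and tp is
	 * a placeholder name:
	 *
	 *	ble	0xe0(%sr2, %r0)		; call set_thread_pointer above
	 *	copy	tp, %r26		; new thread pointer in %r26 (delay slot)
	 *	...
	 *	mfctl	%cr27, %r1		; reading %cr27 back works at user privilege
	 */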
122
123	/* Increase the chance of trapping if random jumps occur to this
124	address, fill from 0xf0 to 0x100 */
125	.rept 4
126	KILL_INSN
127	.endr
128
129/* This address must remain fixed at 0x100 for glibc's syscalls to work */
130	.align LINUX_GATEWAY_ADDR
131linux_gateway_entry:
132	gate	.+8, %r0			/* become privileged */
133	mtsp	%r0,%sr4			/* get kernel space into sr4 */
134	mtsp	%r0,%sr5			/* get kernel space into sr5 */
135	mtsp	%r0,%sr6			/* get kernel space into sr6 */
136
137#ifdef CONFIG_64BIT
138	/* Store W bit on entry to the syscall in case it's a wide userland
139	 * process. */
140	ssm	PSW_SM_W, %r1
141	extrd,u	%r1,PSW_W_BIT,1,%r1
142	/* sp must be aligned on 4, so deposit the W bit setting into
143	 * the bottom of sp temporarily */
144	or,ev	%r1,%r30,%r30
145	b,n	1f
146	/* The top halves of argument registers must be cleared on syscall
147	 * entry from narrow executable.
148	 */
149	depdi	0, 31, 32, %r26
150	depdi	0, 31, 32, %r25
151	depdi	0, 31, 32, %r24
152	depdi	0, 31, 32, %r23
153	depdi	0, 31, 32, %r22
154	depdi	0, 31, 32, %r21
1551:
156#endif
157
158	/* We use a rsm/ssm pair to prevent sr3 from being clobbered
159	 * by external interrupts.
160	 */
161	mfsp    %sr7,%r1                        /* save user sr7 */
162	rsm	PSW_SM_I, %r0			/* disable interrupts */
163	mtsp    %r1,%sr3                        /* and store it in sr3 */
164
165	mfctl   %cr30,%r1
166	xor     %r1,%r30,%r30                   /* ye olde xor trick */
167	xor     %r1,%r30,%r1
168	xor     %r1,%r30,%r30
169
170	LDREG	TASK_STACK(%r30),%r30		/* set up kernel stack */
171	ldo	FRAME_SIZE(%r30),%r30
172	/* N.B.: It is critical that we don't set sr7 to 0 until r30
173	 *       contains a valid kernel stack pointer. It is also
174	 *       critical that we don't start using the kernel stack
175	 *       until after sr7 has been set to 0.
176	 */
177
178	mtsp	%r0,%sr7			/* get kernel space into sr7 */
179	ssm	PSW_SM_I, %r0			/* enable interrupts */
180	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
181	mfctl	%cr30,%r1			/* get task ptr in %r1 */
182
183	/* Save some registers for sigcontext and potential task
184	   switch (see entry.S for the details of which ones are
185	   saved/restored).  TASK_PT_PSW is zeroed so we can see whether
186	   a process is on a syscall or not.  For an interrupt the real
187	   PSW value is stored.  This is needed for gdb and sys_ptrace. */
188	STREG	%r0,  TASK_PT_PSW(%r1)
189	STREG	%r2,  TASK_PT_GR2(%r1)		/* preserve rp */
190	STREG	%r19, TASK_PT_GR19(%r1)
191
192	LDREGM	-FRAME_SIZE(%r30), %r2		/* get users sp back */
193#ifdef CONFIG_64BIT
194	extrd,u	%r2,63,1,%r19			/* W hidden in bottom bit */
195#if 0
196	xor	%r19,%r2,%r2			/* clear bottom bit */
197	depd,z	%r19,1,1,%r19
198	std	%r19,TASK_PT_PSW(%r1)
199#endif
200#endif
201	STREG	%r2,  TASK_PT_GR30(%r1)		/* ... and save it */
202
203	STREG	%r20, TASK_PT_GR20(%r1)		/* Syscall number */
204	STREG	%r21, TASK_PT_GR21(%r1)
205	STREG	%r22, TASK_PT_GR22(%r1)
206	STREG	%r23, TASK_PT_GR23(%r1)		/* 4th argument */
207	STREG	%r24, TASK_PT_GR24(%r1)		/* 3rd argument */
208	STREG	%r25, TASK_PT_GR25(%r1)		/* 2nd argument */
209	STREG	%r26, TASK_PT_GR26(%r1)	 	/* 1st argument */
210	STREG	%r27, TASK_PT_GR27(%r1)		/* user dp */
211	STREG   %r28, TASK_PT_GR28(%r1)         /* return value 0 */
212	STREG   %r0, TASK_PT_ORIG_R28(%r1)      /* don't prohibit restarts */
213	STREG	%r29, TASK_PT_GR29(%r1)		/* return value 1 */
214	STREG	%r31, TASK_PT_GR31(%r1)		/* preserve syscall return ptr */
215
216	ldo	TASK_PT_FR0(%r1), %r27		/* save fpregs from the kernel */
217	save_fp	%r27				/* or potential task switch  */
218
219	mfctl	%cr11, %r27			/* i.e. SAR */
220	STREG	%r27, TASK_PT_SAR(%r1)
221
222	loadgp
223
224#ifdef CONFIG_64BIT
225	ldo	-16(%r30),%r29			/* Reference param save area */
226	copy	%r19,%r2			/* W bit back to r2 */
227#else
228	/* no need to save these on stack in wide mode because the first 8
229	 * args are passed in registers */
230	stw     %r22, -52(%r30)                 /* 5th argument */
231	stw     %r21, -56(%r30)                 /* 6th argument */
232#endif
233
234	/* Are we being ptraced? */
235	mfctl	%cr30, %r1
236	LDREG	TASK_TI_FLAGS(%r1),%r1
237	ldi	_TIF_SYSCALL_TRACE_MASK, %r19
238	and,COND(=) %r1, %r19, %r0
239	b,n	.Ltracesys
240
241	/* Note!  We cannot use the syscall table that is mapped
242	nearby since the gateway page is mapped execute-only. */
243
244#ifdef CONFIG_64BIT
245	ldil	L%sys_call_table, %r1
246	or,ev	%r2,%r2,%r2
247	ldil	L%sys_call_table64, %r1
248	ldo	R%sys_call_table(%r1), %r19
249	or,ev	%r2,%r2,%r2
250	ldo	R%sys_call_table64(%r1), %r19
251#else
252	load32	sys_call_table, %r19
253#endif
254	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
255	b,n	.Lsyscall_nosys
256
257	LDREGX  %r20(%r19), %r19
258
259	/* If this is a sys_rt_sigreturn call, and the signal was received
260	 * when not in_syscall, then we want to return via syscall_exit_rfi,
261	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
262	 * trampoline code in signal.c).
263	 */
264	ldi	__NR_rt_sigreturn,%r2
265	comb,=	%r2,%r20,.Lrt_sigreturn
266.Lin_syscall:
267	ldil	L%syscall_exit,%r2
268	be      0(%sr7,%r19)
269	ldo	R%syscall_exit(%r2),%r2
270.Lrt_sigreturn:
271	comib,<> 0,%r25,.Lin_syscall
272	ldil	L%syscall_exit_rfi,%r2
273	be      0(%sr7,%r19)
274	ldo	R%syscall_exit_rfi(%r2),%r2
275
276	/* Note!  Because we are not running where we were linked, any
277	calls to functions external to this file must be indirect.  To
278	be safe, we apply the opposite rule to functions within this
279	file, with local labels given to them to ensure correctness. */
280
281.Lsyscall_nosys:
282syscall_nosys:
283	ldil	L%syscall_exit,%r1
284	be	R%syscall_exit(%sr7,%r1)
285	ldo	-ENOSYS(%r0),%r28		   /* set errno */
286
287
288/* Warning! This trace code is a virtual duplicate of the code above so be
289 * sure to maintain both! */
290.Ltracesys:
291tracesys:
292	/* Need to save more registers so the debugger can see where we
293	 * are.  This saves only the lower 8 bits of PSW, so that the C
294	 * bit is still clear on syscalls, and the D bit is set if this
295	 * full register save path has been executed.  We check the D
296	 * bit on syscall_return_rfi to determine which registers to
297	 * restore.  An interrupt results in a full PSW saved with the
298	 * C bit set, a non-straced syscall entry results in C and D clear
299	 * in the saved PSW.
300	 */
301	mfctl	%cr30,%r1			/* get task ptr */
302	ssm	0,%r2
303	STREG	%r2,TASK_PT_PSW(%r1)		/* Lower 8 bits only!! */
304	mfsp	%sr0,%r2
305	STREG	%r2,TASK_PT_SR0(%r1)
306	mfsp	%sr1,%r2
307	STREG	%r2,TASK_PT_SR1(%r1)
308	mfsp	%sr2,%r2
309	STREG	%r2,TASK_PT_SR2(%r1)
310	mfsp	%sr3,%r2
311	STREG	%r2,TASK_PT_SR3(%r1)
312	STREG	%r2,TASK_PT_SR4(%r1)
313	STREG	%r2,TASK_PT_SR5(%r1)
314	STREG	%r2,TASK_PT_SR6(%r1)
315	STREG	%r2,TASK_PT_SR7(%r1)
316	STREG	%r2,TASK_PT_IASQ0(%r1)
317	STREG	%r2,TASK_PT_IASQ1(%r1)
318	LDREG	TASK_PT_GR31(%r1),%r2
319	STREG	%r2,TASK_PT_IAOQ0(%r1)
320	ldo	4(%r2),%r2
321	STREG	%r2,TASK_PT_IAOQ1(%r1)
322	ldo	TASK_REGS(%r1),%r2
323	/* reg_save %r2 */
324	STREG	%r3,PT_GR3(%r2)
325	STREG	%r4,PT_GR4(%r2)
326	STREG	%r5,PT_GR5(%r2)
327	STREG	%r6,PT_GR6(%r2)
328	STREG	%r7,PT_GR7(%r2)
329	STREG	%r8,PT_GR8(%r2)
330	STREG	%r9,PT_GR9(%r2)
331	STREG	%r10,PT_GR10(%r2)
332	STREG	%r11,PT_GR11(%r2)
333	STREG	%r12,PT_GR12(%r2)
334	STREG	%r13,PT_GR13(%r2)
335	STREG	%r14,PT_GR14(%r2)
336	STREG	%r15,PT_GR15(%r2)
337	STREG	%r16,PT_GR16(%r2)
338	STREG	%r17,PT_GR17(%r2)
339	STREG	%r18,PT_GR18(%r2)
340	/* Finished saving things for the debugger */
341
342	copy	%r2,%r26
343	ldil	L%do_syscall_trace_enter,%r1
344	ldil	L%tracesys_next,%r2
345	be	R%do_syscall_trace_enter(%sr7,%r1)
346	ldo	R%tracesys_next(%r2),%r2
347
348tracesys_next:
349	/* do_syscall_trace_enter either returned the syscallno, or -1L,
350	 *  so we skip restoring the PT_GR20 below, since we pulled it from
351	 *  task->thread.regs.gr[20] above.
352	 */
353	copy	%ret0,%r20
354
355	mfctl	%cr30,%r1			/* get task ptr */
356	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return value */
357	LDREG   TASK_PT_GR26(%r1), %r26		/* Restore the users args */
358	LDREG   TASK_PT_GR25(%r1), %r25
359	LDREG   TASK_PT_GR24(%r1), %r24
360	LDREG   TASK_PT_GR23(%r1), %r23
361	LDREG   TASK_PT_GR22(%r1), %r22
362	LDREG   TASK_PT_GR21(%r1), %r21
363#ifdef CONFIG_64BIT
364	ldo	-16(%r30),%r29			/* Reference param save area */
365#else
366	stw     %r22, -52(%r30)                 /* 5th argument */
367	stw     %r21, -56(%r30)                 /* 6th argument */
368#endif
369
370	cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
371	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
372	b,n	.Ltracesys_nosys
373
374	/* Note!  We cannot use the syscall table that is mapped
375	nearby since the gateway page is mapped execute-only. */
376
377#ifdef CONFIG_64BIT
378	LDREG	TASK_PT_GR30(%r1), %r19		/* get users sp back */
379	extrd,u	%r19,63,1,%r2			/* W hidden in bottom bit */
380
381	ldil	L%sys_call_table, %r1
382	or,ev	%r2,%r2,%r2
383	ldil	L%sys_call_table64, %r1
384	ldo	R%sys_call_table(%r1), %r19
385	or,ev	%r2,%r2,%r2
386	ldo	R%sys_call_table64(%r1), %r19
387#else
388	load32	sys_call_table, %r19
389#endif
390
391	LDREGX  %r20(%r19), %r19
392
393	/* If this is a sys_rt_sigreturn call, and the signal was received
394	 * when not in_syscall, then we want to return via syscall_exit_rfi,
395	 * not syscall_exit.  Signal no. in r20, in_syscall in r25 (see
396	 * trampoline code in signal.c).
397	 */
398	ldi	__NR_rt_sigreturn,%r2
399	comb,=	%r2,%r20,.Ltrace_rt_sigreturn
400.Ltrace_in_syscall:
401	ldil	L%tracesys_exit,%r2
402	be      0(%sr7,%r19)
403	ldo	R%tracesys_exit(%r2),%r2
404
405.Ltracesys_nosys:
406	ldo	-ENOSYS(%r0),%r28		/* set errno */
407
408	/* Do *not* call this function on the gateway page, because it
409	makes a direct call to syscall_trace. */
410
411tracesys_exit:
412	mfctl	%cr30,%r1			/* get task ptr */
413#ifdef CONFIG_64BIT
414	ldo	-16(%r30),%r29			/* Reference param save area */
415#endif
416	ldo	TASK_REGS(%r1),%r26
417	BL	do_syscall_trace_exit,%r2
418	STREG   %r28,TASK_PT_GR28(%r1)          /* save return value now */
419	mfctl	%cr30,%r1			/* get task ptr */
420	LDREG   TASK_PT_GR28(%r1), %r28		/* Restore return val. */
421
422	ldil	L%syscall_exit,%r1
423	be,n	R%syscall_exit(%sr7,%r1)
424
425.Ltrace_rt_sigreturn:
426	comib,<> 0,%r25,.Ltrace_in_syscall
427	ldil	L%tracesys_sigexit,%r2
428	be      0(%sr7,%r19)
429	ldo	R%tracesys_sigexit(%r2),%r2
430
431tracesys_sigexit:
432	mfctl	%cr30,%r1			/* get task ptr */
433#ifdef CONFIG_64BIT
434	ldo	-16(%r30),%r29			/* Reference param save area */
435#endif
436	BL	do_syscall_trace_exit,%r2
437	ldo	TASK_REGS(%r1),%r26
438
439	ldil	L%syscall_exit_rfi,%r1
440	be,n	R%syscall_exit_rfi(%sr7,%r1)
441
442
443	/*********************************************************
444		32/64-bit Light-Weight-Syscall ABI
445
446		* - Indicates a hint for userspace inline asm
447		implementations.
448
449		Syscall number (caller-saves)
450	        - %r20
451	        * In asm clobber.
452
453		Argument registers (caller-saves)
454	        - %r26, %r25, %r24, %r23, %r22
455	        * In asm input.
456
457		Return registers (caller-saves)
458	        - %r28 (return), %r21 (errno)
459	        * In asm output.
460
461		Caller-saves registers
462	        - %r1, %r27, %r29
463	        - %r2 (return pointer)
464	        - %r31 (ble link register)
465	        * In asm clobber.
466
467		Callee-saves registers
468	        - %r3-%r18
469	        - %r30 (stack pointer)
470	        * Not in asm clobber.
471
472		If userspace is 32-bit:
473		Callee-saves registers
474	        - %r19 (32-bit PIC register)
475
476		Differences from 32-bit calling convention:
477		- Syscall number in %r20
478		- Additional argument register %r22 (arg4)
479		- Callee-saves %r19.
480
481		If userspace is 64-bit:
482		Callee-saves registers
483		- %r27 (64-bit PIC register)
484
485		Differences from 64-bit calling convention:
486		- Syscall number in %r20
487		- Additional argument register %r22 (arg4)
488		- Callee-saves %r27.
489
490		Error codes returned by entry path:
491
492		ENOSYS - r20 was an invalid LWS number.
493
494	*********************************************************/
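
	/* Illustrative sketch (added for clarity, not a verbatim libc
	 * sequence): userspace enters an LWS function through the fixed
	 * lws_entry stub at 0xb0, with the arguments already in %r26-%r22
	 * and the LWS number loaded in the branch delay slot:
	 *
	 *	ble	0xb0(%sr2, %r0)		; enter the gateway page, return address in %r31
	 *	ldi	lws_number, %r20	; select the LWS function (delay slot)
	 *
	 * On return %r21 holds 0 or a negative errno and %r28 holds the
	 * function's result; everything listed as caller-saves above must be
	 * assumed clobbered.
	 */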
495lws_start:
496
497#ifdef CONFIG_64BIT
498	ssm	PSW_SM_W, %r1
499	extrd,u	%r1,PSW_W_BIT,1,%r1
500	/* sp must be aligned on 4, so deposit the W bit setting into
501	 * the bottom of sp temporarily */
502	or,od	%r1,%r30,%r30
503
504	/* Clip LWS number to a 32-bit value for 32-bit processes */
505	depdi	0, 31, 32, %r20
506#endif
507
508        /* Is the lws entry number valid? */
509	comiclr,>>	__NR_lws_entries, %r20, %r0
510	b,n	lws_exit_nosys
511
512	/* Load table start */
513	ldil	L%lws_table, %r1
514	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
515	LDREGX	%r20(%sr2,r28), %r21	/* Scratch use of r21 */
516
517	/* Jump to lws, lws table pointers already relocated */
518	be,n	0(%sr2,%r21)
519
520lws_exit_noerror:
521	lws_pagefault_enable	%r1,%r21
522	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
523	stw,ma	%r21, 0(%sr2,%r20)
524	ssm	PSW_SM_I, %r0
525	b	lws_exit
526	copy	%r0, %r21
527
528lws_wouldblock:
529	ssm	PSW_SM_I, %r0
530	ldo	2(%r0), %r28
531	b	lws_exit
532	ldo	-EAGAIN(%r0), %r21
533
534lws_pagefault:
535	lws_pagefault_enable	%r1,%r21
536	ldi	__ARCH_SPIN_LOCK_UNLOCKED_VAL, %r21
537	stw,ma	%r21, 0(%sr2,%r20)
538	ssm	PSW_SM_I, %r0
539	ldo	3(%r0),%r28
540	b	lws_exit
541	ldo	-EAGAIN(%r0),%r21
542
543lws_fault:
544	ldo	1(%r0),%r28
545	b	lws_exit
546	ldo	-EFAULT(%r0),%r21
547
548lws_exit_nosys:
549	ldo	-ENOSYS(%r0),%r21
550	/* Fall through: Return to userspace */
551
552lws_exit:
553#ifdef CONFIG_64BIT
554	/* decide whether to reset the wide mode bit
555	 *
556	 * For a syscall, the W bit is stored in the lowest bit
557	 * of sp.  Extract it and reset W if it is zero */
558	extrd,u,*<>	%r30,63,1,%r1
559	rsm	PSW_SM_W, %r0
560	/* now reset the lowest bit of sp if it was set */
561	xor	%r30,%r1,%r30
562#endif
563	be,n	0(%sr7, %r31)
564
565
566
567	/***************************************************
568		Implementing 32bit CAS as an atomic operation:
569
570		%r26 - Address to examine
571		%r25 - Old value to check (old)
572		%r24 - New value to set (new)
573		%r28 - Return prev through this register.
574		%r21 - Kernel error code
575
576		%r21 returns the following error codes:
577		EAGAIN - CAS is busy, ldcw failed, try again.
578		EFAULT - Read or write failed.
579
580		If EAGAIN is returned, %r28 indicates the busy reason:
581		r28 == 1 - CAS is busy. lock contended.
582		r28 == 2 - CAS is busy. ldcw failed.
583		r28 == 3 - CAS is busy. page fault.
584
585		Scratch: r20, r28, r1
586
587	****************************************************/
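
	/* Illustrative sketch of a 32-bit CAS call following the contract
	 * above (added for clarity; real libcs differ in detail).  addr, old
	 * and new are placeholder names:
	 *
	 *	copy	addr, %r26		; word to operate on
	 *	copy	old, %r25		; expected value
	 *	copy	new, %r24		; replacement value
	 *	ble	0xb0(%sr2, %r0)
	 *	ldi	0, %r20			; LWS 0 = 32-bit CAS (delay slot)
	 *	; %r21 == 0       -> %r28 is the previous value; it equals old iff the swap happened
	 *	; %r21 == -EAGAIN -> transient failure, %r28 = 1/2/3 gives the busy reason, retry
	 *	; %r21 == -EFAULT -> the address could not be read or written
	 */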
588
589	/* ELF64 Process entry path */
590lws_compare_and_swap64:
591#ifdef CONFIG_64BIT
592	b,n	lws_compare_and_swap
593#else
594	/* If we are not a 64-bit kernel, then we don't
595	 * have 64-bit input registers, and calling
596	 * the 64-bit LWS CAS returns ENOSYS.
597	 */
598	b,n	lws_exit_nosys
599#endif
600
601	/* ELF32/ELF64 Process entry path */
602lws_compare_and_swap32:
603#ifdef CONFIG_64BIT
604	/* Wide mode user process? */
605	bb,<,n  %sp, 31, lws_compare_and_swap
606
607	/* Clip all the input registers for 32-bit processes */
608	depdi	0, 31, 32, %r26
609	depdi	0, 31, 32, %r25
610	depdi	0, 31, 32, %r24
611#endif
612
613lws_compare_and_swap:
614	/* Trigger memory reference interruptions without writing to memory */
6151:	ldw	0(%r26), %r28
6162:	stbys,e	%r0, 0(%r26)
617
618	/* Calculate 8-bit hash index from virtual address */
619	extru_safe	%r26, 27, 8, %r20
620
621	/* Load start of lock table */
622	ldil	L%lws_lock_start, %r28
623	ldo	R%lws_lock_start(%r28), %r28
624
625	/* Find lock to use, the hash index is one of 0 to
626	   255, multiplied by 16 (keep it 16-byte aligned)
627	   and add to the lock table offset. */
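	/* In C terms (descriptive note): the lock used is
	 *   lws_lock_start + (((unsigned long)addr >> 4) & 0xff) * 16,
	 * so accesses within the same 16-byte block always hash to the
	 * same lock.  The same hashing is used by every LWS path below. */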
628	shlw	%r20, 4, %r20
629	add	%r20, %r28, %r20
630
631	rsm	PSW_SM_I, %r0				/* Disable interrupts */
632
633	/* Try to acquire the lock */
634	LDCW	0(%sr2,%r20), %r28
635	spinlock_check	%r28, %r21
636	comclr,<>	%r0, %r28, %r0
637	b,n	lws_wouldblock
638
639	/* Disable page faults to prevent sleeping in critical region */
640	lws_pagefault_disable	%r21,%r28
641
642	/*
643		prev = *addr;
644		if ( prev == old )
645		  *addr = new;
646		return prev;
647	*/
648
649	/* NOTES:
650		This all works because intr_do_signal
651		and schedule both check the return iasq
652		and see that we are on the kernel page
653		so this process is never scheduled off
654		nor ever sent any signal of any sort,
655		thus it is wholly atomic from userspace's
656		perspective
657	*/
658	/* The load and store could fail */
6593:	ldw	0(%r26), %r28
660	sub,<>	%r28, %r25, %r0
6614:	stw	%r24, 0(%r26)
662	b,n	lws_exit_noerror
663
664	/* A fault occurred on load or stbys,e store */
6655:	b,n	lws_fault
666	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 5b-linux_gateway_page)
667	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 5b-linux_gateway_page)
668
669	/* A page fault occurred in critical region */
6706:	b,n	lws_pagefault
671	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 6b-linux_gateway_page)
672	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 6b-linux_gateway_page)
673
674
675	/***************************************************
676		New CAS implementation which uses pointers and variable size
677		information. The values pointed to by old and new MUST NOT change
678		while performing CAS. The lock only protects the value at %r26.
679
680		%r26 - Address to examine
681		%r25 - Pointer to the value to check (old)
682		%r24 - Pointer to the value to set (new)
683		%r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
684		%r28 - Return non-zero on failure
685		%r21 - Kernel error code
686
687		%r21 returns the following error codes:
688		EAGAIN - CAS is busy, ldcw failed, try again.
689		EFAULT - Read or write failed.
690
691		If EAGAIN is returned, %r28 indicates the busy reason:
692		r28 == 1 - CAS is busy. lock contended.
693		r28 == 2 - CAS is busy. ldcw failed.
694		r28 == 3 - CAS is busy. page fault.
695
696		Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
697
698	****************************************************/
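
	/* Illustrative sketch (added for clarity): a 64-bit CAS issued from a
	 * 32-bit process passes pointers rather than values, e.g.
	 * %r26 = addr, %r25 = &old, %r24 = &new, %r23 = 3 (64-bit), then
	 * "ble 0xb0(%sr2, %r0)" with "ldi 2, %r20" in the delay slot.
	 * Success is %r21 == 0 together with %r28 == 0; unlike LWS 0, the
	 * previous value is not returned. */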
699
700lws_compare_and_swap_2:
701#ifdef CONFIG_64BIT
702	/* Wide mode user process? */
703	bb,<,n	%sp, 31, cas2_begin
704
705	/* Clip the input registers for 32-bit processes. We don't
706	   need to clip %r23 as we only use it for word operations */
707	depdi	0, 31, 32, %r26
708	depdi	0, 31, 32, %r25
709	depdi	0, 31, 32, %r24
710#endif
711
712cas2_begin:
713	/* Check the validity of the size pointer */
714	subi,>>= 3, %r23, %r0
715	b,n	lws_exit_nosys
716
717	/* Jump to the functions which will load the old and new values into
718	   registers depending on their size */
719	shlw	%r23, 2, %r29
720	blr	%r29, %r0
721	nop
722
723	/* 8-bit load */
7241:	ldb	0(%r25), %r25
725	b	cas2_lock_start
7262:	ldb	0(%r24), %r24
727	nop
728	nop
729	nop
730	nop
731	nop
732
733	/* 16-bit load */
7343:	ldh	0(%r25), %r25
735	b	cas2_lock_start
7364:	ldh	0(%r24), %r24
737	nop
738	nop
739	nop
740	nop
741	nop
742
743	/* 32-bit load */
7445:	ldw	0(%r25), %r25
745	b	cas2_lock_start
7466:	ldw	0(%r24), %r24
747	nop
748	nop
749	nop
750	nop
751	nop
752
753	/* 64-bit load */
754#ifdef CONFIG_64BIT
7557:	ldd	0(%r25), %r25
7568:	ldd	0(%r24), %r24
757#else
758	/* Load old value into r22/r23 - high/low */
7597:	ldw	0(%r25), %r22
7608:	ldw	4(%r25), %r23
761	/* Load new value into fr4 for atomic store later */
7629:	flddx	0(%r24), %fr4
763#endif
764
765cas2_lock_start:
766	/* Trigger memory reference interruptions without writing to memory */
767	copy	%r26, %r28
768	depi_safe	0, 31, 2, %r28
76910:	ldw	0(%r28), %r1
77011:	stbys,e	%r0, 0(%r28)
771
772	/* Calculate 8-bit hash index from virtual address */
773	extru_safe	%r26, 27, 8, %r20
774
775	/* Load start of lock table */
776	ldil	L%lws_lock_start, %r28
777	ldo	R%lws_lock_start(%r28), %r28
778
779	/* Find lock to use, the hash index is one of 0 to
780	   255, multiplied by 16 (keep it 16-byte aligned)
781	   and add to the lock table offset. */
782	shlw	%r20, 4, %r20
783	add	%r20, %r28, %r20
784
785	rsm	PSW_SM_I, %r0			/* Disable interrupts */
786
787	/* Try to acquire the lock */
788	LDCW	0(%sr2,%r20), %r28
789	spinlock_check	%r28, %r21
790	comclr,<>	%r0, %r28, %r0
791	b,n	lws_wouldblock
792
793	/* Disable page faults to prevent sleeping in critical region */
794	lws_pagefault_disable	%r21,%r28
795
796	/*
797		prev = *addr;
798		if ( prev == old )
799		  *addr = new;
800		return prev;
801	*/
802
803	/* NOTES:
804		This all works because intr_do_signal
805		and schedule both check the return iasq
806		and see that we are on the kernel page
807		so this process is never scheduled off
808		nor ever sent any signal of any sort,
809		thus it is wholly atomic from userspace's
810		perspective
811	*/
812
813	/* Jump to the correct function */
814	blr	%r29, %r0
815	/* Set %r28 as non-zero for now */
816	ldo	1(%r0),%r28
817
818	/* 8-bit CAS */
81912:	ldb	0(%r26), %r29
820	sub,=	%r29, %r25, %r0
821	b,n	lws_exit_noerror
82213:	stb	%r24, 0(%r26)
823	b	lws_exit_noerror
824	copy	%r0, %r28
825	nop
826	nop
827
828	/* 16-bit CAS */
82914:	ldh	0(%r26), %r29
830	sub,=	%r29, %r25, %r0
831	b,n	lws_exit_noerror
83215:	sth	%r24, 0(%r26)
833	b	lws_exit_noerror
834	copy	%r0, %r28
835	nop
836	nop
837
838	/* 32-bit CAS */
83916:	ldw	0(%r26), %r29
840	sub,=	%r29, %r25, %r0
841	b,n	lws_exit_noerror
84217:	stw	%r24, 0(%r26)
843	b	lws_exit_noerror
844	copy	%r0, %r28
845	nop
846	nop
847
848	/* 64-bit CAS */
849#ifdef CONFIG_64BIT
85018:	ldd	0(%r26), %r29
851	sub,*=	%r29, %r25, %r0
852	b,n	lws_exit_noerror
85319:	std	%r24, 0(%r26)
854	copy	%r0, %r28
855#else
856	/* Compare first word */
85718:	ldw	0(%r26), %r29
858	sub,=	%r29, %r22, %r0
859	b,n	lws_exit_noerror
860	/* Compare second word */
86119:	ldw	4(%r26), %r29
862	sub,=	%r29, %r23, %r0
863	b,n	lws_exit_noerror
864	/* Perform the store */
86520:	fstdx	%fr4, 0(%r26)
866	copy	%r0, %r28
867#endif
868	b	lws_exit_noerror
869	copy	%r0, %r28
870
871	/* A fault occurred on load or stbys,e store */
87230:	b,n	lws_fault
873	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
874	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
875	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
876	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
877	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
878	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
879	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
880	ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
881#ifndef CONFIG_64BIT
882	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
883#endif
884
885	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
886	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
887
888	/* A page fault occurred in critical region */
88931:	b,n	lws_pagefault
890	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
891	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
892	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
893	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
894	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
895	ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
896	ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
897	ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
898#ifndef CONFIG_64BIT
899	ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
900#endif
901
902
903	/***************************************************
904		LWS atomic exchange.
905
906		%r26 - Exchange address
907		%r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
908		%r24 - Address of new value
909		%r23 - Address of old value
910		%r28 - Return non-zero on failure
911		%r21 - Kernel error code
912
913		%r21 returns the following error codes:
914		EAGAIN - CAS is busy, ldcw failed, try again.
915		EFAULT - Read or write failed.
916
917		If EAGAIN is returned, %r28 indicates the busy reason:
918		r28 == 1 - CAS is busy. lock contended.
919		r28 == 2 - CAS is busy. ldcw failed.
920		r28 == 3 - CAS is busy. page fault.
921
922		Scratch: r20, r1
923
924	****************************************************/
925
926lws_atomic_xchg:
927#ifdef CONFIG_64BIT
928	/* Wide mode user process? */
929	bb,<,n	%sp, 31, atomic_xchg_begin
930
931	/* Clip the input registers for 32-bit processes. We don't
932	   need to clip %r23 as we only use it for word operations */
933	depdi	0, 31, 32, %r26
934	depdi	0, 31, 32, %r25
935	depdi	0, 31, 32, %r24
936	depdi	0, 31, 32, %r23
937#endif
938
939atomic_xchg_begin:
940	/* Check the validity of the size pointer */
941	subi,>>= 3, %r25, %r0
942	b,n	lws_exit_nosys
943
944	/* Jump to the functions which will load the old and new values into
945	   registers depending on their size */
946	shlw	%r25, 2, %r1
947	blr	%r1, %r0
948	nop
949
950	/* Perform exception checks */
951
952	/* 8-bit exchange */
9531:	ldb	0(%r24), %r20
954	copy	%r23, %r20
955	depi_safe	0, 31, 2, %r20
956	b	atomic_xchg_start
9572:	stbys,e	%r0, 0(%r20)
958	nop
959	nop
960	nop
961
962	/* 16-bit exchange */
9633:	ldh	0(%r24), %r20
964	copy	%r23, %r20
965	depi_safe	0, 31, 2, %r20
966	b	atomic_xchg_start
9674:	stbys,e	%r0, 0(%r20)
968	nop
969	nop
970	nop
971
972	/* 32-bit exchange */
9735:	ldw	0(%r24), %r20
974	b	atomic_xchg_start
9756:	stbys,e	%r0, 0(%r23)
976	nop
977	nop
978	nop
979	nop
980	nop
981
982	/* 64-bit exchange */
983#ifdef CONFIG_64BIT
9847:	ldd	0(%r24), %r20
9858:	stdby,e	%r0, 0(%r23)
986#else
9877:	ldw	0(%r24), %r20
9888:	ldw	4(%r24), %r20
989	copy	%r23, %r20
990	depi_safe	0, 31, 2, %r20
9919:	stbys,e	%r0, 0(%r20)
99210:	stbys,e	%r0, 4(%r20)
993#endif
994
995atomic_xchg_start:
996	/* Trigger memory reference interruptions without writing to memory */
997	copy	%r26, %r28
998	depi_safe	0, 31, 2, %r28
99911:	ldw	0(%r28), %r1
100012:	stbys,e	%r0, 0(%r28)
1001
1002	/* Calculate 8-bit hash index from virtual address */
1003	extru_safe  %r26, 27, 8, %r20
1004
1005	/* Load start of lock table */
1006	ldil	L%lws_lock_start, %r28
1007	ldo	R%lws_lock_start(%r28), %r28
1008
1009	/* Find lock to use, the hash index is one of 0 to
1010	   255, multiplied by 16 (keep it 16-byte aligned)
1011	   and add to the lock table offset. */
1012	shlw	%r20, 4, %r20
1013	add	%r20, %r28, %r20
1014
1015	rsm	PSW_SM_I, %r0			/* Disable interrupts */
1016
1017	/* Try to acquire the lock */
1018	LDCW	0(%sr2,%r20), %r28
1019	spinlock_check	%r28, %r21
1020	comclr,<>	%r0, %r28, %r0
1021	b,n	lws_wouldblock
1022
1023	/* Disable page faults to prevent sleeping in critical region */
1024	lws_pagefault_disable	%r21,%r28
1025
1026	/* NOTES:
1027		This all works because intr_do_signal
1028		and schedule both check the return iasq
1029		and see that we are on the kernel page
1030		so this process is never scheduled off
1031		nor ever sent any signal of any sort,
1032		thus it is wholly atomic from userspace's
1033		perspective
1034	*/
1035
1036	/* Jump to the correct function */
1037	blr	%r1, %r0
1038	/* Set %r28 as non-zero for now */
1039	ldo	1(%r0),%r28
1040
1041	/* 8-bit exchange */
104214:	ldb	0(%r26), %r1
104315:	stb	%r1, 0(%r23)
104416:	ldb	0(%r24), %r1
104517:	stb	%r1, 0(%r26)
1046	b	lws_exit_noerror
1047	copy	%r0, %r28
1048	nop
1049	nop
1050
1051	/* 16-bit exchange */
105218:	ldh	0(%r26), %r1
105319:	sth	%r1, 0(%r23)
105420:	ldh	0(%r24), %r1
105521:	sth	%r1, 0(%r26)
1056	b	lws_exit_noerror
1057	copy	%r0, %r28
1058	nop
1059	nop
1060
1061	/* 32-bit exchange */
106222:	ldw	0(%r26), %r1
106323:	stw	%r1, 0(%r23)
106424:	ldw	0(%r24), %r1
106525:	stw	%r1, 0(%r26)
1066	b	lws_exit_noerror
1067	copy	%r0, %r28
1068	nop
1069	nop
1070
1071	/* 64-bit exchange */
1072#ifdef CONFIG_64BIT
107326:	ldd	0(%r26), %r1
107427:	std	%r1, 0(%r23)
107528:	ldd	0(%r24), %r1
107629:	std	%r1, 0(%r26)
1077#else
107826:	flddx	0(%r26), %fr4
107927:	fstdx	%fr4, 0(%r23)
108028:	flddx	0(%r24), %fr4
108129:	fstdx	%fr4, 0(%r26)
1082#endif
1083	b	lws_exit_noerror
1084	copy	%r0, %r28
1085
1086	/* A fault occurred on load or stbys,e store */
108730:	b,n	lws_fault
1088	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
1089	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
1090	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
1091	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
1092	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
1093	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
1094	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
1095	ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 30b-linux_gateway_page)
1096#ifndef CONFIG_64BIT
1097	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 30b-linux_gateway_page)
1098	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 30b-linux_gateway_page)
1099#endif
1100
1101	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 30b-linux_gateway_page)
1102	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 30b-linux_gateway_page)
1103
1104	/* A page fault occurred in critical region */
110531:	b,n	lws_pagefault
1106	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
1107	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
1108	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
1109	ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 31b-linux_gateway_page)
1110	ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 31b-linux_gateway_page)
1111	ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 31b-linux_gateway_page)
1112	ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 31b-linux_gateway_page)
1113	ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 31b-linux_gateway_page)
1114	ASM_EXCEPTIONTABLE_ENTRY(22b-linux_gateway_page, 31b-linux_gateway_page)
1115	ASM_EXCEPTIONTABLE_ENTRY(23b-linux_gateway_page, 31b-linux_gateway_page)
1116	ASM_EXCEPTIONTABLE_ENTRY(24b-linux_gateway_page, 31b-linux_gateway_page)
1117	ASM_EXCEPTIONTABLE_ENTRY(25b-linux_gateway_page, 31b-linux_gateway_page)
1118	ASM_EXCEPTIONTABLE_ENTRY(26b-linux_gateway_page, 31b-linux_gateway_page)
1119	ASM_EXCEPTIONTABLE_ENTRY(27b-linux_gateway_page, 31b-linux_gateway_page)
1120	ASM_EXCEPTIONTABLE_ENTRY(28b-linux_gateway_page, 31b-linux_gateway_page)
1121	ASM_EXCEPTIONTABLE_ENTRY(29b-linux_gateway_page, 31b-linux_gateway_page)
1122
1123	/***************************************************
1124		LWS atomic store.
1125
1126		%r26 - Address to store
1127		%r25 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
1128		%r24 - Address of value to store
1129		%r28 - Return non-zero on failure
1130		%r21 - Kernel error code
1131
1132		%r21 returns the following error codes:
1133		EAGAIN - CAS is busy, ldcw failed, try again.
1134		EFAULT - Read or write failed.
1135
1136		If EAGAIN is returned, %r28 indicates the busy reason:
1137		r28 == 1 - CAS is busy. lock contended.
1138		r28 == 2 - CAS is busy. ldcw failed.
1139		r28 == 3 - CAS is busy. page fault.
1140
1141		Scratch: r20, r1
1142
1143	****************************************************/
1144
1145lws_atomic_store:
1146#ifdef CONFIG_64BIT
1147	/* Wide mode user process? */
1148	bb,<,n	%sp, 31, atomic_store_begin
1149
1150	/* Clip the input registers for 32-bit processes. We don't
1151	   need to clip %r23 as we only use it for word operations */
1152	depdi	0, 31, 32, %r26
1153	depdi	0, 31, 32, %r25
1154	depdi	0, 31, 32, %r24
1155#endif
1156
1157atomic_store_begin:
1158	/* Check the validity of the size pointer */
1159	subi,>>= 3, %r25, %r0
1160	b,n	lws_exit_nosys
1161
1162	shlw	%r25, 1, %r1
1163	blr	%r1, %r0
1164	nop
1165
1166	/* Perform exception checks */
1167
1168	/* 8-bit store */
11691:	ldb	0(%r24), %r20
1170	b,n	atomic_store_start
1171	nop
1172	nop
1173
1174	/* 16-bit store */
11752:	ldh	0(%r24), %r20
1176	b,n	atomic_store_start
1177	nop
1178	nop
1179
1180	/* 32-bit store */
11813:	ldw	0(%r24), %r20
1182	b,n	atomic_store_start
1183	nop
1184	nop
1185
1186	/* 64-bit store */
1187#ifdef CONFIG_64BIT
11884:	ldd	0(%r24), %r20
1189#else
11904:	ldw	0(%r24), %r20
11915:	ldw	4(%r24), %r20
1192#endif
1193
1194atomic_store_start:
1195	/* Trigger memory reference interruptions without writing to memory */
1196	copy	%r26, %r28
1197	depi_safe	0, 31, 2, %r28
11986:	ldw	0(%r28), %r1
11997:	stbys,e	%r0, 0(%r28)
1200
1201	/* Calculate 8-bit hash index from virtual address */
1202	extru_safe  %r26, 27, 8, %r20
1203
1204	/* Load start of lock table */
1205	ldil	L%lws_lock_start, %r28
1206	ldo	R%lws_lock_start(%r28), %r28
1207
1208	/* Find lock to use, the hash index is one of 0 to
1209	   255, multiplied by 16 (keep it 16-byte aligned)
1210	   and add to the lock table offset. */
1211	shlw	%r20, 4, %r20
1212	add	%r20, %r28, %r20
1213
1214	rsm	PSW_SM_I, %r0			/* Disable interrupts */
1215
1216	/* Try to acquire the lock */
1217	LDCW	0(%sr2,%r20), %r28
1218	spinlock_check	%r28, %r21
1219	comclr,<>	%r0, %r28, %r0
1220	b,n	lws_wouldblock
1221
1222	/* Disable page faults to prevent sleeping in critical region */
1223	lws_pagefault_disable	%r21,%r28
1224
1225	/* NOTES:
1226		This all works because intr_do_signal
1227		and schedule both check the return iasq
1228		and see that we are on the kernel page
1229		so this process is never scheduled off
1230		nor ever sent any signal of any sort,
1231		thus it is wholly atomic from userspace's
1232		perspective
1233	*/
1234
1235	/* Jump to the correct function */
1236	blr	%r1, %r0
1237	/* Set %r28 as non-zero for now */
1238	ldo	1(%r0),%r28
1239
1240	/* 8-bit store */
12419:	ldb	0(%r24), %r1
124210:	stb	%r1, 0(%r26)
1243	b	lws_exit_noerror
1244	copy	%r0, %r28
1245
1246	/* 16-bit store */
124711:	ldh	0(%r24), %r1
124812:	sth	%r1, 0(%r26)
1249	b	lws_exit_noerror
1250	copy	%r0, %r28
1251
1252	/* 32-bit store */
125313:	ldw	0(%r24), %r1
125414:	stw	%r1, 0(%r26)
1255	b	lws_exit_noerror
1256	copy	%r0, %r28
1257
1258	/* 64-bit store */
1259#ifdef CONFIG_64BIT
126015:	ldd	0(%r24), %r1
126116:	std	%r1, 0(%r26)
1262#else
126315:	flddx	0(%r24), %fr4
126416:	fstdx	%fr4, 0(%r26)
1265#endif
1266	b	lws_exit_noerror
1267	copy	%r0, %r28
1268
1269	/* A fault occurred on load or stbys,e store */
127030:	b,n	lws_fault
1271	ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 30b-linux_gateway_page)
1272	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 30b-linux_gateway_page)
1273	ASM_EXCEPTIONTABLE_ENTRY(3b-linux_gateway_page, 30b-linux_gateway_page)
1274	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 30b-linux_gateway_page)
1275#ifndef CONFIG_64BIT
1276	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 30b-linux_gateway_page)
1277#endif
1278
1279	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 30b-linux_gateway_page)
1280	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 30b-linux_gateway_page)
1281
1282	/* A page fault occurred in critical region */
128331:	b,n	lws_pagefault
1284	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 31b-linux_gateway_page)
1285	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 31b-linux_gateway_page)
1286	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 31b-linux_gateway_page)
1287	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 31b-linux_gateway_page)
1288	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 31b-linux_gateway_page)
1289	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 31b-linux_gateway_page)
1290	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 31b-linux_gateway_page)
1291	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 31b-linux_gateway_page)
1292
1293	/* Make sure nothing else is placed on this page */
1294	.align PAGE_SIZE
1295END(linux_gateway_page)
1296ENTRY(end_linux_gateway_page)
1297
1298	/* Relocate symbols assuming linux_gateway_page is mapped
1299	   to virtual address 0x0 */
1300
1301#define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)
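/* For example (descriptive note): LWS_ENTRY(atomic_xchg) emits the offset of
 * lws_atomic_xchg from linux_gateway_page, which is also its address in the
 * page-0 alias of the gateway page, so lws_start can branch to it through
 * %sr2 without further relocation. */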
1302
1303	.section .rodata,"a"
1304
1305	.align 8
1306	/* Light-weight-syscall table */
1307	/* Start of lws table. */
1308ENTRY(lws_table)
1309	LWS_ENTRY(compare_and_swap32)		/* 0 - ELF32 Atomic 32bit CAS */
1310	LWS_ENTRY(compare_and_swap64)		/* 1 - ELF64 Atomic 32bit CAS */
1311	LWS_ENTRY(compare_and_swap_2)		/* 2 - Atomic 64bit CAS */
1312	LWS_ENTRY(atomic_xchg)			/* 3 - Atomic Exchange */
1313	LWS_ENTRY(atomic_store)			/* 4 - Atomic Store */
1314END(lws_table)
1315	/* End of lws table */
1316
1317#ifdef CONFIG_64BIT
1318#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
1319#else
1320#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
1321#endif
1322#define __SYSCALL(nr, entry)	ASM_ULONG_INSN entry
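/* For illustration (hypothetical entry points sys_foo/compat_sys_foo): on a
 * 64-bit kernel, __SYSCALL_WITH_COMPAT(nr, sys_foo, compat_sys_foo) expands
 * here to "ASM_ULONG_INSN compat_sys_foo", so the 32-bit table below is built
 * from the compat entries, while the redefinition further down selects the
 * native sys_foo entries for sys_call_table64. */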
1323	.align 8
1324ENTRY(sys_call_table)
1325	.export sys_call_table,data
1326#include <asm/syscall_table_32.h>    /* 32-bit syscalls */
1327END(sys_call_table)
1328
1329#ifdef CONFIG_64BIT
1330#undef __SYSCALL_WITH_COMPAT
1331#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
1332	.align 8
1333ENTRY(sys_call_table64)
1334#include <asm/syscall_table_64.h>    /* 64-bit syscalls */
1335END(sys_call_table64)
1336#endif
1337
1338	/*
1339		All light-weight-syscall atomic operations
1340		will use this set of locks
1341
1342		NOTE: The lws_lock_start symbol must be
1343		at least 16-byte aligned for safe use
1344		with ldcw.
1345	*/
1346	.section .data
1347	.align	L1_CACHE_BYTES
1348ENTRY(lws_lock_start)
1349	/* lws locks */
1350	.rept 256
1351	/* Keep locks aligned at 16-bytes */
1352	.word __ARCH_SPIN_LOCK_UNLOCKED_VAL
1353	.word 0
1354	.word 0
1355	.word 0
1356	.endr
1357END(lws_lock_start)
1358	.previous
1359
1360.end
1361