xref: /linux/arch/alpha/kernel/entry.S (revision e2683c8868d03382da7e1ce8453b543a043066d1)
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * arch/alpha/kernel/entry.S
4 *
5 * Kernel entry-points.
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/thread_info.h>
10#include <asm/pal.h>
11#include <asm/errno.h>
12#include <asm/unistd.h>
13#include <linux/errno.h>
14
15	.text
16	.set noat
17	.cfi_sections	.debug_frame
18
/*
 * Open a DWARF CFI frame for a PAL-entered exception/interrupt entry.
 * PALcode has already pushed the 48-byte OSF frame (PC at sp+8, gp at
 * sp+16, a0-a2 at sp+24..40); column 64 stands in for the PAL-saved
 * return PC since it lives in no ordinary register.
 */
.macro	CFI_START_OSF_FRAME	func
	.align	4
	.globl	\func
	.type	\func,@function
\func:
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
	.cfi_rel_offset	$16, 24
	.cfi_rel_offset	$17, 32
	.cfi_rel_offset	$18, 40
.endm
33
/* Close a frame opened by CFI_START_OSF_FRAME and emit the symbol size. */
.macro	CFI_END_OSF_FRAME	func
	.cfi_endproc
	.size	\func, . - \func
.endm
38
39/*
40 * SYSCALL_SKIP_RETURN_RESTART_GATE
41 *
42 * Used when syscall dispatch is skipped (seccomp/ptrace injected nr=-1).
43 *  - Ensure we never return r0==-1 with a3==0 (success); convert to ENOSYS.
44 *  - Gate whether syscall restart is allowed by preserving restart context
45 *    only for ERESTART* returns. Result:
46 *        $26 = 0  => restart allowed
47 *        $26 = 1  => restart NOT allowed
48 *        $18 = preserved syscall nr (regs->r2) if restart allowed, else 0
49 */
/*
 * See the block comment above for the contract.  Summary of effects:
 * reads/writes the pt_regs r0 (0($sp)) and a3 (72($sp)) slots; on exit
 *	$26 = 0 => restart allowed, $18 = restart nr from regs->r2
 *	$26 = 1 => restart NOT allowed, $18 = 0
 *	$19 = final a3 value (0 on success path, 1 on error path)
 * Clobbers $20-$22.  Errno values in r0 are positive here (alpha ABI:
 * a3==1 means r0 holds a positive errno).
 */
.macro  SYSCALL_SKIP_RETURN_RESTART_GATE
	/* Fix up invalid "-1 success" return state. */
	ldq	$19, 72($sp)		/* a3 */
	bne	$19, 1f			/* already error => skip fixup */

	ldq	$20, 0($sp)		/* r0 */
	lda	$21, -1($31)
	cmpeq	$20, $21, $22
	beq	$22, 1f			/* r0 != -1 => skip fixup */


	lda	$20, ENOSYS($31)	/* never report "-1 success"; fake ENOSYS */
	stq	$20, 0($sp)		/* r0 = ENOSYS */
	lda	$19, 1($31)
	stq	$19, 72($sp)		/* a3 = 1 */
1:
	/* Restart gating: success is never restartable here. */
	ldq	$19, 72($sp)		/* a3 */
	beq	$19, 3f			/* success => not restartable */

	ldq	$20, 0($sp)		/* r0 (positive errno if a3==1) */
	lda	$21, ERESTARTSYS($31)
	cmpeq	$20, $21, $22
	bne	$22, 2f
	lda	$21, ERESTARTNOINTR($31)
	cmpeq	$20, $21, $22
	bne	$22, 2f
	lda	$21, ERESTARTNOHAND($31)
	cmpeq	$20, $21, $22
	bne	$22, 2f
	lda	$21, ERESTART_RESTARTBLOCK($31)
	cmpeq	$20, $21, $22
	bne	$22, 2f

3:	/* Not a restart code (or success) => restart NOT allowed. */
	addq	$31, 1, $26		/* $26=1 => restart NOT allowed */
	mov	0, $18
	br	4f

2:	/* Restart allowed. */
	ldq	$18, 16($sp)		/* preserved syscall nr (regs->r2) */
	mov	$31, $26		/* $26=0 => restart allowed */
	br	4f
4:
.endm
95
96
97/*
98 * This defines the normal kernel pt-regs layout.
99 *
100 * regs 9-15 preserved by C code
101 * regs 16-18 saved by PAL-code
102 * regs 29-30 saved and set up by PAL-code
103 * JRP - Save regs 16-18 in a special area of the stack, so that
104 * the palcode-provided values are available to the signal handler.
105 */
106
/*
 * Build the pt_regs frame: push SP_OFF bytes and spill all caller
 * registers that PALcode did not save.  The HAE cache value is read
 * from alpha_mv and stashed at 152($sp) so RESTORE_ALL can detect a
 * changed HAE; $16-$18 (PAL-saved a0-a2) are copied into the frame at
 * 160..176 so signal handlers can see them.
 */
.macro	SAVE_ALL
	subq	$sp, SP_OFF, $sp
	.cfi_adjust_cfa_offset	SP_OFF
	stq	$0, 0($sp)
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	stq	$4, 32($sp)
	stq	$28, 144($sp)
	.cfi_rel_offset	$0, 0
	.cfi_rel_offset $1, 8
	.cfi_rel_offset	$2, 16
	.cfi_rel_offset	$3, 24
	.cfi_rel_offset	$4, 32
	.cfi_rel_offset	$28, 144
	lda	$2, alpha_mv		/* interleaved: machine vector base */
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$19, 72($sp)
	stq	$20, 80($sp)
	stq	$21, 88($sp)
	ldq	$2, HAE_CACHE($2)	/* current cached HAE value */
	stq	$22, 96($sp)
	stq	$23, 104($sp)
	stq	$24, 112($sp)
	stq	$25, 120($sp)
	stq	$26, 128($sp)
	stq	$27, 136($sp)
	stq	$2, 152($sp)		/* regs->hae */
	stq	$16, 160($sp)
	stq	$17, 168($sp)
	stq	$18, 176($sp)
	.cfi_rel_offset	$5, 40
	.cfi_rel_offset	$6, 48
	.cfi_rel_offset	$7, 56
	.cfi_rel_offset	$8, 64
	.cfi_rel_offset $19, 72
	.cfi_rel_offset	$20, 80
	.cfi_rel_offset	$21, 88
	.cfi_rel_offset $22, 96
	.cfi_rel_offset	$23, 104
	.cfi_rel_offset	$24, 112
	.cfi_rel_offset	$25, 120
	.cfi_rel_offset	$26, 128
	.cfi_rel_offset	$27, 136
.endm
155
/*
 * Tear down the pt_regs frame built by SAVE_ALL.  If the saved HAE
 * (152($sp)) differs from the current cached HAE, write it back to the
 * HAE register and the cache before popping the frame.
 */
.macro	RESTORE_ALL
	lda	$19, alpha_mv
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$21, 152($sp)		/* regs->hae */
	ldq	$20, HAE_CACHE($19)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	subq	$20, $21, $20		/* HAE changed while we were in? */
	ldq	$8, 64($sp)
	beq	$20, 99f
	ldq	$20, HAE_REG($19)
	stq	$21, HAE_CACHE($19)	/* restore saved HAE */
	stq	$21, 0($20)
99:	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)
	ldq	$22, 96($sp)
	ldq	$23, 104($sp)
	ldq	$24, 112($sp)
	ldq	$25, 120($sp)
	ldq	$26, 128($sp)
	ldq	$27, 136($sp)
	ldq	$28, 144($sp)
	addq	$sp, SP_OFF, $sp
	.cfi_restore	$0
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_adjust_cfa_offset	-SP_OFF
.endm
206
/*
 * Push the switch_stack (callee-saved $9-$15 plus $26) via the
 * do_switch_stack helper; $1 carries the helper's return address.
 */
.macro	DO_SWITCH_STACK
	bsr	$1, do_switch_stack
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
.endm
218
/* Pop the switch_stack pushed by DO_SWITCH_STACK (also reloads $26). */
.macro	UNDO_SWITCH_STACK
	bsr	$1, undo_switch_stack
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-SWITCH_STACK_SIZE
.endm
230
231/*
232 * Non-syscall kernel entry points.
233 */
234
/* Interrupt entry: regs ptr in a3 ($19); returns via ret_from_sys_call. */
CFI_START_OSF_FRAME entInt
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call	/* preload return path for do_entInt */
	bic	$sp, $8, $8		/* $8 = current thread_info */
	mov	$sp, $19
	jsr	$31, do_entInt
CFI_END_OSF_FRAME entInt
243
/* Arithmetic-trap entry: regs ptr in a2 ($18). */
CFI_START_OSF_FRAME entArith
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call	/* preload return path */
	bic	$sp, $8, $8		/* $8 = current thread_info */
	mov	$sp, $18
	jsr	$31, do_entArith
CFI_END_OSF_FRAME entArith
252
/*
 * Memory-management fault entry.  Unlike the other entries, the
 * callee-saved registers $9-$15 are also spilled so the kernel's
 * inline exception-fixup code may modify them.
 */
CFI_START_OSF_FRAME entMM
	SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them.  */
	subq	$sp, 64, $sp
	.cfi_adjust_cfa_offset	64
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	addq	$sp, 64, $19		/* a3 = pt_regs (above the $9-$15 area) */
/* handle the fault */
	lda	$8, 0x3fff
	bic	$sp, $8, $8		/* $8 = current thread_info */
	jsr	$26, do_page_fault
/* reload the registers after the exception code played.  */
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	addq	$sp, 64, $sp
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-64
/* finish up the syscall as normal.  */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entMM
297
/* Instruction-fault entry (illegal insn, FEN, bpt/gentrap): regs in a1. */
CFI_START_OSF_FRAME entIF
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call	/* preload return path */
	bic	$sp, $8, $8		/* $8 = current thread_info */
	mov	$sp, $17
	jsr	$31, do_entIF
CFI_END_OSF_FRAME entIF
306
/*
 * Unaligned-access entry.  The kernel-mode path uses a private 256-byte
 * frame indexed by register number (reg N at N*8($sp)) so do_entUna can
 * patch any register; the user-mode path bails out early to entUnaUser,
 * which rebuilds a normal pt_regs frame instead.
 */
CFI_START_OSF_FRAME entUna
	lda	$sp, -256($sp)
	.cfi_adjust_cfa_offset	256
	stq	$0, 0($sp)
	.cfi_rel_offset	$0, 0
	.cfi_remember_state
	ldq	$0, 256($sp)	/* get PS */
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	and	$0, 8, $0		/* user mode? */
	stq	$4, 32($sp)
	bne	$0, entUnaUser	/* yup -> do user-level unaligned fault */
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$9, 72($sp)
	stq	$10, 80($sp)
	stq	$11, 88($sp)
	stq	$12, 96($sp)
	stq	$13, 104($sp)
	stq	$14, 112($sp)
	stq	$15, 120($sp)
	/* 16-18 PAL-saved */
	stq	$19, 152($sp)
	stq	$20, 160($sp)
	stq	$21, 168($sp)
	stq	$22, 176($sp)
	stq	$23, 184($sp)
	stq	$24, 192($sp)
	stq	$25, 200($sp)
	stq	$26, 208($sp)
	stq	$27, 216($sp)
	stq	$28, 224($sp)
	mov	$sp, $19		/* a3 = register-save area */
	stq	$gp, 232($sp)
	.cfi_rel_offset	$1, 1*8
	.cfi_rel_offset	$2, 2*8
	.cfi_rel_offset	$3, 3*8
	.cfi_rel_offset	$4, 4*8
	.cfi_rel_offset	$5, 5*8
	.cfi_rel_offset	$6, 6*8
	.cfi_rel_offset	$7, 7*8
	.cfi_rel_offset	$8, 8*8
	.cfi_rel_offset	$9, 9*8
	.cfi_rel_offset	$10, 10*8
	.cfi_rel_offset	$11, 11*8
	.cfi_rel_offset	$12, 12*8
	.cfi_rel_offset	$13, 13*8
	.cfi_rel_offset	$14, 14*8
	.cfi_rel_offset	$15, 15*8
	.cfi_rel_offset	$19, 19*8
	.cfi_rel_offset	$20, 20*8
	.cfi_rel_offset	$21, 21*8
	.cfi_rel_offset	$22, 22*8
	.cfi_rel_offset	$23, 23*8
	.cfi_rel_offset	$24, 24*8
	.cfi_rel_offset	$25, 25*8
	.cfi_rel_offset	$26, 26*8
	.cfi_rel_offset	$27, 27*8
	.cfi_rel_offset	$28, 28*8
	.cfi_rel_offset	$29, 29*8
	lda	$8, 0x3fff
	stq	$31, 248($sp)		/* slot 31 is always zero */
	bic	$sp, $8, $8		/* $8 = current thread_info */
	jsr	$26, do_entUna
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	ldq	$8, 64($sp)
	ldq	$9, 72($sp)
	ldq	$10, 80($sp)
	ldq	$11, 88($sp)
	ldq	$12, 96($sp)
	ldq	$13, 104($sp)
	ldq	$14, 112($sp)
	ldq	$15, 120($sp)
	/* 16-18 PAL-saved */
	ldq	$19, 152($sp)
	ldq	$20, 160($sp)
	ldq	$21, 168($sp)
	ldq	$22, 176($sp)
	ldq	$23, 184($sp)
	ldq	$24, 192($sp)
	ldq	$25, 200($sp)
	ldq	$26, 208($sp)
	ldq	$27, 216($sp)
	ldq	$28, 224($sp)
	ldq	$gp, 232($sp)
	lda	$sp, 256($sp)
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_restore	$29
	.cfi_adjust_cfa_offset	-256
	call_pal PAL_rti

	.align	4
entUnaUser:
	.cfi_restore_state
	ldq	$0, 0($sp)	/* restore original $0 */
	lda	$sp, 256($sp)	/* pop entUna's stack frame */
	.cfi_restore	$0
	.cfi_adjust_cfa_offset	-256
	SAVE_ALL		/* setup normal kernel stack */
	lda	$sp, -64($sp)
	.cfi_adjust_cfa_offset	64
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	lda	$8, 0x3fff
	addq	$sp, 64, $19		/* a3 = pt_regs */
	bic	$sp, $8, $8		/* $8 = current thread_info */
	jsr	$26, do_entUnaUser
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	lda	$sp, 64($sp)
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-64
	br	ret_from_sys_call
CFI_END_OSF_FRAME entUna
478
/* Debug entry: regs ptr in a0 ($16). */
CFI_START_OSF_FRAME entDbg
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call	/* preload return path */
	bic	$sp, $8, $8		/* $8 = current thread_info */
	mov	$sp, $16
	jsr	$31, do_entDbg
CFI_END_OSF_FRAME entDbg
487
488/*
489 * The system call entry point is special.  Most importantly, it looks
490 * like a function call to userspace as far as clobbered registers.  We
491 * do preserve the argument registers (for syscall restarts) and $26
492 * (for leaf syscall functions).
493 *
494 * So much for theory.  We don't take advantage of this yet.
495 *
496 * Note that a0-a2 are not saved by PALcode as with the other entry points.
497 *
498 * Alpha syscall ABI uses:
499 *   - r0 for return value
500 *   - r19 ("a3") as error indicator (0=success, 1=error; r0 holds errno)
501 *
502 * For seccomp/ptrace/generic syscall helpers we track the syscall
503 * number separately:
504 *   - regs->r1: current (mutable) syscall number (may be changed or set to -1)
505 *   - regs->r2: original syscall number for restart/rollback
506 *
507 * On entry PAL provides the syscall number in r0; copy it into r1/r2.
508 */
509
	.align	4
	.globl	entSys
	.type	entSys, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
entSys:
	SAVE_ALL
	/* PAL delivers the syscall nr in r0; mirror it into the mutable
	   shadow (regs->r1) and the restart copy (regs->r2) as described
	   in the block comment above.  */
        ldq     $1, 0($sp)          /* syscall nr from saved r0 */
        stq     $1, 8($sp)          /* regs->r1 = shadow syscall nr */
        stq     $1, 16($sp)         /* regs->r2 = restart syscall nr */

	lda	$8, 0x3fff
	bic	$sp, $8, $8		/* $8 = current thread_info */
	lda	$4, NR_syscalls($31)
	stq	$16, SP_OFF+24($sp)	/* preserve a0-a2 for restarts */
	lda	$5, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $4, $4		/* $4 = nr in range? */
	ldl	$3, TI_FLAGS($8)
	stq	$17, SP_OFF+32($sp)
	s8addq	$0, $5, $5		/* $5 = &sys_call_table[nr] */
	stq	$18, SP_OFF+40($sp)
	.cfi_rel_offset	$16, SP_OFF+24
	.cfi_rel_offset	$17, SP_OFF+32
	.cfi_rel_offset	$18, SP_OFF+40
#ifdef CONFIG_AUDITSYSCALL
	lda     $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP
	and     $3, $6, $3
	bne     $3, strace
#else
	lda     $6, _TIF_SYSCALL_TRACE | _TIF_SECCOMP
	and     $3, $6, $3
	bne     $3, strace
#endif
	beq	$4, 1f			/* out of range => sys_ni_syscall */
	ldq	$27, 0($5)
1:	ldq	$0, 8($sp)		/* syscall nr shadow (regs->r1) */

	jsr	$26, ($27), sys_ni_syscall
	ldgp	$gp, 0($26)
	blt	$0, $syscall_error	/* the call failed */
$ret_success:
	stq	$0, 0($sp)
	stq	$31, 72($sp)		/* a3=0 => no error */
557
	.align	4
	.globl	ret_from_sys_call
ret_from_sys_call:
	/* $26 nonzero means the caller forbids restart: zap $18. */
	cmovne	$26, 0, $18		/* $18 = 0 => non-restartable */
	ldq	$0, SP_OFF($sp)		/* saved PS */
	and	$0, 8, $0		/* returning to user mode? */
	beq	$0, ret_to_kernel
ret_to_user:
	/* Make sure need_resched and sigpending don't change between
		sampling and the rti.  */
	lda	$16, 7
	call_pal PAL_swpipl		/* raise IPL to block interrupts */
	ldl	$17, TI_FLAGS($8)
	and	$17, _TIF_WORK_MASK, $2
	bne	$2, work_pending
restore_all:
	ldl	$2, TI_STATUS($8)
	and	$2, TS_SAVED_FP | TS_RESTORE_FP, $3
	bne	$3, restore_fpu
restore_other:
	.cfi_remember_state
	RESTORE_ALL
	call_pal PAL_rti

ret_to_kernel:
	.cfi_restore_state
	lda	$16, 7
	call_pal PAL_swpipl
	br restore_other
587
	.align 3
$syscall_error:
        /* Restart syscall nr comes from saved r2 (preserved even if r0 overwritten). */
	ldq	$18, 16($sp)	/* old syscall nr for restart */

	ldq	$19, 72($sp)	/* .. and this a3 */
	subq	$31, $0, $0	/* with error in v0: negate to positive errno */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	stq	$1, 72($sp)	/* a3 for return */
	br	ret_from_sys_call
600
601/*
602 * Do all cleanup when returning from all interrupts and system calls.
603 *
604 * Arguments:
605 *       $8: current.
606 *      $17: TI_FLAGS.
607 *      $18: The old syscall number, or zero if this is not a return
608 *           from a syscall that errored and is possibly restartable.
609 *      $19: The old a3 value
610 */
611
	.align	4
	.type	work_pending, @function
work_pending:
	/* Signal-ish work takes priority; otherwise it is a reschedule. */
	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
	bne	$2, $work_notifysig

$work_resched:
	/*
	 * We can get here only if we returned from syscall without SIGPENDING
	 * or got through work_notifysig already.  Either case means no syscall
	 * restarts for us, so let $18 and $19 burn.
	 */
	jsr	$26, schedule
	mov	0, $18			/* no restart after scheduling */
	br	ret_to_user

$work_notifysig:
	mov	$sp, $16		/* a0 = pt_regs for do_work_pending */
	DO_SWITCH_STACK
	jsr	$26, do_work_pending
	UNDO_SWITCH_STACK
	br	restore_all
634
635/*
636 * PTRACE syscall handler
637 */
638
	.align	4
	.type	strace, @function
strace:
	/* set up signal stack, call syscall_trace */
	// NB: if anyone adds preemption, this block will need to be protected
	ldl	$1, TI_STATUS($8)
	and	$1, TS_SAVED_FP, $3
	or	$1, TS_SAVED_FP, $2
	bne	$3, 1f			/* FP already saved */
	stl	$2, TI_STATUS($8)
	bsr	$26, __save_fpu
1:
	DO_SWITCH_STACK
	jsr	$26, syscall_trace_enter /* returns the syscall number */
	UNDO_SWITCH_STACK

	/* Tracer may have rewritten the nr; keep the shadow in sync.
	   regs->r2 still holds the original nr for restarts.  */
	stq     $0, 8($sp)		/* regs->r1 = shadow syscall nr */

	/* get the arguments back.. */
	ldq	$16, SP_OFF+24($sp)
	ldq	$17, SP_OFF+32($sp)
	ldq	$18, SP_OFF+40($sp)
	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)

	/* nr == -1: internal skip-dispatch or userspace syscall(-1)? */
        lda     $6, -1($31)
        cmpeq   $0, $6, $6
	bne	$6, $strace_skip_call	/* nr == -1 => skip dispatch */

	/* get the system call pointer.. */
	lda	$1, NR_syscalls($31)
	lda	$2, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $1, $1		/* unsigned: any other negative nr fails too */
	s8addq	$0, $2, $2
	beq	$1, 1f			/* out of range => sys_ni_syscall */
	ldq	$27, 0($2)
1:	jsr	$26, ($27), sys_gettimeofday
ret_from_straced:
	ldgp	$gp, 0($26)

	/* check return.. */
	blt	$0, $strace_error	/* the call failed */
$strace_success:
	stq	$31, 72($sp)		/* a3=0 => no error */
	stq	$0, 0($sp)		/* save return value */
687
$strace_skip_call:
	/* Normalize the skipped-syscall return state and compute the
	   restart gate: $26=0/1 (allow/forbid restart), $18 = restart
	   nr, $19 = a3.  */
	SYSCALL_SKIP_RETURN_RESTART_GATE
	DO_SWITCH_STACK
	/* $18/$19 are C argument registers (a2/a3), so the call to
	   syscall_trace_leave may clobber them even though
	   ret_from_sys_call consumes them afterwards.  Park them in
	   callee-saved $9/$10 -- just saved on the switch stack --
	   exactly as $strace_error does below.  $26 itself survives via
	   the switch stack (stored by DO_SWITCH_STACK, reloaded by
	   UNDO_SWITCH_STACK).  */
	mov	$18, $9		/* save restart syscall nr */
	mov	$19, $10	/* save old a3 */
	jsr	$26, syscall_trace_leave
	mov	$9, $18
	mov	$10, $19
	UNDO_SWITCH_STACK
	br	$31, ret_from_sys_call
694
	.align	3
$strace_error:
	ldq	$18, 16($sp)	/* restart syscall nr */
	ldq	$19, 72($sp)	/* .. and this a3 */

	subq	$31, $0, $0	/* with error in v0: negate to positive errno */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	stq	$1, 72($sp)	/* a3 for return */

	DO_SWITCH_STACK
	/* $18/$19 are C arg registers; keep them in callee-saved $9/$10
	   across the trace callout.  */
	mov	$18, $9		/* save old syscall number */
	mov	$19, $10	/* save old a3 */
	jsr	$26, syscall_trace_leave
	mov	$9, $18
	mov	$10, $19
	UNDO_SWITCH_STACK

	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entSys
716
717/*
718 * Save and restore the switch stack -- aka the balance of the user context.
719 */
720
	.align	4
	.type	do_switch_stack, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa $sp, 0
	.cfi_register 64, $1
do_switch_stack:
	/* Push the switch_stack: callee-saved $9-$15 plus $26.
	   Called with bsr $1; must not clobber anything else.  */
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	stq	$26, 56($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size	do_switch_stack, .-do_switch_stack
741
	.align	4
	.type	undo_switch_stack, @function
	.cfi_startproc simple
	.cfi_def_cfa $sp, 0
	.cfi_register 64, $1
undo_switch_stack:
	/* Pop the switch_stack pushed by do_switch_stack (reloads $26). */
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	ldq	$26, 56($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size	undo_switch_stack, .-undo_switch_stack
760
/* FR(n): slot for $f<n> inside the thread_info FP save area. */
#define FR(n) n * 8 + TI_FP($8)
	.align	4
	.globl	__save_fpu
	.type	__save_fpu, @function
__save_fpu:
	/* Dump $f0-$f30 and the FPCR into current's FP save area.
	   The FPCR goes in the (always-zero) $f31 slot.  */
#define V(n) stt	$f##n, FR(n)
	V( 0); V( 1); V( 2); V( 3)
	V( 4); V( 5); V( 6); V( 7)
	V( 8); V( 9); V(10); V(11)
	V(12); V(13); V(14); V(15)
	V(16); V(17); V(18); V(19)
	V(20); V(21); V(22); V(23)
	V(24); V(25); V(26); V(27)
	mf_fpcr	$f0		# get fpcr
	V(28); V(29); V(30)
	stt	$f0, FR(31)	# save fpcr in slot of $f31
	ldt	$f0, FR(0)	# don't let "__save_fpu" change fp state.
	ret
#undef V
	.size	__save_fpu, .-__save_fpu
781
	.align	4
restore_fpu:
	/* Entered from restore_all with $2 = TI_STATUS and
	   $3 = $2 & (TS_SAVED_FP|TS_RESTORE_FP); reload the FP state if
	   TS_RESTORE_FP is set, clear both flags, and resume the exit.  */
	and	$3, TS_RESTORE_FP, $3
	bic	$2, TS_SAVED_FP | TS_RESTORE_FP, $2
	beq	$3, 1f			/* nothing to restore */
#define V(n) ldt	$f##n, FR(n)
	ldt	$f30, FR(31)	# get saved fpcr
	V( 0); V( 1); V( 2); V( 3)
	mt_fpcr	$f30		# install saved fpcr
	V( 4); V( 5); V( 6); V( 7)
	V( 8); V( 9); V(10); V(11)
	V(12); V(13); V(14); V(15)
	V(16); V(17); V(18); V(19)
	V(20); V(21); V(22); V(23)
	V(24); V(25); V(26); V(27)
	V(28); V(29); V(30)
1:	stl $2, TI_STATUS($8)
	br restore_other
#undef V
801
802
803/*
804 * The meat of the context switch code.
805 */
	.align	4
	.globl	alpha_switch_to
	.type	alpha_switch_to, @function
	.cfi_startproc
alpha_switch_to:
	/* Context switch: a0 = new PCB, a1 = prev task.  Saves the
	   switch_stack and lazily saves FP before swpctx; returns prev
	   task in $0.  */
	DO_SWITCH_STACK
	ldl	$1, TI_STATUS($8)
	and	$1, TS_RESTORE_FP, $3
	bne	$3, 1f			/* FP state still pending restore */
	or	$1, TS_RESTORE_FP | TS_SAVED_FP, $2
	and	$1, TS_SAVED_FP, $3
	stl	$2, TI_STATUS($8)
	bne	$3, 1f			/* FP already saved */
	bsr	$26, __save_fpu
1:
	call_pal PAL_swpctx
	lda	$8, 0x3fff
	UNDO_SWITCH_STACK
	bic	$sp, $8, $8		/* $8 = new thread_info */
	mov	$17, $0			/* return prev task */
	ret
	.cfi_endproc
	.size	alpha_switch_to, .-alpha_switch_to
829
830/*
831 * New processes begin life here.
832 */
833
	.globl	ret_from_fork
	.align	4
	.ent	ret_from_fork
ret_from_fork:
	/* First schedule-in of a forked user task: tail-call
	   schedule_tail(prev) and fall into ret_to_user via $26.  */
	lda	$26, ret_to_user
	mov	$17, $16
	jmp	$31, schedule_tail
.end ret_from_fork
842
843/*
844 * ... and new kernel threads - here
845 */
	.align 4
	.globl	ret_from_kernel_thread
	.ent	ret_from_kernel_thread
ret_from_kernel_thread:
	/* New kernel thread: $9 = thread function, $10 = its argument
	   (stashed in callee-saved regs by copy_thread).  */
	mov	$17, $16
	jsr	$26, schedule_tail
	mov	$9, $27			/* pv for the callee */
	mov	$10, $16
	jsr	$26, ($9)
	br	$31, ret_to_user
.end ret_from_kernel_thread
857
858
859/*
860 * Special system calls.  Most of these are special in that they either
861 * have to play switch_stack games.
862 */
863
/*
 * Wrapper for fork-family syscalls: push the switch_stack (the child
 * needs the full callee-saved context), lazily save FP state, then call
 * the real sys_<name>.
 */
.macro	fork_like name
	.align	4
	.globl	alpha_\name
	.ent	alpha_\name
alpha_\name:
	.prologue 0
	bsr	$1, do_switch_stack
	// NB: if anyone adds preemption, this block will need to be protected
	ldl	$1, TI_STATUS($8)
	and	$1, TS_SAVED_FP, $3
	or	$1, TS_SAVED_FP, $2
	bne	$3, 1f			/* FP already saved */
	stl	$2, TI_STATUS($8)
	bsr	$26, __save_fpu
1:
	jsr	$26, sys_\name
	ldq	$26, 56($sp)		/* reload return addr from switch_stack */
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret
.end	alpha_\name
.endm

fork_like fork
fork_like vfork
fork_like clone
fork_like clone3
890
/*
 * Wrapper for sigreturn-family syscalls.  The cmpult against
 * ret_from_straced detects whether we were dispatched via the strace
 * path ($26 points past that label); if so, syscall_trace_leave must
 * still be called before returning.
 */
.macro	sigreturn_like name
	.align	4
	.globl	sys_\name
	.ent	sys_\name
sys_\name:
	.prologue 0
	lda	$9, ret_from_straced
	cmpult	$26, $9, $9		/* $9 = 1 => not from straced path */
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	jsr	$26, do_\name
	bne	$9, 1f
	jsr	$26, syscall_trace_leave
1:	br	$1, undo_switch_stack
	br	ret_from_sys_call
.end sys_\name
.endm

sigreturn_like sigreturn
sigreturn_like rt_sigreturn
910
	.align	4
	.globl	alpha_syscall_zero
	.ent	alpha_syscall_zero
alpha_syscall_zero:
	.prologue 0
	/* Special because it needs to do something opposite to
	   force_successful_syscall_return().  We use the saved
	   syscall number for that, zero meaning "not an error".
	   That works nicely, but for real syscall 0 we need to
	   make sure that this logics doesn't get confused.
	   Store a non-zero there - -ENOSYS we need in register
	   for our return value will do just fine.
	  */
	lda	$0, -ENOSYS
	unop
	stq	$0, 0($sp)		/* regs->r0 = -ENOSYS (non-zero marker) */
	ret
.end alpha_syscall_zero
929