/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/kernel/entry.S
 *
 * Kernel entry-points.
 */

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/pal.h>
#include <asm/errno.h>
#include <asm/unistd.h>

	.text
	.set noat
	.cfi_sections	.debug_frame

/* Stack offsets.  */
#define SP_OFF			184
#define SWITCH_STACK_SIZE	320

.macro	CFI_START_OSF_FRAME	func
	.align	4
	.globl	\func
	.type	\func,@function
\func:
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
	.cfi_rel_offset	$16, 24
	.cfi_rel_offset	$17, 32
	.cfi_rel_offset	$18, 40
.endm

.macro	CFI_END_OSF_FRAME	func
	.cfi_endproc
	.size	\func, . - \func
.endm

/*
 * This defines the normal kernel pt-regs layout.
 *
 * regs 9-15 preserved by C code
 * regs 16-18 saved by PAL-code
 * regs 29-30 saved and set up by PAL-code
 * JRP - Save regs 16-18 in a special area of the stack, so that
 * the palcode-provided values are available to the signal handler.
 */
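/*
 * For reference, a rough sketch of the frame that SAVE_ALL builds below
 * (byte offsets from $sp; the authoritative layout is struct pt_regs in
 * asm/ptrace.h, so trust that header if the two ever disagree):
 *
 *	  0.. 64	r0-r8
 *	 72..144	r19-r28
 *	152		hae
 *	160..176	trap_a0-trap_a2	(copies of r16-r18, see above)
 *	184..224	ps, pc, gp, r16-r18	(at SP_OFF; pushed by PAL-code)
 */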

.macro	SAVE_ALL
	subq	$sp, SP_OFF, $sp
	.cfi_adjust_cfa_offset	SP_OFF
	stq	$0, 0($sp)
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	stq	$4, 32($sp)
	stq	$28, 144($sp)
	.cfi_rel_offset	$0, 0
	.cfi_rel_offset $1, 8
	.cfi_rel_offset	$2, 16
	.cfi_rel_offset	$3, 24
	.cfi_rel_offset	$4, 32
	.cfi_rel_offset	$28, 144
	lda	$2, alpha_mv
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$19, 72($sp)
	stq	$20, 80($sp)
	stq	$21, 88($sp)
	ldq	$2, HAE_CACHE($2)
	stq	$22, 96($sp)
	stq	$23, 104($sp)
	stq	$24, 112($sp)
	stq	$25, 120($sp)
	stq	$26, 128($sp)
	stq	$27, 136($sp)
	stq	$2, 152($sp)
	stq	$16, 160($sp)
	stq	$17, 168($sp)
	stq	$18, 176($sp)
	.cfi_rel_offset	$5, 40
	.cfi_rel_offset	$6, 48
	.cfi_rel_offset	$7, 56
	.cfi_rel_offset	$8, 64
	.cfi_rel_offset $19, 72
	.cfi_rel_offset	$20, 80
	.cfi_rel_offset	$21, 88
	.cfi_rel_offset $22, 96
	.cfi_rel_offset	$23, 104
	.cfi_rel_offset	$24, 112
	.cfi_rel_offset	$25, 120
	.cfi_rel_offset	$26, 128
	.cfi_rel_offset	$27, 136
.endm

.macro	RESTORE_ALL
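	/* Roughly: reload the registers saved by SAVE_ALL, and if the HAE
	   value recorded in the frame differs from the cached one in
	   alpha_mv, write it back through the hardware HAE register and
	   refresh the cache before dropping the frame.  (Descriptive
	   sketch only; HAE_CACHE/HAE_REG come from asm-offsets.)  */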
	lda	$19, alpha_mv
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$21, 152($sp)
	ldq	$20, HAE_CACHE($19)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	subq	$20, $21, $20
	ldq	$8, 64($sp)
	beq	$20, 99f
	ldq	$20, HAE_REG($19)
	stq	$21, HAE_CACHE($19)
	stq	$21, 0($20)
99:	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)
	ldq	$22, 96($sp)
	ldq	$23, 104($sp)
	ldq	$24, 112($sp)
	ldq	$25, 120($sp)
	ldq	$26, 128($sp)
	ldq	$27, 136($sp)
	ldq	$28, 144($sp)
	addq	$sp, SP_OFF, $sp
	.cfi_restore	$0
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_adjust_cfa_offset	-SP_OFF
.endm

.macro	DO_SWITCH_STACK
	bsr	$1, do_switch_stack
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	/* We don't really care about the FP registers for debugging.  */
.endm

.macro	UNDO_SWITCH_STACK
	bsr	$1, undo_switch_stack
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-SWITCH_STACK_SIZE
.endm

/*
 * Non-syscall kernel entry points.
 */
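/*
 * A note on the shared pattern: each stub below saves a pt_regs frame,
 * recomputes the thread_info pointer in $8 by clearing the low bits of
 * $sp (the kernel stack is 16KB aligned, hence the 0x3fff mask), and
 * hands off to the matching do_ent* handler in C.  entUna is the odd
 * one out: for kernel-mode faults it builds its own full register
 * frame and returns directly via PAL_rti.
 */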

CFI_START_OSF_FRAME entInt
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $19
	jsr	$31, do_entInt
CFI_END_OSF_FRAME entInt

CFI_START_OSF_FRAME entArith
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $18
	jsr	$31, do_entArith
CFI_END_OSF_FRAME entArith

CFI_START_OSF_FRAME entMM
	SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them.  */
	subq	$sp, 56, $sp
	.cfi_adjust_cfa_offset	56
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	addq	$sp, 56, $19
/* handle the fault */
	lda	$8, 0x3fff
	bic	$sp, $8, $8
	jsr	$26, do_page_fault
/* reload the registers after the exception code played.  */
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	addq	$sp, 56, $sp
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-56
/* finish up the syscall as normal.  */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entMM

CFI_START_OSF_FRAME entIF
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $17
	jsr	$31, do_entIF
CFI_END_OSF_FRAME entIF

CFI_START_OSF_FRAME entUna
	lda	$sp, -256($sp)
	.cfi_adjust_cfa_offset	256
	stq	$0, 0($sp)
	.cfi_rel_offset	$0, 0
	.cfi_remember_state
	ldq	$0, 256($sp)	/* get PS */
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	and	$0, 8, $0		/* user mode? */
	stq	$4, 32($sp)
	bne	$0, entUnaUser	/* yup -> do user-level unaligned fault */
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$9, 72($sp)
	stq	$10, 80($sp)
	stq	$11, 88($sp)
	stq	$12, 96($sp)
	stq	$13, 104($sp)
	stq	$14, 112($sp)
	stq	$15, 120($sp)
	/* 16-18 PAL-saved */
	stq	$19, 152($sp)
	stq	$20, 160($sp)
	stq	$21, 168($sp)
	stq	$22, 176($sp)
	stq	$23, 184($sp)
	stq	$24, 192($sp)
	stq	$25, 200($sp)
	stq	$26, 208($sp)
	stq	$27, 216($sp)
	stq	$28, 224($sp)
	mov	$sp, $19
	stq	$gp, 232($sp)
	.cfi_rel_offset	$1, 1*8
	.cfi_rel_offset	$2, 2*8
	.cfi_rel_offset	$3, 3*8
	.cfi_rel_offset	$4, 4*8
	.cfi_rel_offset	$5, 5*8
	.cfi_rel_offset	$6, 6*8
	.cfi_rel_offset	$7, 7*8
	.cfi_rel_offset	$8, 8*8
	.cfi_rel_offset	$9, 9*8
	.cfi_rel_offset	$10, 10*8
	.cfi_rel_offset	$11, 11*8
	.cfi_rel_offset	$12, 12*8
	.cfi_rel_offset	$13, 13*8
	.cfi_rel_offset	$14, 14*8
	.cfi_rel_offset	$15, 15*8
	.cfi_rel_offset	$19, 19*8
	.cfi_rel_offset	$20, 20*8
	.cfi_rel_offset	$21, 21*8
	.cfi_rel_offset	$22, 22*8
	.cfi_rel_offset	$23, 23*8
	.cfi_rel_offset	$24, 24*8
	.cfi_rel_offset	$25, 25*8
	.cfi_rel_offset	$26, 26*8
	.cfi_rel_offset	$27, 27*8
	.cfi_rel_offset	$28, 28*8
	.cfi_rel_offset	$29, 29*8
	lda	$8, 0x3fff
	stq	$31, 248($sp)
	bic	$sp, $8, $8
	jsr	$26, do_entUna
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	ldq	$8, 64($sp)
	ldq	$9, 72($sp)
	ldq	$10, 80($sp)
	ldq	$11, 88($sp)
	ldq	$12, 96($sp)
	ldq	$13, 104($sp)
	ldq	$14, 112($sp)
	ldq	$15, 120($sp)
	/* 16-18 PAL-saved */
	ldq	$19, 152($sp)
	ldq	$20, 160($sp)
	ldq	$21, 168($sp)
	ldq	$22, 176($sp)
	ldq	$23, 184($sp)
	ldq	$24, 192($sp)
	ldq	$25, 200($sp)
	ldq	$26, 208($sp)
	ldq	$27, 216($sp)
	ldq	$28, 224($sp)
	ldq	$gp, 232($sp)
	lda	$sp, 256($sp)
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_restore	$29
	.cfi_adjust_cfa_offset	-256
	call_pal PAL_rti

	.align	4
entUnaUser:
	.cfi_restore_state
	ldq	$0, 0($sp)	/* restore original $0 */
	lda	$sp, 256($sp)	/* pop entUna's stack frame */
	.cfi_restore	$0
	.cfi_adjust_cfa_offset	-256
	SAVE_ALL		/* setup normal kernel stack */
	lda	$sp, -56($sp)
	.cfi_adjust_cfa_offset	56
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	lda	$8, 0x3fff
	addq	$sp, 56, $19
	bic	$sp, $8, $8
	jsr	$26, do_entUnaUser
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	lda	$sp, 56($sp)
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-56
	br	ret_from_sys_call
CFI_END_OSF_FRAME entUna

CFI_START_OSF_FRAME entDbg
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $16
	jsr	$31, do_entDbg
CFI_END_OSF_FRAME entDbg

/*
 * The system call entry point is special.  Most importantly, it looks
 * like a function call to userspace as far as clobbered registers are
 * concerned.  We do preserve the argument registers (for syscall
 * restarts) and $26 (for leaf syscall functions).
 *
 * So much for theory.  We don't take advantage of this yet.
 *
 * Note that a0-a2 are not saved by PALcode as with the other entry points.
 */
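/*
 * In C terms the dispatch below is roughly the following sketch
 * (illustrative only; "forced_success" stands for the
 * force_successful_syscall_return() case handled at $syscall_error):
 *
 *	long (*f)(long, long, long, long, long, long) = sys_ni_syscall;
 *	if ((unsigned long) v0 < NR_SYSCALLS)
 *		f = sys_call_table[v0];
 *	v0 = f(a0, a1, a2, a3, a4, a5);
 *	if (v0 < 0 && !forced_success)
 *		a3 = 1, v0 = -v0;	(negative return becomes positive errno)
 *	else
 *		a3 = 0;			(a3 == 0 means "no error")
 *
 * Userspace's syscall stubs look at a3 to tell an errno return in v0
 * from a legitimate negative result.
 */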

	.align	4
	.globl	entSys
	.type	entSys, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
entSys:
	SAVE_ALL
	lda	$8, 0x3fff
	bic	$sp, $8, $8
	lda	$4, NR_SYSCALLS($31)
	stq	$16, SP_OFF+24($sp)
	lda	$5, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $4, $4
	ldl	$3, TI_FLAGS($8)
	stq	$17, SP_OFF+32($sp)
	s8addq	$0, $5, $5
	stq	$18, SP_OFF+40($sp)
	.cfi_rel_offset	$16, SP_OFF+24
	.cfi_rel_offset	$17, SP_OFF+32
	.cfi_rel_offset	$18, SP_OFF+40
#ifdef CONFIG_AUDITSYSCALL
	lda     $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
	and     $3, $6, $3
#endif
	bne     $3, strace
	beq	$4, 1f
	ldq	$27, 0($5)
1:	jsr	$26, ($27), sys_ni_syscall
	ldgp	$gp, 0($26)
	blt	$0, $syscall_error	/* the call failed */
	stq	$0, 0($sp)
	stq	$31, 72($sp)		/* a3=0 => no error */

	.align	4
	.globl	ret_from_sys_call
ret_from_sys_call:
	cmovne	$26, 0, $18		/* $18 = 0 => non-restartable */
	ldq	$0, SP_OFF($sp)
	and	$0, 8, $0
	beq	$0, ret_to_kernel
ret_to_user:
	/* Make sure need_resched and sigpending don't change between
		sampling and the rti.  */
	lda	$16, 7
	call_pal PAL_swpipl
	ldl	$17, TI_FLAGS($8)
	and	$17, _TIF_WORK_MASK, $2
	bne	$2, work_pending
restore_all:
	.cfi_remember_state
	RESTORE_ALL
	call_pal PAL_rti

ret_to_kernel:
	.cfi_restore_state
	lda	$16, 7
	call_pal PAL_swpipl
	br restore_all

	.align 3
$syscall_error:
	/*
	 * Some system calls (e.g., ptrace) can return arbitrary
	 * values which might normally be mistaken for error numbers.
	 * Those functions must zero $0 (v0) directly in the stack
	 * frame to indicate that a negative return value wasn't an
	 * error number.
	 */
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $ret_success

	ldq	$19, 72($sp)	/* .. and this a3 */
	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	stq	$1, 72($sp)	/* a3 for return */
	br	ret_from_sys_call

$ret_success:
	stq	$0, 0($sp)
	stq	$31, 72($sp)	/* a3=0 => no error */
	br	ret_from_sys_call

/*
 * Do all cleanup when returning from all interrupts and system calls.
 *
 * Arguments:
 *       $8: current.
 *      $17: TI_FLAGS.
 *      $18: The old syscall number, or zero if this is not a return
 *           from a syscall that errored and is possibly restartable.
 *      $19: The old a3 value
 */

	.align	4
	.type	work_pending, @function
work_pending:
	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
	bne	$2, $work_notifysig

$work_resched:
	/*
	 * We can get here only if we returned from syscall without SIGPENDING
	 * or got through work_notifysig already.  Either case means no syscall
	 * restarts for us, so let $18 and $19 burn.
	 */
	jsr	$26, schedule
	mov	0, $18
	br	ret_to_user

$work_notifysig:
	mov	$sp, $16
	DO_SWITCH_STACK
	jsr	$26, do_work_pending
	UNDO_SWITCH_STACK
	br	restore_all

/*
 * PTRACE syscall handler
 */

	.align	4
	.type	strace, @function
strace:
	/* set up the switch stack, call syscall_trace_enter */
	DO_SWITCH_STACK
	jsr	$26, syscall_trace_enter /* returns the syscall number */
	UNDO_SWITCH_STACK

	/* get the arguments back.. */
	ldq	$16, SP_OFF+24($sp)
	ldq	$17, SP_OFF+32($sp)
	ldq	$18, SP_OFF+40($sp)
	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)

	/* get the system call pointer.. */
	lda	$1, NR_SYSCALLS($31)
	lda	$2, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $1, $1
	s8addq	$0, $2, $2
	beq	$1, 1f
	ldq	$27, 0($2)
1:	jsr	$26, ($27), sys_gettimeofday
ret_from_straced:
	ldgp	$gp, 0($26)

	/* check return.. */
	blt	$0, $strace_error	/* the call failed */
	stq	$31, 72($sp)		/* a3=0 => no error */
$strace_success:
	stq	$0, 0($sp)		/* save return value */

	DO_SWITCH_STACK
	jsr	$26, syscall_trace_leave
	UNDO_SWITCH_STACK
	br	$31, ret_from_sys_call

	.align	3
$strace_error:
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $strace_success
	ldq	$19, 72($sp)	/* .. and this a3 */

	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	stq	$1, 72($sp)	/* a3 for return */

	DO_SWITCH_STACK
	mov	$18, $9		/* save old syscall number */
	mov	$19, $10	/* save old a3 */
	jsr	$26, syscall_trace_leave
	mov	$9, $18
	mov	$10, $19
	UNDO_SWITCH_STACK

	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entSys

/*
 * Save and restore the switch stack -- aka the balance of the user context.
 */
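/*
 * Roughly, the 320-byte frame built here holds $9-$15 and $26 at
 * offsets 0..56, $f0-$f30 at 64..304, and the fpcr in the $f31 slot at
 * 312; the authoritative layout is struct switch_stack in asm/ptrace.h.
 */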

	.align	4
	.type	do_switch_stack, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa $sp, 0
	.cfi_register 64, $1
do_switch_stack:
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	stq	$26, 56($sp)
	stt	$f0, 64($sp)
	stt	$f1, 72($sp)
	stt	$f2, 80($sp)
	stt	$f3, 88($sp)
	stt	$f4, 96($sp)
	stt	$f5, 104($sp)
	stt	$f6, 112($sp)
	stt	$f7, 120($sp)
	stt	$f8, 128($sp)
	stt	$f9, 136($sp)
	stt	$f10, 144($sp)
	stt	$f11, 152($sp)
	stt	$f12, 160($sp)
	stt	$f13, 168($sp)
	stt	$f14, 176($sp)
	stt	$f15, 184($sp)
	stt	$f16, 192($sp)
	stt	$f17, 200($sp)
	stt	$f18, 208($sp)
	stt	$f19, 216($sp)
	stt	$f20, 224($sp)
	stt	$f21, 232($sp)
	stt	$f22, 240($sp)
	stt	$f23, 248($sp)
	stt	$f24, 256($sp)
	stt	$f25, 264($sp)
	stt	$f26, 272($sp)
	stt	$f27, 280($sp)
	mf_fpcr	$f0		# get fpcr
	stt	$f28, 288($sp)
	stt	$f29, 296($sp)
	stt	$f30, 304($sp)
	stt	$f0, 312($sp)	# save fpcr in slot of $f31
	ldt	$f0, 64($sp)	# don't let "do_switch_stack" change fp state.
	ret	$31, ($1), 1
	.cfi_endproc
	.size	do_switch_stack, .-do_switch_stack

	.align	4
	.type	undo_switch_stack, @function
	.cfi_startproc simple
	.cfi_def_cfa $sp, 0
	.cfi_register 64, $1
undo_switch_stack:
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	ldq	$26, 56($sp)
	ldt	$f30, 312($sp)	# get saved fpcr
	ldt	$f0, 64($sp)
	ldt	$f1, 72($sp)
	ldt	$f2, 80($sp)
	ldt	$f3, 88($sp)
	mt_fpcr	$f30		# install saved fpcr
	ldt	$f4, 96($sp)
	ldt	$f5, 104($sp)
	ldt	$f6, 112($sp)
	ldt	$f7, 120($sp)
	ldt	$f8, 128($sp)
	ldt	$f9, 136($sp)
	ldt	$f10, 144($sp)
	ldt	$f11, 152($sp)
	ldt	$f12, 160($sp)
	ldt	$f13, 168($sp)
	ldt	$f14, 176($sp)
	ldt	$f15, 184($sp)
	ldt	$f16, 192($sp)
	ldt	$f17, 200($sp)
	ldt	$f18, 208($sp)
	ldt	$f19, 216($sp)
	ldt	$f20, 224($sp)
	ldt	$f21, 232($sp)
	ldt	$f22, 240($sp)
	ldt	$f23, 248($sp)
	ldt	$f24, 256($sp)
	ldt	$f25, 264($sp)
	ldt	$f26, 272($sp)
	ldt	$f27, 280($sp)
	ldt	$f28, 288($sp)
	ldt	$f29, 296($sp)
	ldt	$f30, 304($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size	undo_switch_stack, .-undo_switch_stack

/*
 * The meat of the context switch code.
 */
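/*
 * Descriptive sketch: alpha_switch_to spills the callee-saved state
 * with DO_SWITCH_STACK, lets PALcode swap in the next task's context
 * (and stack) via PAL_swpctx, recomputes the thread_info pointer in $8
 * from the new $sp, and hands back $17 -- by convention the previous
 * task -- as the return value.
 */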

	.align	4
	.globl	alpha_switch_to
	.type	alpha_switch_to, @function
	.cfi_startproc
alpha_switch_to:
	DO_SWITCH_STACK
	call_pal PAL_swpctx
	lda	$8, 0x3fff
	UNDO_SWITCH_STACK
	bic	$sp, $8, $8
	mov	$17, $0
	ret
	.cfi_endproc
	.size	alpha_switch_to, .-alpha_switch_to

/*
 * New processes begin life here.
 */

	.globl	ret_from_fork
	.align	4
	.ent	ret_from_fork
ret_from_fork:
	lda	$26, ret_from_sys_call
	mov	$17, $16
	jmp	$31, schedule_tail
.end ret_from_fork

/*
 * ... and new kernel threads - here
 */
	.align 4
	.globl	ret_from_kernel_thread
	.ent	ret_from_kernel_thread
ret_from_kernel_thread:
	mov	$17, $16
	jsr	$26, schedule_tail
	mov	$9, $27
	mov	$10, $16
	jsr	$26, ($9)
	br	$31, ret_to_user
.end ret_from_kernel_thread


/*
 * Special system calls.  Most of these are special in that they have
 * to play switch_stack games.
 */
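/*
 * The fork_like wrappers below push a full switch_stack on top of the
 * pt_regs frame before calling the corresponding sys_* function, so
 * that the callee-saved integer and FP state sits on the stack where
 * the child-setup code can find it; afterwards only $26 needs
 * reloading, since the callee-saved registers themselves are unchanged
 * by the call.
 */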

.macro	fork_like name
	.align	4
	.globl	alpha_\name
	.ent	alpha_\name
alpha_\name:
	.prologue 0
	bsr	$1, do_switch_stack
	jsr	$26, sys_\name
	ldq	$26, 56($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret
.end	alpha_\name
.endm

fork_like fork
fork_like vfork
fork_like clone

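/*
 * The sigreturn_like wrappers build a switch_stack, let do_sigreturn /
 * do_rt_sigreturn rewrite the register frames from the signal context,
 * and then decide whether a tracer still needs a syscall-exit report:
 * the cmpult leaves 0 in $9 only when $26 is not below ret_from_straced,
 * which in practice means we were entered from the strace path above.
 */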
.macro	sigreturn_like name
	.align	4
	.globl	sys_\name
	.ent	sys_\name
sys_\name:
	.prologue 0
	lda	$9, ret_from_straced
	cmpult	$26, $9, $9
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	jsr	$26, do_\name
	bne	$9, 1f
	jsr	$26, syscall_trace_leave
1:	br	$1, undo_switch_stack
	br	ret_from_sys_call
.end sys_\name
.endm

sigreturn_like sigreturn
sigreturn_like rt_sigreturn

	.align	4
	.globl	alpha_syscall_zero
	.ent	alpha_syscall_zero
alpha_syscall_zero:
	.prologue 0
	/* Special because it needs to do something opposite to
	   force_successful_syscall_return().  We use the saved
	   syscall number for that, zero meaning "not an error".
	   That works nicely, but for real syscall 0 we need to
	   make sure that this logic doesn't get confused.
	   Store a non-zero value there - the -ENOSYS we need in
	   the register for our return value will do just fine.
	  */
	lda	$0, -ENOSYS
	unop
	stq	$0, 0($sp)
	ret
.end alpha_syscall_zero
853