1/* $Id: entry.S,v 1.170 2001/11/13 00:57:05 davem Exp $
2 * arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
7 * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
9 */
10
11#include <linux/config.h>
12#include <linux/errno.h>
13
14#include <asm/head.h>
15#include <asm/asi.h>
16#include <asm/smp.h>
17#include <asm/kgdb.h>
18#include <asm/contregs.h>
19#include <asm/ptrace.h>
20#include <asm/asm-offsets.h>
21#include <asm/psr.h>
22#include <asm/vaddrs.h>
23#include <asm/memreg.h>
24#include <asm/page.h>
25#ifdef CONFIG_SUN4
26#include <asm/pgtsun4.h>
27#else
28#include <asm/pgtsun4c.h>
29#endif
30#include <asm/winmacro.h>
31#include <asm/signal.h>
32#include <asm/obio.h>
33#include <asm/mxcc.h>
34#include <asm/thread_info.h>
35#include <asm/param.h>
36
37#include <asm/asmmacro.h>
38
39#define curptr      g6
40
41#define NR_SYSCALLS 284      /* Each OS is different... */
42
43/* These are just handy. */
44#define _SV	save	%sp, -STACKFRAME_SZ, %sp
45#define _RS     restore
46
47#define FLUSH_ALL_KERNEL_WINDOWS \
48	_SV; _SV; _SV; _SV; _SV; _SV; _SV; \
49	_RS; _RS; _RS; _RS; _RS; _RS; _RS;
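/* Seven nested save/restore pairs rotate the window pointer through
 * the other register windows, so any kernel window still holding live
 * data gets pushed to the stack by the ordinary overflow trap before
 * we come back to the window we started in.  (Assumes the usual
 * 8-window parts; the flush_patch_* sites below presumably exist so
 * the sequence can be patched for other window counts.)
 */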
50
51/* First, KGDB low level things.  This is a rewrite
52 * of the routines found in the sparc-stub.c asm() statement
53 * from the gdb distribution.  This is also dual-purpose
54 * as a software trap for userlevel programs.
55 */
56	.data
57	.align	4
58
59in_trap_handler:
60	.word	0
61
62	.text
63	.align	4
64
65#if 0 /* kgdb is dropped from 2.5.33 */
66! This function is called when any SPARC trap (except window overflow or
67! underflow) occurs.  It makes sure that the invalid register window is still
68! available before jumping into C code.  It will also restore the world if you
69! return from handle_exception.
70
71	.globl	trap_low
72trap_low:
73	rd	%wim, %l3
74	SAVE_ALL
75
76	sethi	%hi(in_trap_handler), %l4
77	ld	[%lo(in_trap_handler) + %l4], %l5
78	inc	%l5
79	st	%l5, [%lo(in_trap_handler) + %l4]
80
81	/* Make sure kgdb sees the same state we just saved. */
82	LOAD_PT_GLOBALS(sp)
83	LOAD_PT_INS(sp)
84	ld	[%sp + STACKFRAME_SZ + PT_Y], %l4
85	ld	[%sp + STACKFRAME_SZ + PT_WIM], %l3
86	ld	[%sp + STACKFRAME_SZ + PT_PSR], %l0
87	ld	[%sp + STACKFRAME_SZ + PT_PC], %l1
88	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l2
89	rd	%tbr, %l5	/* Never changes... */
90
91	/* Make kgdb exception frame. */
92	sub	%sp,(16+1+6+1+72)*4,%sp	! Make room for input & locals
93 					! + hidden arg + arg spill
94					! + doubleword alignment
95					! + registers[72] local var
96	SAVE_KGDB_GLOBALS(sp)
97	SAVE_KGDB_INS(sp)
98	SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
99
100	/* We are increasing PIL, so two writes. */
101	or	%l0, PSR_PIL, %l0
102	wr	%l0, 0, %psr
103	WRITE_PAUSE
104	wr	%l0, PSR_ET, %psr
105	WRITE_PAUSE
106
107	call	handle_exception
108	 add	%sp, STACKFRAME_SZ, %o0	! Pass address of registers
109
110	/* Load new kgdb register set. */
111	LOAD_KGDB_GLOBALS(sp)
112	LOAD_KGDB_INS(sp)
113	LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
114	wr      %l4, 0x0, %y
115
116	sethi	%hi(in_trap_handler), %l4
117	ld	[%lo(in_trap_handler) + %l4], %l5
118	dec	%l5
119	st	%l5, [%lo(in_trap_handler) + %l4]
120
121	add	%sp,(16+1+6+1+72)*4,%sp	! Undo the kgdb trap frame.
122
123	/* Now take what kgdb did and place it into the pt_regs
124	 * frame which SparcLinux RESTORE_ALL understands.
125	 */
126	STORE_PT_INS(sp)
127	STORE_PT_GLOBALS(sp)
128	STORE_PT_YREG(sp, g2)
129	STORE_PT_PRIV(sp, l0, l1, l2)
130
131	RESTORE_ALL
132#endif
133
134#ifdef CONFIG_BLK_DEV_FD
135	.text
136	.align	4
137	.globl	floppy_hardint
138floppy_hardint:
139	/*
140	 * This code cannot touch registers %l0 %l1 and %l2
141	 * because SAVE_ALL depends on their values. It depends
142	 * on %l3 also, but we regenerate it before a call.
143	 * Other registers are:
144	 * %l3 -- base address of fdc registers
145	 * %l4 -- pdma_vaddr
146	 * %l5 -- scratch for ld/st address
147	 * %l6 -- pdma_size
148	 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
149	 */
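	/* This is installed as a "fast" interrupt: no pt_regs frame is
	 * built here.  Bytes are shuttled between the FDC fifo and
	 * pdma_vaddr one at a time; only when the transfer finishes,
	 * overruns, or was not a pseudo-DMA interrupt at all do we drop
	 * into floppy_dosoftint and the normal SAVE_ALL path.  If the
	 * fifo merely drains mid-transfer we return straight from the
	 * trap at floppy_fifo_emptied.
	 */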
150
151	/* Do we have work to do? */
152	sethi	%hi(doing_pdma), %l7
153	ld	[%l7 + %lo(doing_pdma)], %l7
154	cmp	%l7, 0
155	be	floppy_dosoftint
156	 nop
157
158	/* Load fdc register base */
159	sethi	%hi(fdc_status), %l3
160	ld	[%l3 + %lo(fdc_status)], %l3
161
162	/* Setup register addresses */
163	sethi	%hi(pdma_vaddr), %l5	! transfer buffer
164	ld	[%l5 + %lo(pdma_vaddr)], %l4
165	sethi	%hi(pdma_size), %l5	! bytes to go
166	ld	[%l5 + %lo(pdma_size)], %l6
167next_byte:
168  	ldub	[%l3], %l7
169
170	andcc	%l7, 0x80, %g0		! Does fifo still have data
171	bz	floppy_fifo_emptied	! fifo has been emptied...
172	 andcc	%l7, 0x20, %g0		! in non-dma mode still?
173	bz	floppy_overrun		! nope, overrun
174	 andcc	%l7, 0x40, %g0		! 0=write 1=read
175	bz	floppy_write
176	 sub	%l6, 0x1, %l6
177
178	/* Ok, actually read this byte */
179	ldub	[%l3 + 1], %l7
180	orcc	%g0, %l6, %g0
181	stb	%l7, [%l4]
182	bne	next_byte
183	 add	%l4, 0x1, %l4
184
185	b	floppy_tdone
186	 nop
187
188floppy_write:
189	/* Ok, actually write this byte */
190	ldub	[%l4], %l7
191	orcc	%g0, %l6, %g0
192	stb	%l7, [%l3 + 1]
193	bne	next_byte
194	 add	%l4, 0x1, %l4
195
196	/* fall through... */
197floppy_tdone:
198	sethi	%hi(pdma_vaddr), %l5
199	st	%l4, [%l5 + %lo(pdma_vaddr)]
200	sethi	%hi(pdma_size), %l5
201	st	%l6, [%l5 + %lo(pdma_size)]
202	/* Flip terminal count pin */
203	set	auxio_register, %l7
204	ld	[%l7], %l7
205
206	set	sparc_cpu_model, %l5
207	ld	[%l5], %l5
208	subcc   %l5, 1, %g0		/* enum { sun4c = 1 }; */
209	be	1f
210	 ldub	[%l7], %l5
211
212	or	%l5, 0xc2, %l5
213	stb	%l5, [%l7]
214	andn    %l5, 0x02, %l5
215	b	2f
216	 nop
217
2181:
219	or      %l5, 0xf4, %l5
220	stb     %l5, [%l7]
221	andn    %l5, 0x04, %l5
222
2232:
224	/* Kill some time so the bits set */
225	WRITE_PAUSE
226	WRITE_PAUSE
227
228	stb     %l5, [%l7]
229
230	/* Prevent recursion */
231	sethi	%hi(doing_pdma), %l7
232	b	floppy_dosoftint
233	 st	%g0, [%l7 + %lo(doing_pdma)]
234
235	/* We emptied the FIFO, but we haven't read everything
236	 * as of yet.  Store the current transfer address and
237	 * bytes left to read so we can continue when the next
238	 * fast IRQ comes in.
239	 */
240floppy_fifo_emptied:
241	sethi	%hi(pdma_vaddr), %l5
242	st	%l4, [%l5 + %lo(pdma_vaddr)]
243	sethi	%hi(pdma_size), %l7
244	st	%l6, [%l7 + %lo(pdma_size)]
245
246	/* Restore condition codes */
247	wr	%l0, 0x0, %psr
248	WRITE_PAUSE
249
250	jmp	%l1
251	rett	%l2
252
253floppy_overrun:
254	sethi	%hi(pdma_vaddr), %l5
255	st	%l4, [%l5 + %lo(pdma_vaddr)]
256	sethi	%hi(pdma_size), %l5
257	st	%l6, [%l5 + %lo(pdma_size)]
258	/* Prevent recursion */
259	sethi	%hi(doing_pdma), %l7
260	st	%g0, [%l7 + %lo(doing_pdma)]
261
262	/* fall through... */
263floppy_dosoftint:
264	rd	%wim, %l3
265	SAVE_ALL
266
267	/* Set all IRQs off. */
268	or	%l0, PSR_PIL, %l4
269	wr	%l4, 0x0, %psr
270	WRITE_PAUSE
271	wr	%l4, PSR_ET, %psr
272	WRITE_PAUSE
273
274	mov	11, %o0			! floppy irq level (unused anyway)
275	mov	%g0, %o1		! devid is not used in fast interrupts
276	call	sparc_floppy_irq
277	 add	%sp, STACKFRAME_SZ, %o2	! struct pt_regs *regs
278
279	RESTORE_ALL
280
281#endif /* (CONFIG_BLK_DEV_FD) */
282
283	/* Bad trap handler */
284	.globl	bad_trap_handler
285bad_trap_handler:
286	SAVE_ALL
287
288	wr	%l0, PSR_ET, %psr
289	WRITE_PAUSE
290
291	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
292	call	do_hw_interrupt
293	 mov	%l7, %o1		! trap number
294
295	RESTORE_ALL
296
297/* For now all IRQs not registered get sent here. handler_irq() will
298 * see if a routine is registered to handle this interrupt and if not
299 * it will say so on the console.
300 */
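/* Note on the pattern used by the handlers below: the trap-table stub
 * has already done "rd %psr, %l0", and the hardware left the trapped
 * %pc/%npc in %l1/%l2.  Handlers raise PIL in %psr first, while ET is
 * still clear, and only then set PSR_ET with a second write, so the
 * interrupt level is already masked by the time traps come back on.
 * WRITE_PAUSE covers the delayed-write slots after each wr to %psr.
 */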
301
302	.align	4
303	.globl	real_irq_entry, patch_handler_irq
304real_irq_entry:
305	SAVE_ALL
306
307#ifdef CONFIG_SMP
308	.globl	patchme_maybe_smp_msg
309
310	cmp	%l7, 12
311patchme_maybe_smp_msg:
312	bgu	maybe_smp4m_msg
313	 nop
314#endif
315
316real_irq_continue:
317	or	%l0, PSR_PIL, %g2
318	wr	%g2, 0x0, %psr
319	WRITE_PAUSE
320	wr	%g2, PSR_ET, %psr
321	WRITE_PAUSE
322	mov	%l7, %o0		! irq level
323patch_handler_irq:
324	call	handler_irq
325	 add	%sp, STACKFRAME_SZ, %o1	! pt_regs ptr
326	or	%l0, PSR_PIL, %g2	! restore PIL after handler_irq
327	wr	%g2, PSR_ET, %psr	! keep ET up
328	WRITE_PAUSE
329
330	RESTORE_ALL
331
332#ifdef CONFIG_SMP
333	/* SMP per-cpu ticker interrupts are handled specially. */
334smp4m_ticker:
335	bne	real_irq_continue+4
336	 or	%l0, PSR_PIL, %g2
337	wr	%g2, 0x0, %psr
338	WRITE_PAUSE
339	wr	%g2, PSR_ET, %psr
340	WRITE_PAUSE
341	call	smp4m_percpu_timer_interrupt
342	 add	%sp, STACKFRAME_SZ, %o0
343	wr	%l0, PSR_ET, %psr
344	WRITE_PAUSE
345	RESTORE_ALL
346
347	/* Here is where we check for possible SMP IPI passed to us
348 * on some level other than 15, which is the NMI and only used
349	 * for cross calls.  That has a separate entry point below.
350	 */
351maybe_smp4m_msg:
352	GET_PROCESSOR4M_ID(o3)
353	set	sun4m_interrupts, %l5
354	ld	[%l5], %o5
355	sethi	%hi(0x40000000), %o2
356	sll	%o3, 12, %o3
357	ld	[%o5 + %o3], %o1
358	andcc	%o1, %o2, %g0
359	be,a	smp4m_ticker
360	 cmp	%l7, 14
361	st	%o2, [%o5 + 0x4]
362	WRITE_PAUSE
363	ld	[%o5], %g0
364	WRITE_PAUSE
365	or	%l0, PSR_PIL, %l4
366	wr	%l4, 0x0, %psr
367	WRITE_PAUSE
368	wr	%l4, PSR_ET, %psr
369	WRITE_PAUSE
370	call	smp_reschedule_irq
371	 nop
372
373	RESTORE_ALL
374
375	.align	4
376	.globl	linux_trap_ipi15_sun4m
377linux_trap_ipi15_sun4m:
378	SAVE_ALL
379	sethi	%hi(0x80000000), %o2
380	GET_PROCESSOR4M_ID(o0)
381	set	sun4m_interrupts, %l5
382	ld	[%l5], %o5
383	sll	%o0, 12, %o0
384	add	%o5, %o0, %o5
385	ld	[%o5], %o3
386	andcc	%o3, %o2, %g0
387	be	1f			! Must be an NMI async memory error
388	 st	%o2, [%o5 + 4]
389	WRITE_PAUSE
390	ld	[%o5], %g0
391	WRITE_PAUSE
392	or	%l0, PSR_PIL, %l4
393	wr	%l4, 0x0, %psr
394	WRITE_PAUSE
395	wr	%l4, PSR_ET, %psr
396	WRITE_PAUSE
397	call	smp4m_cross_call_irq
398	 nop
399	b	ret_trap_lockless_ipi
400	 clr	%l6
4011:
402	/* NMI async memory error handling. */
403	sethi	%hi(0x80000000), %l4
404	sethi	%hi(0x4000), %o3
405	sub	%o5, %o0, %o5
406	add	%o5, %o3, %l5
407	st	%l4, [%l5 + 0xc]
408	WRITE_PAUSE
409	ld	[%l5], %g0
410	WRITE_PAUSE
411	or	%l0, PSR_PIL, %l4
412	wr	%l4, 0x0, %psr
413	WRITE_PAUSE
414	wr	%l4, PSR_ET, %psr
415	WRITE_PAUSE
416	call	sun4m_nmi
417	 nop
418	st	%l4, [%l5 + 0x8]
419	WRITE_PAUSE
420	ld	[%l5], %g0
421	WRITE_PAUSE
422	RESTORE_ALL
423
424	.globl	smp4d_ticker
425	/* SMP per-cpu ticker interrupts are handled specially. */
426smp4d_ticker:
427	SAVE_ALL
428	or	%l0, PSR_PIL, %g2
429	sethi	%hi(CC_ICLR), %o0
430	sethi	%hi(1 << 14), %o1
431	or	%o0, %lo(CC_ICLR), %o0
432	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 14 in MXCC's ICLR */
433	wr	%g2, 0x0, %psr
434	WRITE_PAUSE
435	wr	%g2, PSR_ET, %psr
436	WRITE_PAUSE
437	call	smp4d_percpu_timer_interrupt
438	 add	%sp, STACKFRAME_SZ, %o0
439	wr	%l0, PSR_ET, %psr
440	WRITE_PAUSE
441	RESTORE_ALL
442
443	.align	4
444	.globl	linux_trap_ipi15_sun4d
445linux_trap_ipi15_sun4d:
446	SAVE_ALL
447	sethi	%hi(CC_BASE), %o4
448	sethi	%hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
449	or	%o4, (CC_EREG - CC_BASE), %o0
450	ldda	[%o0] ASI_M_MXCC, %o0
451	andcc	%o0, %o2, %g0
452	bne	1f
453	 sethi	%hi(BB_STAT2), %o2
454	lduba	[%o2] ASI_M_CTL, %o2
455	andcc	%o2, BB_STAT2_MASK, %g0
456	bne	2f
457	 or	%o4, (CC_ICLR - CC_BASE), %o0
458	sethi	%hi(1 << 15), %o1
459	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 15 in MXCC's ICLR */
460	or	%l0, PSR_PIL, %l4
461	wr	%l4, 0x0, %psr
462	WRITE_PAUSE
463	wr	%l4, PSR_ET, %psr
464	WRITE_PAUSE
465	call	smp4d_cross_call_irq
466	 nop
467	b	ret_trap_lockless_ipi
468	 clr	%l6
469
4701:	/* MXCC error */
4712:	/* BB error */
472	/* Disable PIL 15 */
473	set	CC_IMSK, %l4
474	lduha	[%l4] ASI_M_MXCC, %l5
475	sethi	%hi(1 << 15), %l7
476	or	%l5, %l7, %l5
477	stha	%l5, [%l4] ASI_M_MXCC
478	/* FIXME */
4791:	b,a	1b
480
481#endif /* CONFIG_SMP */
482
483	/* This routine handles illegal instructions and privileged
484	 * instruction attempts from user code.
485	 */
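	/* The check below matches the FLUSH opcode (op=10, op3=0x3b)
	 * under the 0xc1f80000 mask: CPUs that trap on an unimplemented
	 * flush simply have the instruction skipped at 1f.
	 */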
486	.align	4
487	.globl	bad_instruction
488bad_instruction:
489	sethi	%hi(0xc1f80000), %l4
490	ld	[%l1], %l5
491	sethi	%hi(0x81d80000), %l7
492	and	%l5, %l4, %l5
493	cmp	%l5, %l7
494	be	1f
495	SAVE_ALL
496
497	wr	%l0, PSR_ET, %psr		! re-enable traps
498	WRITE_PAUSE
499
500	add	%sp, STACKFRAME_SZ, %o0
501	mov	%l1, %o1
502	mov	%l2, %o2
503	call	do_illegal_instruction
504	 mov	%l0, %o3
505
506	RESTORE_ALL
507
5081:	/* unimplemented flush - just skip */
509	jmpl	%l2, %g0
510	 rett	%l2 + 4
511
512	.align	4
513	.globl	priv_instruction
514priv_instruction:
515	SAVE_ALL
516
517	wr	%l0, PSR_ET, %psr
518	WRITE_PAUSE
519
520	add	%sp, STACKFRAME_SZ, %o0
521	mov	%l1, %o1
522	mov	%l2, %o2
523	call	do_priv_instruction
524	 mov	%l0, %o3
525
526	RESTORE_ALL
527
528	/* This routine handles unaligned data accesses. */
529	.align	4
530	.globl	mna_handler
531mna_handler:
532	andcc	%l0, PSR_PS, %g0
533	be	mna_fromuser
534	 nop
535
536	SAVE_ALL
537
538	wr	%l0, PSR_ET, %psr
539	WRITE_PAUSE
540
541	ld	[%l1], %o1
542	call	kernel_unaligned_trap
543	 add	%sp, STACKFRAME_SZ, %o0
544
545	RESTORE_ALL
546
547mna_fromuser:
548	SAVE_ALL
549
550	wr	%l0, PSR_ET, %psr		! re-enable traps
551	WRITE_PAUSE
552
553	ld	[%l1], %o1
554	call	user_unaligned_trap
555	 add	%sp, STACKFRAME_SZ, %o0
556
557	RESTORE_ALL
558
559	/* This routine handles floating point disabled traps. */
560	.align	4
561	.globl	fpd_trap_handler
562fpd_trap_handler:
563	SAVE_ALL
564
565	wr	%l0, PSR_ET, %psr		! re-enable traps
566	WRITE_PAUSE
567
568	add	%sp, STACKFRAME_SZ, %o0
569	mov	%l1, %o1
570	mov	%l2, %o2
571	call	do_fpd_trap
572	 mov	%l0, %o3
573
574	RESTORE_ALL
575
576	/* This routine handles Floating Point Exceptions. */
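	/* If the exception was raised by one of the %fsr stores inside
	 * fpsave() itself (the one at the top of fpsave, or the one at
	 * fpsave_magic), the return is redirected to fpsave_catch2 or
	 * fpsave_catch so that fpsave can complete; see the deadlock
	 * note above fpsave_catch near the end of this file.
	 */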
577	.align	4
578	.globl	fpe_trap_handler
579fpe_trap_handler:
580	set	fpsave_magic, %l5
581	cmp	%l1, %l5
582	be	1f
583	 sethi	%hi(fpsave), %l5
584	or	%l5, %lo(fpsave), %l5
585	cmp	%l1, %l5
586	bne	2f
587	 sethi	%hi(fpsave_catch2), %l5
588	or	%l5, %lo(fpsave_catch2), %l5
589	wr	%l0, 0x0, %psr
590	WRITE_PAUSE
591	jmp	%l5
592	 rett	%l5 + 4
5931:
594	sethi	%hi(fpsave_catch), %l5
595	or	%l5, %lo(fpsave_catch), %l5
596	wr	%l0, 0x0, %psr
597	WRITE_PAUSE
598	jmp	%l5
599	 rett	%l5 + 4
600
6012:
602	SAVE_ALL
603
604	wr	%l0, PSR_ET, %psr		! re-enable traps
605	WRITE_PAUSE
606
607	add	%sp, STACKFRAME_SZ, %o0
608	mov	%l1, %o1
609	mov	%l2, %o2
610	call	do_fpe_trap
611	 mov	%l0, %o3
612
613	RESTORE_ALL
614
615	/* This routine handles Tag Overflow Exceptions. */
616	.align	4
617	.globl	do_tag_overflow
618do_tag_overflow:
619	SAVE_ALL
620
621	wr	%l0, PSR_ET, %psr		! re-enable traps
622	WRITE_PAUSE
623
624	add	%sp, STACKFRAME_SZ, %o0
625	mov	%l1, %o1
626	mov	%l2, %o2
627	call	handle_tag_overflow
628	 mov	%l0, %o3
629
630	RESTORE_ALL
631
632	/* This routine handles Watchpoint Exceptions. */
633	.align	4
634	.globl	do_watchpoint
635do_watchpoint:
636	SAVE_ALL
637
638	wr	%l0, PSR_ET, %psr		! re-enable traps
639	WRITE_PAUSE
640
641	add	%sp, STACKFRAME_SZ, %o0
642	mov	%l1, %o1
643	mov	%l2, %o2
644	call	handle_watchpoint
645	 mov	%l0, %o3
646
647	RESTORE_ALL
648
649	/* This routine handles Register Access Exceptions. */
650	.align	4
651	.globl	do_reg_access
652do_reg_access:
653	SAVE_ALL
654
655	wr	%l0, PSR_ET, %psr		! re-enable traps
656	WRITE_PAUSE
657
658	add	%sp, STACKFRAME_SZ, %o0
659	mov	%l1, %o1
660	mov	%l2, %o2
661	call	handle_reg_access
662	 mov	%l0, %o3
663
664	RESTORE_ALL
665
666	/* This routine handles Co-Processor Disabled Exceptions. */
667	.align	4
668	.globl	do_cp_disabled
669do_cp_disabled:
670	SAVE_ALL
671
672	wr	%l0, PSR_ET, %psr		! re-enable traps
673	WRITE_PAUSE
674
675	add	%sp, STACKFRAME_SZ, %o0
676	mov	%l1, %o1
677	mov	%l2, %o2
678	call	handle_cp_disabled
679	 mov	%l0, %o3
680
681	RESTORE_ALL
682
683	/* This routine handles Co-Processor Exceptions. */
684	.align	4
685	.globl	do_cp_exception
686do_cp_exception:
687	SAVE_ALL
688
689	wr	%l0, PSR_ET, %psr		! re-enable traps
690	WRITE_PAUSE
691
692	add	%sp, STACKFRAME_SZ, %o0
693	mov	%l1, %o1
694	mov	%l2, %o2
695	call	handle_cp_exception
696	 mov	%l0, %o3
697
698	RESTORE_ALL
699
700	/* This routine handles Hardware Divide By Zero Exceptions. */
701	.align	4
702	.globl	do_hw_divzero
703do_hw_divzero:
704	SAVE_ALL
705
706	wr	%l0, PSR_ET, %psr		! re-enable traps
707	WRITE_PAUSE
708
709	add	%sp, STACKFRAME_SZ, %o0
710	mov	%l1, %o1
711	mov	%l2, %o2
712	call	handle_hw_divzero
713	 mov	%l0, %o3
714
715	RESTORE_ALL
716
717	.align	4
718	.globl	do_flush_windows
719do_flush_windows:
720	SAVE_ALL
721
722	wr	%l0, PSR_ET, %psr
723	WRITE_PAUSE
724
725	andcc	%l0, PSR_PS, %g0
726	bne	dfw_kernel
727	 nop
728
729	call	flush_user_windows
730	 nop
731
732	/* Advance over the trap instruction. */
733	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
734	add	%l1, 0x4, %l2
735	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
736	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
737
738	RESTORE_ALL
739
740	.globl	flush_patch_one
741
742	/* We get these for debugging routines using __builtin_return_address() */
743dfw_kernel:
744flush_patch_one:
745	FLUSH_ALL_KERNEL_WINDOWS
746
747	/* Advance over the trap instruction. */
748	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
749	add	%l1, 0x4, %l2
750	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
751	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
752
753	RESTORE_ALL
754
755	/* The getcc software trap.  The user wants the condition codes from
756	 * the %psr in register %g1.
757	 */
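	/* The integer condition codes sit in %psr bits 23:20 (N,Z,V,C),
	 * hence the shift by 20 and the 0xf mask below.
	 */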
758
759	.align	4
760	.globl	getcc_trap_handler
761getcc_trap_handler:
762	srl	%l0, 20, %g1	! give user
763	and	%g1, 0xf, %g1	! only ICC bits in %psr
764	jmp	%l2		! advance over trap instruction
765	rett	%l2 + 0x4	! like this...
766
767	/* The setcc software trap.  The user has condition codes in %g1
768	 * that it would like placed in the %psr.  Be careful not to flip
769	 * any unintentional bits!
770	 */
771
772	.align	4
773	.globl	setcc_trap_handler
774setcc_trap_handler:
775	sll	%g1, 0x14, %l4
776	set	PSR_ICC, %l5
777	andn	%l0, %l5, %l0	! clear ICC bits in %psr
778	and	%l4, %l5, %l4	! clear non-ICC bits in user value
779	or	%l4, %l0, %l4	! or them in... mix mix mix
780
781	wr	%l4, 0x0, %psr	! set new %psr
782	WRITE_PAUSE		! TI scumbags...
783
784	jmp	%l2		! advance over trap instruction
785	rett	%l2 + 0x4	! like this...
786
787	.align	4
788	.globl	linux_trap_nmi_sun4c
789linux_trap_nmi_sun4c:
790	SAVE_ALL
791
792	/* Ugh, we need to clear the IRQ line.  This is now
793	 * a very sun4c specific trap handler...
794	 */
795	sethi	%hi(interrupt_enable), %l5
796	ld	[%l5 + %lo(interrupt_enable)], %l5
797	ldub	[%l5], %l6
798	andn	%l6, INTS_ENAB, %l6
799	stb	%l6, [%l5]
800
801	/* Now it is safe to re-enable traps without recursion. */
802	or	%l0, PSR_PIL, %l0
803	wr	%l0, PSR_ET, %psr
804	WRITE_PAUSE
805
806	/* Now call the c-code with the pt_regs frame ptr and the
807	 * memory error registers as arguments.  The ordering chosen
808	 * here is due to unlatching semantics.
809	 */
810	sethi	%hi(AC_SYNC_ERR), %o0
811	add	%o0, 0x4, %o0
812	lda	[%o0] ASI_CONTROL, %o2	! sync vaddr
813	sub	%o0, 0x4, %o0
814	lda	[%o0] ASI_CONTROL, %o1	! sync error
815	add	%o0, 0xc, %o0
816	lda	[%o0] ASI_CONTROL, %o4	! async vaddr
817	sub	%o0, 0x4, %o0
818	lda	[%o0] ASI_CONTROL, %o3	! async error
819	call	sparc_lvl15_nmi
820	 add	%sp, STACKFRAME_SZ, %o0
821
822	RESTORE_ALL
823
824	.align	4
825	.globl	invalid_segment_patch1_ff
826	.globl	invalid_segment_patch2_ff
827invalid_segment_patch1_ff:	cmp	%l4, 0xff
828invalid_segment_patch2_ff:	mov	0xff, %l3
829
830	.align	4
831	.globl	invalid_segment_patch1_1ff
832	.globl	invalid_segment_patch2_1ff
833invalid_segment_patch1_1ff:	cmp	%l4, 0x1ff
834invalid_segment_patch2_1ff:	mov	0x1ff, %l3
835
836	.align	4
837	.globl	num_context_patch1_16, num_context_patch2_16
838num_context_patch1_16:		mov	0x10, %l7
839num_context_patch2_16:		mov	0x10, %l7
840
841	.align	4
842	.globl	vac_linesize_patch_32
843vac_linesize_patch_32:		subcc	%l7, 32, %l7
844
845	.align	4
846	.globl	vac_hwflush_patch1_on, vac_hwflush_patch2_on
847
848/*
849 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
850 * two instructions (Anton)
851 */
852#ifdef CONFIG_SUN4
853vac_hwflush_patch1_on:		nop
854#else
855vac_hwflush_patch1_on:		addcc	%l7, -PAGE_SIZE, %l7
856#endif
857
858vac_hwflush_patch2_on:		sta	%g0, [%l3 + %l7] ASI_HWFLUSHSEG
859
860	.globl	invalid_segment_patch1, invalid_segment_patch2
861	.globl	num_context_patch1
862	.globl	vac_linesize_patch, vac_hwflush_patch1
863	.globl	vac_hwflush_patch2
864
865	.align	4
866	.globl	sun4c_fault
867
868! %l0 = %psr
869! %l1 = %pc
870! %l2 = %npc
871! %l3 = %wim
872! %l7 = 1 for textfault
873! We want error in %l5, vaddr in %l6
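! Rough flow: for a kernel MMU miss, grab a pseudo-segment from
! sun4c_kfree_ring (or flush and recycle the oldest sun4c_kernel_ring
! entry), install it in every context via ASI_SEGMAP, then fill its
! ptes either from swapper_pg_dir or, below VMALLOC_START, with the
! linear KERNBASE mapping -- all without building a pt_regs frame.
! Everything else falls through to sun4c_fault_fromuser.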
874sun4c_fault:
875#ifdef CONFIG_SUN4
876	sethi	%hi(sun4c_memerr_reg), %l4
877	ld	[%l4+%lo(sun4c_memerr_reg)], %l4  ! memerr ctrl reg addr
878	ld	[%l4], %l6		! memerr ctrl reg
879	ld	[%l4 + 4], %l5		! memerr vaddr reg
880	andcc	%l6, 0x80, %g0		! check for error type
881	st	%g0, [%l4 + 4]		! clear the error
882	be	0f			! normal error
883	 sethi	%hi(AC_BUS_ERROR), %l4	! bus err reg addr
884
885	call	prom_halt	! something weird happened
886					! what exactly did happen?
887					! what should we do here?
888
8890:	or	%l4, %lo(AC_BUS_ERROR), %l4	! bus err reg addr
890	lduba	[%l4] ASI_CONTROL, %l6	! bus err reg
891
892	cmp    %l7, 1			! text fault?
893	be	1f			! yes
894	 nop
895
896	ld     [%l1], %l4		! load instruction that caused fault
897	srl	%l4, 21, %l4
898	andcc	%l4, 1, %g0		! store instruction?
899
900	be	1f			! no
901	 sethi	%hi(SUN4C_SYNC_BADWRITE), %l4 ! yep
902					! %lo(SUN4C_SYNC_BADWRITE) = 0
903	or	%l4, %l6, %l6		! set write bit to emulate sun4c
9041:
905#else
906	sethi	%hi(AC_SYNC_ERR), %l4
907	add	%l4, 0x4, %l6			! AC_SYNC_VA in %l6
908	lda	[%l6] ASI_CONTROL, %l5		! Address
909	lda	[%l4] ASI_CONTROL, %l6		! Error, retained for a bit
910#endif
911
912	andn	%l5, 0xfff, %l5			! Encode all info into l7
913	srl	%l6, 14, %l4
914
915	and	%l4, 2, %l4
916	or	%l5, %l4, %l4
917
918	or	%l4, %l7, %l7			! l7 = [addr,write,txtfault]
919
920	andcc	%l0, PSR_PS, %g0
921	be	sun4c_fault_fromuser
922	 andcc	%l7, 1, %g0			! Text fault?
923
924	be	1f
925	 sethi	%hi(KERNBASE), %l4
926
927	mov	%l1, %l5			! PC
928
9291:
930	cmp	%l5, %l4
931	blu	sun4c_fault_fromuser
932	 sethi	%hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
933
934	/* If the kernel references a bum kernel pointer, or a pte which
935	 * points to a non-existent page in RAM, we will run this code
936	 * _forever_ and lock up the machine!!!!! So we must check for
937	 * this condition, the AC_SYNC_ERR bits are what we must examine.
938	 * Also a parity error would make this happen as well.  So we just
939	 * check that we are in fact servicing a tlb miss and not some
940	 * other type of fault for the kernel.
941	 */
942	andcc	%l6, 0x80, %g0
943	be	sun4c_fault_fromuser
944	 and	%l5, %l4, %l5
945
946	/* Test for NULL pte_t * in vmalloc area. */
947	sethi   %hi(VMALLOC_START), %l4
948	cmp     %l5, %l4
949	blu,a   invalid_segment_patch1
950	 lduXa	[%l5] ASI_SEGMAP, %l4
951
952	sethi   %hi(swapper_pg_dir), %l4
953	srl     %l5, SUN4C_PGDIR_SHIFT, %l6
954	or      %l4, %lo(swapper_pg_dir), %l4
955	sll     %l6, 2, %l6
956	ld      [%l4 + %l6], %l4
957#ifdef CONFIG_SUN4
958	sethi	%hi(PAGE_MASK), %l6
959	andcc	%l4, %l6, %g0
960#else
961	andcc   %l4, PAGE_MASK, %g0
962#endif
963	be      sun4c_fault_fromuser
964	 lduXa  [%l5] ASI_SEGMAP, %l4
965
966invalid_segment_patch1:
967	cmp	%l4, 0x7f
968	bne	1f
969	 sethi	%hi(sun4c_kfree_ring), %l4
970	or	%l4, %lo(sun4c_kfree_ring), %l4
971	ld	[%l4 + 0x18], %l3
972	deccc	%l3			! do we have a free entry?
973	bcs,a	2f			! no, unmap one.
974	 sethi	%hi(sun4c_kernel_ring), %l4
975
976	st	%l3, [%l4 + 0x18]	! sun4c_kfree_ring.num_entries--
977
978	ld	[%l4 + 0x00], %l6	! entry = sun4c_kfree_ring.ringhd.next
979	st	%l5, [%l6 + 0x08]	! entry->vaddr = address
980
981	ld	[%l6 + 0x00], %l3	! next = entry->next
982	ld	[%l6 + 0x04], %l7	! entry->prev
983
984	st	%l7, [%l3 + 0x04]	! next->prev = entry->prev
985	st	%l3, [%l7 + 0x00]	! entry->prev->next = next
986
987	sethi	%hi(sun4c_kernel_ring), %l4
988	or	%l4, %lo(sun4c_kernel_ring), %l4
989					! head = &sun4c_kernel_ring.ringhd
990
991	ld	[%l4 + 0x00], %l7	! head->next
992
993	st	%l4, [%l6 + 0x04]	! entry->prev = head
994	st	%l7, [%l6 + 0x00]	! entry->next = head->next
995	st	%l6, [%l7 + 0x04]	! head->next->prev = entry
996
997	st	%l6, [%l4 + 0x00]	! head->next = entry
998
999	ld	[%l4 + 0x18], %l3
1000	inc	%l3			! sun4c_kernel_ring.num_entries++
1001	st	%l3, [%l4 + 0x18]
1002	b	4f
1003	 ld	[%l6 + 0x08], %l5
1004
10052:
1006	or	%l4, %lo(sun4c_kernel_ring), %l4
1007					! head = &sun4c_kernel_ring.ringhd
1008
1009	ld	[%l4 + 0x04], %l6	! entry = head->prev
1010
1011	ld	[%l6 + 0x08], %l3	! tmp = entry->vaddr
1012
1013	! Flush segment from the cache.
1014#ifdef CONFIG_SUN4
1015	sethi	%hi((128 * 1024)), %l7
1016#else
1017	sethi	%hi((64 * 1024)), %l7
1018#endif
10199:
1020vac_hwflush_patch1:
1021vac_linesize_patch:
1022	subcc	%l7, 16, %l7
1023	bne	9b
1024vac_hwflush_patch2:
1025	 sta	%g0, [%l3 + %l7] ASI_FLUSHSEG
1026
1027	st	%l5, [%l6 + 0x08]	! entry->vaddr = address
1028
1029	ld	[%l6 + 0x00], %l5	! next = entry->next
1030	ld	[%l6 + 0x04], %l7	! entry->prev
1031
1032	st	%l7, [%l5 + 0x04]	! next->prev = entry->prev
1033	st	%l5, [%l7 + 0x00]	! entry->prev->next = next
1034	st	%l4, [%l6 + 0x04]	! entry->prev = head
1035
1036	ld	[%l4 + 0x00], %l7	! head->next
1037
1038	st	%l7, [%l6 + 0x00]	! entry->next = head->next
1039	st	%l6, [%l7 + 0x04]	! head->next->prev = entry
1040	st	%l6, [%l4 + 0x00]	! head->next = entry
1041
1042	mov	%l3, %l5		! address = tmp
1043
10444:
1045num_context_patch1:
1046	mov	0x08, %l7
1047
1048	ld	[%l6 + 0x08], %l4
1049	ldub	[%l6 + 0x0c], %l3
1050	or	%l4, %l3, %l4		! encode new vaddr/pseg into l4
1051
1052	sethi	%hi(AC_CONTEXT), %l3
1053	lduba	[%l3] ASI_CONTROL, %l6
1054
1055	/* Invalidate old mapping, instantiate new mapping,
1056	 * for each context.  Registers l6/l7 are live across
1057	 * this loop.
1058	 */
10593:	deccc	%l7
1060	sethi	%hi(AC_CONTEXT), %l3
1061	stba	%l7, [%l3] ASI_CONTROL
1062invalid_segment_patch2:
1063	mov	0x7f, %l3
1064	stXa	%l3, [%l5] ASI_SEGMAP
1065	andn	%l4, 0x1ff, %l3
1066	bne	3b
1067	 stXa	%l4, [%l3] ASI_SEGMAP
1068
1069	sethi	%hi(AC_CONTEXT), %l3
1070	stba	%l6, [%l3] ASI_CONTROL
1071
1072	andn	%l4, 0x1ff, %l5
1073
10741:
1075	sethi	%hi(VMALLOC_START), %l4
1076	cmp	%l5, %l4
1077
1078	bgeu	1f
1079	 mov	1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7
1080
1081	sethi	%hi(KERNBASE), %l6
1082
1083	sub	%l5, %l6, %l4
1084	srl	%l4, PAGE_SHIFT, %l4
1085	sethi	%hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
1086	or	%l3, %l4, %l3
1087
1088	sethi	%hi(PAGE_SIZE), %l4
1089
10902:
1091	sta	%l3, [%l5] ASI_PTE
1092	deccc	%l7
1093	inc	%l3
1094	bne	2b
1095	 add	%l5, %l4, %l5
1096
1097	b	7f
1098	 sethi	%hi(sun4c_kernel_faults), %l4
1099
11001:
1101	srl	%l5, SUN4C_PGDIR_SHIFT, %l3
1102	sethi	%hi(swapper_pg_dir), %l4
1103	or	%l4, %lo(swapper_pg_dir), %l4
1104	sll	%l3, 2, %l3
1105	ld	[%l4 + %l3], %l4
1106#ifndef CONFIG_SUN4
1107	and	%l4, PAGE_MASK, %l4
1108#else
1109	sethi	%hi(PAGE_MASK), %l6
1110	and	%l4, %l6, %l4
1111#endif
1112
1113	srl	%l5, (PAGE_SHIFT - 2), %l6
1114	and	%l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
1115	add	%l6, %l4, %l6
1116
1117	sethi	%hi(PAGE_SIZE), %l4
1118
11192:
1120	ld	[%l6], %l3
1121	deccc	%l7
1122	sta	%l3, [%l5] ASI_PTE
1123	add	%l6, 0x4, %l6
1124	bne	2b
1125	 add	%l5, %l4, %l5
1126
1127	sethi	%hi(sun4c_kernel_faults), %l4
11287:
1129	ld	[%l4 + %lo(sun4c_kernel_faults)], %l3
1130	inc	%l3
1131	st	%l3, [%l4 + %lo(sun4c_kernel_faults)]
1132
1133	/* Restore condition codes */
1134	wr	%l0, 0x0, %psr
1135	WRITE_PAUSE
1136	jmp	%l1
1137	 rett	%l2
1138
1139sun4c_fault_fromuser:
1140	SAVE_ALL
1141	 nop
1142
1143	mov	%l7, %o1		! Decode the info from %l7
1144	mov	%l7, %o2
1145	and	%o1, 1, %o1		! arg2 = text_faultp
1146	mov	%l7, %o3
1147	and	%o2, 2, %o2		! arg3 = writep
1148	andn	%o3, 0xfff, %o3		! arg4 = faulting address
1149
1150	wr	%l0, PSR_ET, %psr
1151	WRITE_PAUSE
1152
1153	call	do_sun4c_fault
1154	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr
1155
1156	RESTORE_ALL
1157
1158	.align	4
1159	.globl	srmmu_fault
1160srmmu_fault:
1161	mov	0x400, %l5
1162	mov	0x300, %l4
1163
1164	lda	[%l5] ASI_M_MMUREGS, %l6	! read sfar first
1165	lda	[%l4] ASI_M_MMUREGS, %l5	! read sfsr last
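	/* The ordering matters: the fault status register is (apparently)
	 * cleared by the read, so the fault address is captured first.
	 * Bit 7 of the status word (a store access) is what the srl/and
	 * below turn into the "write" flag.
	 */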
1166
1167	andn	%l6, 0xfff, %l6
1168	srl	%l5, 6, %l5			! and encode all info into l7
1169
1170	and	%l5, 2, %l5
1171	or	%l5, %l6, %l6
1172
1173	or	%l6, %l7, %l7			! l7 = [addr,write,txtfault]
1174
1175	SAVE_ALL
1176
1177	mov	%l7, %o1
1178	mov	%l7, %o2
1179	and	%o1, 1, %o1		! arg2 = text_faultp
1180	mov	%l7, %o3
1181	and	%o2, 2, %o2		! arg3 = writep
1182	andn	%o3, 0xfff, %o3		! arg4 = faulting address
1183
1184	wr	%l0, PSR_ET, %psr
1185	WRITE_PAUSE
1186
1187	call	do_sparc_fault
1188	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr
1189
1190	RESTORE_ALL
1191
1192#ifdef CONFIG_SUNOS_EMUL
1193	/* SunOS uses syscall zero as the 'indirect syscall'; it looks
1194	 * like indir_syscall(scall_num, arg0, arg1, arg2...);  etc.
1195	 * This is complete brain damage.
1196	 */
1197	.globl	sunos_indir
1198sunos_indir:
1199	mov	%o7, %l4
1200	cmp	%o0, NR_SYSCALLS
1201	blu,a	1f
1202	 sll	%o0, 0x2, %o0
1203
1204	sethi	%hi(sunos_nosys), %l6
1205	b	2f
1206	 or	%l6, %lo(sunos_nosys), %l6
1207
12081:
1209	set	sunos_sys_table, %l7
1210	ld	[%l7 + %o0], %l6
1211
12122:
1213	mov	%o1, %o0
1214	mov	%o2, %o1
1215	mov	%o3, %o2
1216	mov	%o4, %o3
1217	mov	%o5, %o4
1218	call	%l6
1219	 mov	%l4, %o7
1220#endif
1221
1222	.align	4
1223	.globl	sys_nis_syscall
1224sys_nis_syscall:
1225	mov	%o7, %l5
1226	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
1227	call	c_sys_nis_syscall
1228	 mov	%l5, %o7
1229
1230	.align 4
1231	.globl	sys_ptrace
1232sys_ptrace:
1233	call	do_ptrace
1234	 add	%sp, STACKFRAME_SZ, %o0
1235
1236	ld	[%curptr + TI_FLAGS], %l5
1237	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1238	be	1f
1239	 nop
1240
1241	call	syscall_trace
1242	 nop
1243
12441:
1245	RESTORE_ALL
1246
1247	.align	4
1248	.globl	sys_execve
1249sys_execve:
1250	mov	%o7, %l5
1251	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
1252	call	sparc_execve
1253	 mov	%l5, %o7
1254
1255	.align	4
1256	.globl	sys_pipe
1257sys_pipe:
1258	mov	%o7, %l5
1259	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
1260	call	sparc_pipe
1261	 mov	%l5, %o7
1262
1263	.align	4
1264	.globl	sys_sigaltstack
1265sys_sigaltstack:
1266	mov	%o7, %l5
1267	mov	%fp, %o2
1268	call	do_sigaltstack
1269	 mov	%l5, %o7
1270
1271	.align	4
1272	.globl	sys_sigstack
1273sys_sigstack:
1274	mov	%o7, %l5
1275	mov	%fp, %o2
1276	call	do_sys_sigstack
1277	 mov	%l5, %o7
1278
1279	.align	4
1280	.globl	sys_sigpause
1281sys_sigpause:
1282	/* Note: %o0 already has correct value... */
1283	call	do_sigpause
1284	 add	%sp, STACKFRAME_SZ, %o1
1285
1286	ld	[%curptr + TI_FLAGS], %l5
1287	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1288	be	1f
1289	 nop
1290
1291	call	syscall_trace
1292	 nop
1293
12941:
1295	/* We are returning to a signal handler. */
1296	RESTORE_ALL
1297
1298	.align	4
1299	.globl	sys_sigsuspend
1300sys_sigsuspend:
1301	call	do_sigsuspend
1302	 add	%sp, STACKFRAME_SZ, %o0
1303
1304	ld	[%curptr + TI_FLAGS], %l5
1305	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1306	be	1f
1307	 nop
1308
1309	call	syscall_trace
1310	 nop
1311
13121:
1313	/* We are returning to a signal handler. */
1314	RESTORE_ALL
1315
1316	.align	4
1317	.globl	sys_rt_sigsuspend
1318sys_rt_sigsuspend:
1319	/* Note: %o0, %o1 already have correct value... */
1320	call	do_rt_sigsuspend
1321	 add	%sp, STACKFRAME_SZ, %o2
1322
1323	ld	[%curptr + TI_FLAGS], %l5
1324	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1325	be	1f
1326	 nop
1327
1328	call	syscall_trace
1329	 nop
1330
13311:
1332	/* We are returning to a signal handler. */
1333	RESTORE_ALL
1334
1335	.align	4
1336	.globl	sys_sigreturn
1337sys_sigreturn:
1338	call	do_sigreturn
1339	 add	%sp, STACKFRAME_SZ, %o0
1340
1341	ld	[%curptr + TI_FLAGS], %l5
1342	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1343	be	1f
1344	 nop
1345
1346	call	syscall_trace
1347	 nop
1348
13491:
1350	/* We don't want to muck with user registers like a
1351	 * normal syscall, just return.
1352	 */
1353	RESTORE_ALL
1354
1355	.align	4
1356	.globl	sys_rt_sigreturn
1357sys_rt_sigreturn:
1358	call	do_rt_sigreturn
1359	 add	%sp, STACKFRAME_SZ, %o0
1360
1361	ld	[%curptr + TI_FLAGS], %l5
1362	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1363	be	1f
1364	 nop
1365
1366	call	syscall_trace
1367	 nop
1368
13691:
1370	/* We are returning to a signal handler. */
1371	RESTORE_ALL
1372
1373	/* Now that we have a real sys_clone, sys_fork() is
1374	 * implemented in terms of it.  Our _real_ implementation
1375	 * of SunOS vfork() will use sys_vfork().
1376	 *
1377	 * XXX These three should be consolidated into mostly shared
1378	 * XXX code just like on sparc64... -DaveM
1379	 */
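	/* All three flavours below first flush the kernel windows so the
	 * child's copy of this stack is complete, then snapshot %psr/%wim
	 * into the thread struct with a single std of the %g4/%g5 pair
	 * (fork_kpsr and fork_kwim are adjacent).
	 */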
1380	.align	4
1381	.globl	sys_fork, flush_patch_two
1382sys_fork:
1383	mov	%o7, %l5
1384flush_patch_two:
1385	FLUSH_ALL_KERNEL_WINDOWS;
1386	ld	[%curptr + TI_TASK], %o4
1387	rd	%psr, %g4
1388	WRITE_PAUSE
1389	mov	SIGCHLD, %o0			! arg0:	clone flags
1390	rd	%wim, %g5
1391	WRITE_PAUSE
1392	mov	%fp, %o1			! arg1:	usp
1393	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1394	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
1395	mov	0, %o3
1396	call	sparc_do_fork
1397	 mov	%l5, %o7
1398
1399	/* Whee, kernel threads! */
1400	.globl	sys_clone, flush_patch_three
1401sys_clone:
1402	mov	%o7, %l5
1403flush_patch_three:
1404	FLUSH_ALL_KERNEL_WINDOWS;
1405	ld	[%curptr + TI_TASK], %o4
1406	rd	%psr, %g4
1407	WRITE_PAUSE
1408
1409	/* arg0,1: flags,usp  -- loaded already */
1410	cmp	%o1, 0x0			! Is new_usp NULL?
1411	rd	%wim, %g5
1412	WRITE_PAUSE
1413	be,a	1f
1414	 mov	%fp, %o1			! yes, use callers usp
1415	andn	%o1, 7, %o1			! no, align to 8 bytes
14161:
1417	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1418	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
1419	mov	0, %o3
1420	call	sparc_do_fork
1421	 mov	%l5, %o7
1422
1423	/* Whee, real vfork! */
1424	.globl	sys_vfork, flush_patch_four
1425sys_vfork:
1426flush_patch_four:
1427	FLUSH_ALL_KERNEL_WINDOWS;
1428	ld	[%curptr + TI_TASK], %o4
1429	rd	%psr, %g4
1430	WRITE_PAUSE
1431	rd	%wim, %g5
1432	WRITE_PAUSE
1433	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1434	sethi	%hi(0x4000 | 0x0100 | SIGCHLD), %o0
1435	mov	%fp, %o1
1436	or	%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
1437	sethi	%hi(sparc_do_fork), %l1
1438	mov	0, %o3
1439	jmpl	%l1 + %lo(sparc_do_fork), %g0
1440	 add	%sp, STACKFRAME_SZ, %o2
1441
1442        .align  4
1443linux_sparc_ni_syscall:
1444	sethi   %hi(sys_ni_syscall), %l7
1445	b       syscall_is_too_hard
1446	 or     %l7, %lo(sys_ni_syscall), %l7
1447
1448linux_fast_syscall:
1449	andn	%l7, 3, %l7
1450	mov	%i0, %o0
1451	mov	%i1, %o1
1452	mov 	%i2, %o2
1453	jmpl	%l7 + %g0, %g0
1454	 mov	%i3, %o3
1455
1456linux_syscall_trace:
1457	call	syscall_trace
1458	 nop
1459	mov	%i0, %o0
1460	mov	%i1, %o1
1461	mov	%i2, %o2
1462	mov	%i3, %o3
1463	b	2f
1464	 mov	%i4, %o4
1465
1466	.globl	ret_from_fork
1467ret_from_fork:
1468	call	schedule_tail
1469	 mov	%g3, %o0
1470	b	ret_sys_call
1471	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0
1472
1473	/* Linux native and SunOS system calls enter here... */
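	/* %g1 holds the syscall number and the user's %o0-%o5 (our
	 * %i0-%i5 after the trap) hold the arguments; %l7 must already
	 * point at the syscall table (the trap-table stub, or
	 * linux_syscall_for_solaris below, loads it).  Table entries with
	 * bit 0 set take the register-only path through
	 * linux_fast_syscall; everything else builds a full pt_regs frame.
	 */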
1474	.align	4
1475	.globl	linux_sparc_syscall
1476linux_sparc_syscall:
1477	/* Direct access to user regs, much faster. */
1478	cmp	%g1, NR_SYSCALLS
1479	bgeu	linux_sparc_ni_syscall
1480	 sll	%g1, 2, %l4
1481	ld	[%l7 + %l4], %l7
1482	andcc	%l7, 1, %g0
1483	bne	linux_fast_syscall
1484	 /* Just do first insn from SAVE_ALL in the delay slot */
1485
1486	.globl	syscall_is_too_hard
1487syscall_is_too_hard:
1488	SAVE_ALL_HEAD
1489	 rd	%wim, %l3
1490
1491	wr	%l0, PSR_ET, %psr
1492	mov	%i0, %o0
1493	mov	%i1, %o1
1494	mov	%i2, %o2
1495
1496	ld	[%curptr + TI_FLAGS], %l5
1497	mov	%i3, %o3
1498	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1499	mov	%i4, %o4
1500	bne	linux_syscall_trace
1501	 mov	%i0, %l5
15022:
1503	call	%l7
1504	 mov	%i5, %o5
1505
1506	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1507
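	/* sparc error return convention: on success the PSR carry bit is
	 * cleared and PT_I0 holds the value; on failure carry is set and
	 * PT_I0 gets the positive errno, hence the negation and the
	 * unsigned compare against -ERESTART_RESTARTBLOCK below.
	 */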
1508	.globl	ret_sys_call
1509ret_sys_call:
1510	ld	[%curptr + TI_FLAGS], %l6
1511	cmp	%o0, -ERESTART_RESTARTBLOCK
1512	ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3
1513	set	PSR_C, %g2
1514	bgeu	1f
1515	 andcc	%l6, _TIF_SYSCALL_TRACE, %g0
1516
1517	/* System call success, clear Carry condition code. */
1518	andn	%g3, %g2, %g3
1519	clr	%l6
1520	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1521	bne	linux_syscall_trace2
1522	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1523	add	%l1, 0x4, %l2			/* npc = npc+4 */
1524	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1525	b	ret_trap_entry
1526	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
15271:
1528	/* System call failure, set Carry condition code.
1529	 * Also, get abs(errno) to return to the process.
1530	 */
1531	sub	%g0, %o0, %o0
1532	or	%g3, %g2, %g3
1533	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1534	mov	1, %l6
1535	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1536	bne	linux_syscall_trace2
1537	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1538	add	%l1, 0x4, %l2			/* npc = npc+4 */
1539	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1540	b	ret_trap_entry
1541	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1542
1543linux_syscall_trace2:
1544	call	syscall_trace
1545	 add	%l1, 0x4, %l2			/* npc = npc+4 */
1546	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1547	b	ret_trap_entry
1548	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1549
1550
1551	/*
1552	 * Solaris system calls and indirect system calls enter here.
1553	 *
1554	 * I have named the solaris indirect syscalls like that because
1555	 * it seems like Solaris has some fast path syscalls that can
1556	 * be handled as indirect system calls. - mig
1557	 */
1558
1559linux_syscall_for_solaris:
1560	sethi	%hi(sys_call_table), %l7
1561	b	linux_sparc_syscall
1562	 or	%l7, %lo(sys_call_table), %l7
1563
1564	.align	4
1565	.globl	solaris_syscall
1566solaris_syscall:
1567	cmp	%g1,59
1568	be	linux_syscall_for_solaris
1569	 cmp	%g1,2
1570	be	linux_syscall_for_solaris
1571	 cmp    %g1,42
1572	be      linux_syscall_for_solaris
1573	 cmp	%g1,119
1574	be,a	linux_syscall_for_solaris
1575	 mov	2, %g1
15761:
1577	SAVE_ALL_HEAD
1578	 rd	%wim, %l3
1579
1580	wr	%l0, PSR_ET, %psr
1581	nop
1582	nop
1583	mov	%i0, %l5
1584
1585	call	do_solaris_syscall
1586	 add	%sp, STACKFRAME_SZ, %o0
1587
1588	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1589	set	PSR_C, %g2
1590	cmp	%o0, -ERESTART_RESTARTBLOCK
1591	bgeu	1f
1592	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3
1593
1594	/* System call success, clear Carry condition code. */
1595	andn	%g3, %g2, %g3
1596	clr	%l6
1597	b	2f
1598	 st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1599
16001:
1601	/* System call failure, set Carry condition code.
1602	 * Also, get abs(errno) to return to the process.
1603	 */
1604	sub	%g0, %o0, %o0
1605	mov	1, %l6
1606	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1607	or	%g3, %g2, %g3
1608	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1609
1610	/* Advance the pc and npc over the trap instruction.
1611	 * If the npc is unaligned (has a 1 in the low bit), it means
1612	 * the kernel does not want us to play magic (i.e., skipping over
1613	 * traps).  Mainly when the Solaris code wants to set some PC and
1614	 * nPC (setcontext).
1615	 */
16162:
1617	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc  = npc   */
1618	andcc	%l1, 1, %g0
1619	bne	1f
1620	 add	%l1, 0x4, %l2			/* npc = npc+4 */
1621	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1622	b	ret_trap_entry
1623	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1624
1625	/* kernel knows what it is doing, fixup npc and continue */
16261:
1627	sub	%l1, 1, %l1
1628 	b	ret_trap_entry
1629	 st	%l1, [%sp + STACKFRAME_SZ + PT_NPC]
1630
1631#ifndef CONFIG_SUNOS_EMUL
1632	.align	4
1633	.globl	sunos_syscall
1634sunos_syscall:
1635	SAVE_ALL_HEAD
1636	 rd	%wim, %l3
1637	wr	%l0, PSR_ET, %psr
1638	nop
1639	nop
1640	mov	%i0, %l5
1641	call	do_sunos_syscall
1642	 add	%sp, STACKFRAME_SZ, %o0
1643#endif
1644
1645	/* {net, open}bsd system calls enter here... */
1646	.align	4
1647	.globl	bsd_syscall
1648bsd_syscall:
1649	/* Direct access to user regs, much faster. */
1650	cmp	%g1, NR_SYSCALLS
1651	blu,a	1f
1652	 sll	%g1, 2, %l4
1653
1654	set	sys_ni_syscall, %l7
1655	b	bsd_is_too_hard
1656	 nop
1657
16581:
1659	ld	[%l7 + %l4], %l7
1660
1661	.globl	bsd_is_too_hard
1662bsd_is_too_hard:
1663	rd	%wim, %l3
1664	SAVE_ALL
1665
1666	wr	%l0, PSR_ET, %psr
1667	WRITE_PAUSE
1668
16692:
1670	mov	%i0, %o0
1671	mov	%i1, %o1
1672	mov	%i2, %o2
1673	mov	%i0, %l5
1674	mov	%i3, %o3
1675	mov	%i4, %o4
1676	call	%l7
1677	 mov	%i5, %o5
1678
1679	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1680	set	PSR_C, %g2
1681	cmp	%o0, -ERESTART_RESTARTBLOCK
1682	bgeu	1f
1683	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3
1684
1685	/* System call success, clear Carry condition code. */
1686	andn	%g3, %g2, %g3
1687	clr	%l6
1688	b	2f
1689	 st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1690
16911:
1692	/* System call failure, set Carry condition code.
1693	 * Also, get abs(errno) to return to the process.
1694	 */
1695	sub	%g0, %o0, %o0
1696#if 0 /* XXX todo XXX */
1697	sethi	%hi(bsd_xlatb_rorl), %o3
1698	or	%o3, %lo(bsd_xlatb_rorl), %o3
1699	sll	%o0, 2, %o0
1700	ld	[%o3 + %o0], %o0
1701#endif
1702	mov	1, %l6
1703	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1704	or	%g3, %g2, %g3
1705	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1706
1707	/* Advance the pc and npc over the trap instruction. */
17082:
1709	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1	/* pc  = npc   */
1710	add	%l1, 0x4, %l2			/* npc = npc+4 */
1711	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1712	b	ret_trap_entry
1713	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1714
1715/* Saving and restoring the FPU state is best done from lowlevel code.
1716 *
1717 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
1718 *             void *fpqueue, unsigned long *fpqdepth)
1719 */
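	/* 0x2000 is the FSR qne (queue not empty) bit: while it stays set
	 * we pop one fq entry (an address/instruction doubleword) per
	 * iteration into fpqueue, counting the depth in %g2 for *fpqdepth.
	 */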
1720
1721	.globl	fpsave
1722fpsave:
1723	st	%fsr, [%o1]	! this can trap on us if fpu is in bogon state
1724	ld	[%o1], %g1
1725	set	0x2000, %g4
1726	andcc	%g1, %g4, %g0
1727	be	2f
1728	 mov	0, %g2
1729
1730	/* We have an fpqueue to save. */
17311:
1732	std	%fq, [%o2]
1733fpsave_magic:
1734	st	%fsr, [%o1]
1735	ld	[%o1], %g3
1736	andcc	%g3, %g4, %g0
1737	add	%g2, 1, %g2
1738	bne	1b
1739	 add	%o2, 8, %o2
1740
17412:
1742	st	%g2, [%o3]
1743
1744	std	%f0, [%o0 + 0x00]
1745	std	%f2, [%o0 + 0x08]
1746	std	%f4, [%o0 + 0x10]
1747	std	%f6, [%o0 + 0x18]
1748	std	%f8, [%o0 + 0x20]
1749	std	%f10, [%o0 + 0x28]
1750	std	%f12, [%o0 + 0x30]
1751	std	%f14, [%o0 + 0x38]
1752	std	%f16, [%o0 + 0x40]
1753	std	%f18, [%o0 + 0x48]
1754	std	%f20, [%o0 + 0x50]
1755	std	%f22, [%o0 + 0x58]
1756	std	%f24, [%o0 + 0x60]
1757	std	%f26, [%o0 + 0x68]
1758	std	%f28, [%o0 + 0x70]
1759	retl
1760	 std	%f30, [%o0 + 0x78]
1761
1762	/* Thanks to Theo de Raadt and the authors of the Sprite/netbsd/openbsd
1763	 * code for pointing out this possible deadlock: while we save state
1764	 * above we could trap on the %fsr store, so our low-level fpu trap
1765	 * code has to know how to deal with this.
1766	 */
1767fpsave_catch:
1768	b	fpsave_magic + 4
1769	 st	%fsr, [%o1]
1770
1771fpsave_catch2:
1772	b	fpsave + 4
1773	 st	%fsr, [%o1]
1774
1775	/* void fpload(unsigned long *fpregs, unsigned long *fsr); */
1776
1777	.globl	fpload
1778fpload:
1779	ldd	[%o0 + 0x00], %f0
1780	ldd	[%o0 + 0x08], %f2
1781	ldd	[%o0 + 0x10], %f4
1782	ldd	[%o0 + 0x18], %f6
1783	ldd	[%o0 + 0x20], %f8
1784	ldd	[%o0 + 0x28], %f10
1785	ldd	[%o0 + 0x30], %f12
1786	ldd	[%o0 + 0x38], %f14
1787	ldd	[%o0 + 0x40], %f16
1788	ldd	[%o0 + 0x48], %f18
1789	ldd	[%o0 + 0x50], %f20
1790	ldd	[%o0 + 0x58], %f22
1791	ldd	[%o0 + 0x60], %f24
1792	ldd	[%o0 + 0x68], %f26
1793	ldd	[%o0 + 0x70], %f28
1794	ldd	[%o0 + 0x78], %f30
1795	ld	[%o1], %fsr
1796	retl
1797	 nop
1798
1799	/* __ndelay and __udelay take two arguments:
1800	 * 0 - nsecs or usecs to delay
1801	 * 1 - per_cpu udelay_val (loops per jiffy)
1802	 *
1803	 * Note that ndelay gives HZ times higher resolution but has a 10ms
1804	 * limit.  udelay can handle up to 1s.
1805	 */
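	/* The constants are 2^32-scaled reciprocals: 0x1ad ~ 2^32/(10^9/HZ)
	 * and 0x10c6 ~ 2^32/10^6, so the first .umul turns the delay into
	 * 32.32 fixed-point jiffies (ndelay) or seconds (udelay).  A second
	 * multiply by udelay_val (loops per jiffy) -- plus one by HZ for
	 * udelay -- leaves the loop count spun down in delay_continue.
	 * (.umul returns the 64-bit product in %o1:%o0.)
	 */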
1806	.globl	__ndelay
1807__ndelay:
1808	save	%sp, -STACKFRAME_SZ, %sp
1809	mov	%i0, %o0
1810	call	.umul
1811	 mov	0x1ad, %o1		! 2**32 / (1 000 000 000 / HZ)
1812	call	.umul
1813	 mov	%i1, %o1		! udelay_val
1814	ba	delay_continue
1815	 mov	%o1, %o0		! >>32 later for better resolution
1816
1817	.globl	__udelay
1818__udelay:
1819	save	%sp, -STACKFRAME_SZ, %sp
1820	mov	%i0, %o0
1821	sethi	%hi(0x10c6), %o1
1822	call	.umul
1823	 or	%o1, %lo(0x10c6), %o1	! 2**32 / 1 000 000
1824	call	.umul
1825	 mov	%i1, %o1		! udelay_val
1826	call	.umul
1827	 mov	HZ, %o0			! >>32 earlier for wider range
1828
1829delay_continue:
1830	cmp	%o0, 0x0
18311:
1832	bne	1b
1833	 subcc	%o0, 1, %o0
1834
1835	ret
1836	restore
1837
1838	/* Handle a software breakpoint */
1839	/* We have to inform parent that child has stopped */
1840	.align 4
1841	.globl breakpoint_trap
1842breakpoint_trap:
1843	rd	%wim,%l3
1844	SAVE_ALL
1845	wr 	%l0, PSR_ET, %psr
1846	WRITE_PAUSE
1847
1848	st	%i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
1849	call	sparc_breakpoint
1850	 add	%sp, STACKFRAME_SZ, %o0
1851
1852	RESTORE_ALL
1853
1854	.align	4
1855	.globl	__handle_exception, flush_patch_exception
1856__handle_exception:
1857flush_patch_exception:
1858	FLUSH_ALL_KERNEL_WINDOWS;
1859	ldd	[%o0], %o6
1860	jmpl	%o7 + 0xc, %g0			! see asm-sparc/processor.h
1861	 mov	1, %g1				! signal EFAULT condition
1862
1863	.align	4
1864	.globl	kill_user_windows, kuw_patch1_7win
1865	.globl	kuw_patch1
1866kuw_patch1_7win:	sll	%o3, 6, %o3
1867
1868	/* No matter how much overhead this routine has in the worst
1869	 * case scenario, it is several times better than taking the
1870	 * traps with the old method of just doing flush_user_windows().
1871	 */
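	/* Each pass of the loop simulates one user save: %wim is rotated
	 * right by one window (kuw_patch1 is patched to match the CPU's
	 * window count) and that window's bit is cleared from uwinmask,
	 * so the windows the user still owned end up marked invalid
	 * without ever being written out.
	 */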
1872kill_user_windows:
1873	ld	[%g6 + TI_UWINMASK], %o0	! get current umask
1874	orcc	%g0, %o0, %g0			! if no bits set, we are done
1875	be	3f				! nothing to do
1876	 rd	%psr, %o5			! must clear interrupts
1877	or	%o5, PSR_PIL, %o4		! or else that could change
1878	wr	%o4, 0x0, %psr			! the uwinmask state
1879	WRITE_PAUSE				! burn them cycles
18801:
1881	ld	[%g6 + TI_UWINMASK], %o0	! get consistent state
1882	orcc	%g0, %o0, %g0			! did an interrupt come in?
1883	be	4f				! yep, we are done
1884	 rd	%wim, %o3			! get current wim
1885	srl	%o3, 1, %o4			! simulate a save
1886kuw_patch1:
1887	sll	%o3, 7, %o3			! compute next wim
1888	or	%o4, %o3, %o3			! result
1889	andncc	%o0, %o3, %o0			! clean this bit in umask
1890	bne	kuw_patch1			! not done yet
1891	 srl	%o3, 1, %o4			! begin another save simulation
1892	wr	%o3, 0x0, %wim			! set the new wim
1893	st	%g0, [%g6 + TI_UWINMASK]	! clear uwinmask
18944:
1895	wr	%o5, 0x0, %psr			! re-enable interrupts
1896	WRITE_PAUSE				! burn baby burn
18973:
1898	retl					! return
1899	 st	%g0, [%g6 + TI_W_SAVED]		! no windows saved
1900
1901	.align	4
1902	.globl	restore_current
1903restore_current:
1904	LOAD_CURRENT(g6, o0)
1905	retl
1906	 nop
1907
1908#ifdef CONFIG_PCI
1909#include <asm/pcic.h>
1910
1911	.align	4
1912	.globl	linux_trap_ipi15_pcic
1913linux_trap_ipi15_pcic:
1914	rd	%wim, %l3
1915	SAVE_ALL
1916
1917	/*
1918	 * First deactivate NMI
1919	 * or we cannot drop ET, cannot get window spill traps.
1920	 * The busy loop is necessary because the PIO error
1921	 * sometimes does not go away quickly and we trap again.
1922	 */
1923	sethi	%hi(pcic_regs), %o1
1924	ld	[%o1 + %lo(pcic_regs)], %o2
1925
1926	! Get pending status for printouts later.
1927	ld	[%o2 + PCI_SYS_INT_PENDING], %o0
1928
1929	mov	PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1930	stb	%o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
19311:
1932	ld	[%o2 + PCI_SYS_INT_PENDING], %o1
1933	andcc	%o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1934	bne	1b
1935	 nop
1936
1937	or	%l0, PSR_PIL, %l4
1938	wr	%l4, 0x0, %psr
1939	WRITE_PAUSE
1940	wr	%l4, PSR_ET, %psr
1941	WRITE_PAUSE
1942
1943	call	pcic_nmi
1944	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
1945	RESTORE_ALL
1946
1947	.globl	pcic_nmi_trap_patch
1948pcic_nmi_trap_patch:
1949	sethi	%hi(linux_trap_ipi15_pcic), %l3
1950	jmpl	%l3 + %lo(linux_trap_ipi15_pcic), %g0
1951	 rd	%psr, %l0
1952	.word	0
1953
1954#endif /* CONFIG_PCI */
1955
1956/* End of entry.S */
1957