1/* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
2 *
3 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
7 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
8 */
9
10#include <linux/errno.h>
11
12#include <asm/head.h>
13#include <asm/asi.h>
14#include <asm/smp.h>
15#include <asm/contregs.h>
16#include <asm/ptrace.h>
17#include <asm/asm-offsets.h>
18#include <asm/psr.h>
19#include <asm/vaddrs.h>
20#include <asm/memreg.h>
21#include <asm/page.h>
22#include <asm/pgtable.h>
23#include <asm/pgtsun4c.h>
24#include <asm/winmacro.h>
25#include <asm/signal.h>
26#include <asm/obio.h>
27#include <asm/mxcc.h>
28#include <asm/thread_info.h>
29#include <asm/param.h>
30#include <asm/unistd.h>
31
32#include <asm/asmmacro.h>
33
34#define curptr      g6
35
36/* These are just handy. */
37#define _SV	save	%sp, -STACKFRAME_SZ, %sp
38#define _RS     restore
39
40#define FLUSH_ALL_KERNEL_WINDOWS \
41	_SV; _SV; _SV; _SV; _SV; _SV; _SV; \
42	_RS; _RS; _RS; _RS; _RS; _RS; _RS;
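	/* The nested save/restore pairs force window overflow and
	 * underflow traps, which spill every occupied register window
	 * to the stack; seven pairs are enough for the 7- and 8-window
	 * CPUs this port supports (see kuw_patch1/kuw_patch1_7win below).
	 */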
43
44	.text
45
46#ifdef CONFIG_KGDB
47	.align	4
48	.globl		arch_kgdb_breakpoint
49	.type		arch_kgdb_breakpoint,#function
50arch_kgdb_breakpoint:
51	ta		0x7d
52	retl
53	 nop
54	.size		arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
55#endif
56
57#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
58	.align	4
59	.globl	floppy_hardint
60floppy_hardint:
61	/*
62	 * This code cannot touch registers %l0, %l1 and %l2
63	 * because SAVE_ALL depends on their values.  It also depends
64	 * on %l3, but we regenerate it before each call.
65	 * Register usage is:
66	 * %l3 -- base address of fdc registers
67	 * %l4 -- pdma_vaddr
68	 * %l5 -- scratch for ld/st address
69	 * %l6 -- pdma_size
70	 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
71	 */
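	/* What follows is the pseudo-DMA loop: on each fast floppy
	 * interrupt we shuffle bytes directly between the FDC data
	 * register (one past the status register held in %l3) and the
	 * pdma_vaddr buffer until the FIFO drains, the transfer
	 * finishes, or an overrun is detected.
	 */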
72
73	/* Do we have work to do? */
74	sethi	%hi(doing_pdma), %l7
75	ld	[%l7 + %lo(doing_pdma)], %l7
76	cmp	%l7, 0
77	be	floppy_dosoftint
78	 nop
79
80	/* Load fdc register base */
81	sethi	%hi(fdc_status), %l3
82	ld	[%l3 + %lo(fdc_status)], %l3
83
84	/* Setup register addresses */
85	sethi	%hi(pdma_vaddr), %l5	! transfer buffer
86	ld	[%l5 + %lo(pdma_vaddr)], %l4
87	sethi	%hi(pdma_size), %l5	! bytes to go
88	ld	[%l5 + %lo(pdma_size)], %l6
89next_byte:
90  	ldub	[%l3], %l7
91
92	andcc	%l7, 0x80, %g0		! Does fifo still have data
93	bz	floppy_fifo_emptied	! fifo has been emptied...
94	 andcc	%l7, 0x20, %g0		! in non-dma mode still?
95	bz	floppy_overrun		! nope, overrun
96	 andcc	%l7, 0x40, %g0		! 0=write 1=read
97	bz	floppy_write
98	 sub	%l6, 0x1, %l6
99
100	/* Ok, actually read this byte */
101	ldub	[%l3 + 1], %l7
102	orcc	%g0, %l6, %g0
103	stb	%l7, [%l4]
104	bne	next_byte
105	 add	%l4, 0x1, %l4
106
107	b	floppy_tdone
108	 nop
109
110floppy_write:
111	/* Ok, actually write this byte */
112	ldub	[%l4], %l7
113	orcc	%g0, %l6, %g0
114	stb	%l7, [%l3 + 1]
115	bne	next_byte
116	 add	%l4, 0x1, %l4
117
118	/* fall through... */
119floppy_tdone:
120	sethi	%hi(pdma_vaddr), %l5
121	st	%l4, [%l5 + %lo(pdma_vaddr)]
122	sethi	%hi(pdma_size), %l5
123	st	%l6, [%l5 + %lo(pdma_size)]
124	/* Flip terminal count pin */
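	/* Pulse the terminal-count bit in the auxio register: set it,
	 * give it time to settle, then write it back cleared.  sun4c
	 * keeps TC at bit 0x04, the other models at bit 0x02.
	 */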
125	set	auxio_register, %l7
126	ld	[%l7], %l7
127
128	set	sparc_cpu_model, %l5
129	ld	[%l5], %l5
130	subcc   %l5, 1, %g0		/* enum { sun4c = 1 }; */
131	be	1f
132	 ldub	[%l7], %l5
133
134	or	%l5, 0xc2, %l5
135	stb	%l5, [%l7]
136	andn    %l5, 0x02, %l5
137	b	2f
138	 nop
139
1401:
141	or      %l5, 0xf4, %l5
142	stb     %l5, [%l7]
143	andn    %l5, 0x04, %l5
144
1452:
146	/* Kill some time so the bits set */
147	WRITE_PAUSE
148	WRITE_PAUSE
149
150	stb     %l5, [%l7]
151
152	/* Prevent recursion */
153	sethi	%hi(doing_pdma), %l7
154	b	floppy_dosoftint
155	 st	%g0, [%l7 + %lo(doing_pdma)]
156
157	/* We emptied the FIFO, but we haven't read everything
158	 * yet.  Store the current transfer address and
159	 * bytes left to read so we can continue when the next
160	 * fast IRQ comes in.
161	 */
162floppy_fifo_emptied:
163	sethi	%hi(pdma_vaddr), %l5
164	st	%l4, [%l5 + %lo(pdma_vaddr)]
165	sethi	%hi(pdma_size), %l7
166	st	%l6, [%l7 + %lo(pdma_size)]
167
168	/* Restore condition codes */
169	wr	%l0, 0x0, %psr
170	WRITE_PAUSE
171
172	jmp	%l1
173	rett	%l2
174
175floppy_overrun:
176	sethi	%hi(pdma_vaddr), %l5
177	st	%l4, [%l5 + %lo(pdma_vaddr)]
178	sethi	%hi(pdma_size), %l5
179	st	%l6, [%l5 + %lo(pdma_size)]
180	/* Prevent recursion */
181	sethi	%hi(doing_pdma), %l7
182	st	%g0, [%l7 + %lo(doing_pdma)]
183
184	/* fall through... */
185floppy_dosoftint:
186	rd	%wim, %l3
187	SAVE_ALL
188
189	/* Set all IRQs off. */
190	or	%l0, PSR_PIL, %l4
191	wr	%l4, 0x0, %psr
192	WRITE_PAUSE
193	wr	%l4, PSR_ET, %psr
194	WRITE_PAUSE
195
196	mov	11, %o0			! floppy irq level (unused anyway)
197	mov	%g0, %o1		! devid is not used in fast interrupts
198	call	sparc_floppy_irq
199	 add	%sp, STACKFRAME_SZ, %o2	! struct pt_regs *regs
200
201	RESTORE_ALL
202
203#endif /* (CONFIG_BLK_DEV_FD) */
204
205	/* Bad trap handler */
206	.globl	bad_trap_handler
207bad_trap_handler:
208	SAVE_ALL
209
210	wr	%l0, PSR_ET, %psr
211	WRITE_PAUSE
212
213	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
214	call	do_hw_interrupt
215	 mov	%l7, %o1		! trap number
216
217	RESTORE_ALL
218
219/* For now, all IRQs without a registered handler are sent here.
220 * handler_irq() will check whether a routine is registered for this
221 * interrupt and, if not, say so on the console.
222 */
223
224	.align	4
225	.globl	real_irq_entry, patch_handler_irq
226real_irq_entry:
227	SAVE_ALL
228
229#ifdef CONFIG_SMP
230	.globl	patchme_maybe_smp_msg
231
232	cmp	%l7, 11
233patchme_maybe_smp_msg:
234	bgu	maybe_smp4m_msg
235	 nop
236#endif
237
238real_irq_continue:
239	or	%l0, PSR_PIL, %g2
240	wr	%g2, 0x0, %psr
241	WRITE_PAUSE
242	wr	%g2, PSR_ET, %psr
243	WRITE_PAUSE
244	mov	%l7, %o0		! irq level
245patch_handler_irq:
246	call	handler_irq
247	 add	%sp, STACKFRAME_SZ, %o1	! pt_regs ptr
248	or	%l0, PSR_PIL, %g2	! restore PIL after handler_irq
249	wr	%g2, PSR_ET, %psr	! keep ET up
250	WRITE_PAUSE
251
252	RESTORE_ALL
253
254#ifdef CONFIG_SMP
255	/* SMP per-cpu ticker interrupts are handled specially. */
256smp4m_ticker:
257	bne	real_irq_continue+4
258	 or	%l0, PSR_PIL, %g2
259	wr	%g2, 0x0, %psr
260	WRITE_PAUSE
261	wr	%g2, PSR_ET, %psr
262	WRITE_PAUSE
263	call	smp4m_percpu_timer_interrupt
264	 add	%sp, STACKFRAME_SZ, %o0
265	wr	%l0, PSR_ET, %psr
266	WRITE_PAUSE
267	RESTORE_ALL
268
269	/* Here is where we check for a possible SMP IPI passed to us
270	 * on some level other than 15.  Level 15 is the NMI, is used
271	 * only for cross calls, and has a separate entry point below.
272	 *
273	 * IPIs are sent on levels 12, 13 and 14.  See IRQ_IPI_*.
274	 */
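	/* In the pending register, bit 28 is the call-function-single
	 * IPI, bit 29 the call-function IPI and bit 30 the reschedule
	 * IPI; the srl by 28 below moves them down to bits 0, 1 and 2.
	 */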
275maybe_smp4m_msg:
276	GET_PROCESSOR4M_ID(o3)
277	sethi	%hi(sun4m_irq_percpu), %l5
278	sll	%o3, 2, %o3
279	or	%l5, %lo(sun4m_irq_percpu), %o5
280	sethi	%hi(0x70000000), %o2	! Check all soft-IRQs
281	ld	[%o5 + %o3], %o1
282	ld	[%o1 + 0x00], %o3	! sun4m_irq_percpu[cpu]->pending
283	andcc	%o3, %o2, %g0
284	be,a	smp4m_ticker
285	 cmp	%l7, 14
286	/* Soft-IRQ IPI */
287	st	%o2, [%o1 + 0x04]	! sun4m_irq_percpu[cpu]->clear=0x70000000
288	WRITE_PAUSE
289	ld	[%o1 + 0x00], %g0	! sun4m_irq_percpu[cpu]->pending
290	WRITE_PAUSE
291	or	%l0, PSR_PIL, %l4
292	wr	%l4, 0x0, %psr
293	WRITE_PAUSE
294	wr	%l4, PSR_ET, %psr
295	WRITE_PAUSE
296	srl	%o3, 28, %o2		! shift for simpler checks below
297maybe_smp4m_msg_check_single:
298	andcc	%o2, 0x1, %g0
299	beq,a	maybe_smp4m_msg_check_mask
300	 andcc	%o2, 0x2, %g0
301	call	smp_call_function_single_interrupt
302	 nop
303	andcc	%o2, 0x2, %g0
304maybe_smp4m_msg_check_mask:
305	beq,a	maybe_smp4m_msg_check_resched
306	 andcc	%o2, 0x4, %g0
307	call	smp_call_function_interrupt
308	 nop
309	andcc	%o2, 0x4, %g0
310maybe_smp4m_msg_check_resched:
311	/* rescheduling is done in RESTORE_ALL regardless, but incr stats */
312	beq,a	maybe_smp4m_msg_out
313	 nop
314	call	smp_resched_interrupt
315	 nop
316maybe_smp4m_msg_out:
317	RESTORE_ALL
318
319	.align	4
320	.globl	linux_trap_ipi15
321linux_trap_ipi15:
322	SAVE_ALL
323	sethi	%hi(0x80000000), %o2
324	GET_PROCESSOR4M_ID(o0)
325	sethi	%hi(sun4m_irq_percpu), %l5
326	or	%l5, %lo(sun4m_irq_percpu), %o5
327	sll	%o0, 2, %o0
328	ld	[%o5 + %o0], %o5
329	ld	[%o5 + 0x00], %o3	! sun4m_irq_percpu[cpu]->pending
330	andcc	%o3, %o2, %g0
331	be	1f			! Must be an NMI async memory error
332	 st	%o2, [%o5 + 0x04]	! sun4m_irq_percpu[cpu]->clear=0x80000000
333	WRITE_PAUSE
334	ld	[%o5 + 0x00], %g0	! sun4m_irq_percpu[cpu]->pending
335	WRITE_PAUSE
336	or	%l0, PSR_PIL, %l4
337	wr	%l4, 0x0, %psr
338	WRITE_PAUSE
339	wr	%l4, PSR_ET, %psr
340	WRITE_PAUSE
341	call	smp4m_cross_call_irq
342	 nop
343	b	ret_trap_lockless_ipi
344	 clr	%l6
3451:
346	/* NMI async memory error handling. */
347	sethi	%hi(0x80000000), %l4
348	sethi	%hi(sun4m_irq_global), %o5
349	ld	[%o5 + %lo(sun4m_irq_global)], %l5
350	st	%l4, [%l5 + 0x0c]	! sun4m_irq_global->mask_set=0x80000000
351	WRITE_PAUSE
352	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
353	WRITE_PAUSE
354	or	%l0, PSR_PIL, %l4
355	wr	%l4, 0x0, %psr
356	WRITE_PAUSE
357	wr	%l4, PSR_ET, %psr
358	WRITE_PAUSE
359	call	sun4m_nmi
360	 nop
361	st	%l4, [%l5 + 0x08]	! sun4m_irq_global->mask_clear=0x80000000
362	WRITE_PAUSE
363	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
364	WRITE_PAUSE
365	RESTORE_ALL
366
367	.globl	smp4d_ticker
368	/* SMP per-cpu ticker interrupts are handled specially. */
369smp4d_ticker:
370	SAVE_ALL
371	or	%l0, PSR_PIL, %g2
372	sethi	%hi(CC_ICLR), %o0
373	sethi	%hi(1 << 14), %o1
374	or	%o0, %lo(CC_ICLR), %o0
375	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 14 in MXCC's ICLR */
376	wr	%g2, 0x0, %psr
377	WRITE_PAUSE
378	wr	%g2, PSR_ET, %psr
379	WRITE_PAUSE
380	call	smp4d_percpu_timer_interrupt
381	 add	%sp, STACKFRAME_SZ, %o0
382	wr	%l0, PSR_ET, %psr
383	WRITE_PAUSE
384	RESTORE_ALL
385
386	.align	4
387	.globl	linux_trap_ipi15_sun4d
388linux_trap_ipi15_sun4d:
389	SAVE_ALL
390	sethi	%hi(CC_BASE), %o4
391	sethi	%hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
392	or	%o4, (CC_EREG - CC_BASE), %o0
393	ldda	[%o0] ASI_M_MXCC, %o0
394	andcc	%o0, %o2, %g0
395	bne	1f
396	 sethi	%hi(BB_STAT2), %o2
397	lduba	[%o2] ASI_M_CTL, %o2
398	andcc	%o2, BB_STAT2_MASK, %g0
399	bne	2f
400	 or	%o4, (CC_ICLR - CC_BASE), %o0
401	sethi	%hi(1 << 15), %o1
402	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 15 in MXCC's ICLR */
403	or	%l0, PSR_PIL, %l4
404	wr	%l4, 0x0, %psr
405	WRITE_PAUSE
406	wr	%l4, PSR_ET, %psr
407	WRITE_PAUSE
408	call	smp4d_cross_call_irq
409	 nop
410	b	ret_trap_lockless_ipi
411	 clr	%l6
412
4131:	/* MXCC error */
4142:	/* BB error */
415	/* Disable PIL 15 */
416	set	CC_IMSK, %l4
417	lduha	[%l4] ASI_M_MXCC, %l5
418	sethi	%hi(1 << 15), %l7
419	or	%l5, %l7, %l5
420	stha	%l5, [%l4] ASI_M_MXCC
421	/* FIXME */
4221:	b,a	1b
423
424#ifdef CONFIG_SPARC_LEON
425	.globl	smpleon_ipi
426	.extern leon_ipi_interrupt
427	/* SMP per-cpu IPI interrupts are handled specially. */
428smpleon_ipi:
429	SAVE_ALL
430	or	%l0, PSR_PIL, %g2
431	wr	%g2, 0x0, %psr
432	WRITE_PAUSE
433	wr	%g2, PSR_ET, %psr
434	WRITE_PAUSE
435	call	leonsmp_ipi_interrupt
436	 add	%sp, STACKFRAME_SZ, %o1 ! pt_regs
437	wr	%l0, PSR_ET, %psr
438	WRITE_PAUSE
439	RESTORE_ALL
440
441	.align	4
442	.globl	linux_trap_ipi15_leon
443linux_trap_ipi15_leon:
444	SAVE_ALL
445	or	%l0, PSR_PIL, %l4
446	wr	%l4, 0x0, %psr
447	WRITE_PAUSE
448	wr	%l4, PSR_ET, %psr
449	WRITE_PAUSE
450	call	leon_cross_call_irq
451	 nop
452	b	ret_trap_lockless_ipi
453	 clr	%l6
454
455#endif /* CONFIG_SPARC_LEON */
456
457#endif /* CONFIG_SMP */
458
459	/* This routine handles illegal instructions and privileged
460	 * instruction attempts from user code.
461	 */
462	.align	4
463	.globl	bad_instruction
464bad_instruction:
465	sethi	%hi(0xc1f80000), %l4
466	ld	[%l1], %l5
467	sethi	%hi(0x81d80000), %l7
468	and	%l5, %l4, %l5
469	cmp	%l5, %l7
470	be	1f
471	SAVE_ALL
472
473	wr	%l0, PSR_ET, %psr		! re-enable traps
474	WRITE_PAUSE
475
476	add	%sp, STACKFRAME_SZ, %o0
477	mov	%l1, %o1
478	mov	%l2, %o2
479	call	do_illegal_instruction
480	 mov	%l0, %o3
481
482	RESTORE_ALL
483
4841:	/* unimplemented flush - just skip */
485	jmpl	%l2, %g0
486	 rett	%l2 + 4
487
488	.align	4
489	.globl	priv_instruction
490priv_instruction:
491	SAVE_ALL
492
493	wr	%l0, PSR_ET, %psr
494	WRITE_PAUSE
495
496	add	%sp, STACKFRAME_SZ, %o0
497	mov	%l1, %o1
498	mov	%l2, %o2
499	call	do_priv_instruction
500	 mov	%l0, %o3
501
502	RESTORE_ALL
503
504	/* This routine handles unaligned data accesses. */
505	.align	4
506	.globl	mna_handler
507mna_handler:
508	andcc	%l0, PSR_PS, %g0
509	be	mna_fromuser
510	 nop
511
512	SAVE_ALL
513
514	wr	%l0, PSR_ET, %psr
515	WRITE_PAUSE
516
517	ld	[%l1], %o1
518	call	kernel_unaligned_trap
519	 add	%sp, STACKFRAME_SZ, %o0
520
521	RESTORE_ALL
522
523mna_fromuser:
524	SAVE_ALL
525
526	wr	%l0, PSR_ET, %psr		! re-enable traps
527	WRITE_PAUSE
528
529	ld	[%l1], %o1
530	call	user_unaligned_trap
531	 add	%sp, STACKFRAME_SZ, %o0
532
533	RESTORE_ALL
534
535	/* This routine handles floating point disabled traps. */
536	.align	4
537	.globl	fpd_trap_handler
538fpd_trap_handler:
539	SAVE_ALL
540
541	wr	%l0, PSR_ET, %psr		! re-enable traps
542	WRITE_PAUSE
543
544	add	%sp, STACKFRAME_SZ, %o0
545	mov	%l1, %o1
546	mov	%l2, %o2
547	call	do_fpd_trap
548	 mov	%l0, %o3
549
550	RESTORE_ALL
551
552	/* This routine handles Floating Point Exceptions. */
553	.align	4
554	.globl	fpe_trap_handler
555fpe_trap_handler:
556	set	fpsave_magic, %l5
557	cmp	%l1, %l5
558	be	1f
559	 sethi	%hi(fpsave), %l5
560	or	%l5, %lo(fpsave), %l5
561	cmp	%l1, %l5
562	bne	2f
563	 sethi	%hi(fpsave_catch2), %l5
564	or	%l5, %lo(fpsave_catch2), %l5
565	wr	%l0, 0x0, %psr
566	WRITE_PAUSE
567	jmp	%l5
568	 rett	%l5 + 4
5691:
570	sethi	%hi(fpsave_catch), %l5
571	or	%l5, %lo(fpsave_catch), %l5
572	wr	%l0, 0x0, %psr
573	WRITE_PAUSE
574	jmp	%l5
575	 rett	%l5 + 4
576
5772:
578	SAVE_ALL
579
580	wr	%l0, PSR_ET, %psr		! re-enable traps
581	WRITE_PAUSE
582
583	add	%sp, STACKFRAME_SZ, %o0
584	mov	%l1, %o1
585	mov	%l2, %o2
586	call	do_fpe_trap
587	 mov	%l0, %o3
588
589	RESTORE_ALL
590
591	/* This routine handles Tag Overflow Exceptions. */
592	.align	4
593	.globl	do_tag_overflow
594do_tag_overflow:
595	SAVE_ALL
596
597	wr	%l0, PSR_ET, %psr		! re-enable traps
598	WRITE_PAUSE
599
600	add	%sp, STACKFRAME_SZ, %o0
601	mov	%l1, %o1
602	mov	%l2, %o2
603	call	handle_tag_overflow
604	 mov	%l0, %o3
605
606	RESTORE_ALL
607
608	/* This routine handles Watchpoint Exceptions. */
609	.align	4
610	.globl	do_watchpoint
611do_watchpoint:
612	SAVE_ALL
613
614	wr	%l0, PSR_ET, %psr		! re-enable traps
615	WRITE_PAUSE
616
617	add	%sp, STACKFRAME_SZ, %o0
618	mov	%l1, %o1
619	mov	%l2, %o2
620	call	handle_watchpoint
621	 mov	%l0, %o3
622
623	RESTORE_ALL
624
625	/* This routine handles Register Access Exceptions. */
626	.align	4
627	.globl	do_reg_access
628do_reg_access:
629	SAVE_ALL
630
631	wr	%l0, PSR_ET, %psr		! re-enable traps
632	WRITE_PAUSE
633
634	add	%sp, STACKFRAME_SZ, %o0
635	mov	%l1, %o1
636	mov	%l2, %o2
637	call	handle_reg_access
638	 mov	%l0, %o3
639
640	RESTORE_ALL
641
642	/* This routine handles Co-Processor Disabled Exceptions. */
643	.align	4
644	.globl	do_cp_disabled
645do_cp_disabled:
646	SAVE_ALL
647
648	wr	%l0, PSR_ET, %psr		! re-enable traps
649	WRITE_PAUSE
650
651	add	%sp, STACKFRAME_SZ, %o0
652	mov	%l1, %o1
653	mov	%l2, %o2
654	call	handle_cp_disabled
655	 mov	%l0, %o3
656
657	RESTORE_ALL
658
659	/* This routine handles Co-Processor Exceptions. */
660	.align	4
661	.globl	do_cp_exception
662do_cp_exception:
663	SAVE_ALL
664
665	wr	%l0, PSR_ET, %psr		! re-enable traps
666	WRITE_PAUSE
667
668	add	%sp, STACKFRAME_SZ, %o0
669	mov	%l1, %o1
670	mov	%l2, %o2
671	call	handle_cp_exception
672	 mov	%l0, %o3
673
674	RESTORE_ALL
675
676	/* This routine handles Hardware Divide By Zero Exceptions. */
677	.align	4
678	.globl	do_hw_divzero
679do_hw_divzero:
680	SAVE_ALL
681
682	wr	%l0, PSR_ET, %psr		! re-enable traps
683	WRITE_PAUSE
684
685	add	%sp, STACKFRAME_SZ, %o0
686	mov	%l1, %o1
687	mov	%l2, %o2
688	call	handle_hw_divzero
689	 mov	%l0, %o3
690
691	RESTORE_ALL
692
693	.align	4
694	.globl	do_flush_windows
695do_flush_windows:
696	SAVE_ALL
697
698	wr	%l0, PSR_ET, %psr
699	WRITE_PAUSE
700
701	andcc	%l0, PSR_PS, %g0
702	bne	dfw_kernel
703	 nop
704
705	call	flush_user_windows
706	 nop
707
708	/* Advance over the trap instruction. */
709	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
710	add	%l1, 0x4, %l2
711	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
712	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
713
714	RESTORE_ALL
715
716	.globl	flush_patch_one
717
718	/* We get these for debugging routines using __builtin_return_address() */
719dfw_kernel:
720flush_patch_one:
721	FLUSH_ALL_KERNEL_WINDOWS
722
723	/* Advance over the trap instruction. */
724	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
725	add	%l1, 0x4, %l2
726	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
727	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
728
729	RESTORE_ALL
730
731	/* The getcc software trap.  The user wants the condition codes from
732	 * the %psr in register %g1.
733	 */
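	/* The integer condition codes live in %psr bits 23:20 (N, Z,
	 * V, C), so shifting right by 20 and masking with 0xf leaves
	 * exactly those four bits in %g1.
	 */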
734
735	.align	4
736	.globl	getcc_trap_handler
737getcc_trap_handler:
738	srl	%l0, 20, %g1	! give user
739	and	%g1, 0xf, %g1	! only ICC bits in %psr
740	jmp	%l2		! advance over trap instruction
741	rett	%l2 + 0x4	! like this...
742
743	/* The setcc software trap.  The user has condition codes in %g1
744	 * that it would like placed in the %psr.  Be careful not to flip
745	 * any unintentional bits!
746	 */
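	/* The sll by 20 lines the user-supplied value up with the ICC
	 * field (%psr bits 23:20), and masking with PSR_ICC guarantees
	 * that privileged fields such as ET, PS and PIL stay untouched.
	 */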
747
748	.align	4
749	.globl	setcc_trap_handler
750setcc_trap_handler:
751	sll	%g1, 0x14, %l4
752	set	PSR_ICC, %l5
753	andn	%l0, %l5, %l0	! clear ICC bits in %psr
754	and	%l4, %l5, %l4	! clear non-ICC bits in user value
755	or	%l4, %l0, %l4	! or them in... mix mix mix
756
757	wr	%l4, 0x0, %psr	! set new %psr
758	WRITE_PAUSE		! TI scumbags...
759
760	jmp	%l2		! advance over trap instruction
761	rett	%l2 + 0x4	! like this...
762
763#ifndef CONFIG_SMP
764	.align	4
765	.globl	linux_trap_ipi15
766linux_trap_ipi15:
767	SAVE_ALL
768
769	/* Now it is safe to re-enable traps without recursion. */
770	or	%l0, PSR_PIL, %l0
771	wr	%l0, PSR_ET, %psr
772	WRITE_PAUSE
773
774	/* Now call the c-code with the pt_regs frame ptr and the
775	 * memory error registers as arguments.  The ordering chosen
776	 * here is due to unlatching semantics.
777	 */
778	sethi	%hi(AC_SYNC_ERR), %o0
779	add	%o0, 0x4, %o0
780	lda	[%o0] ASI_CONTROL, %o2	! sync vaddr
781	sub	%o0, 0x4, %o0
782	lda	[%o0] ASI_CONTROL, %o1	! sync error
783	add	%o0, 0xc, %o0
784	lda	[%o0] ASI_CONTROL, %o4	! async vaddr
785	sub	%o0, 0x4, %o0
786	lda	[%o0] ASI_CONTROL, %o3	! async error
787	call	sparc_lvl15_nmi
788	 add	%sp, STACKFRAME_SZ, %o0
789
790	RESTORE_ALL
791
792#endif /* CONFIG_SMP */
793
794	.align	4
795	.globl	invalid_segment_patch1_ff
796	.globl	invalid_segment_patch2_ff
797invalid_segment_patch1_ff:	cmp	%l4, 0xff
798invalid_segment_patch2_ff:	mov	0xff, %l3
799
800	.align	4
801	.globl	invalid_segment_patch1_1ff
802	.globl	invalid_segment_patch2_1ff
803invalid_segment_patch1_1ff:	cmp	%l4, 0x1ff
804invalid_segment_patch2_1ff:	mov	0x1ff, %l3
805
806	.align	4
807	.globl	num_context_patch1_16, num_context_patch2_16
808num_context_patch1_16:		mov	0x10, %l7
809num_context_patch2_16:		mov	0x10, %l7
810
811	.align	4
812	.globl	vac_linesize_patch_32
813vac_linesize_patch_32:		subcc	%l7, 32, %l7
814
815	.align	4
816	.globl	vac_hwflush_patch1_on, vac_hwflush_patch2_on
817
818/*
819 * Ugly, but we can't use hardware flushing on the sun4 and we'd require
820 * two instructions (Anton)
821 */
822vac_hwflush_patch1_on:		addcc	%l7, -PAGE_SIZE, %l7
823
824vac_hwflush_patch2_on:		sta	%g0, [%l3 + %l7] ASI_HWFLUSHSEG
825
826	.globl	invalid_segment_patch1, invalid_segment_patch2
827	.globl	num_context_patch1
828	.globl	vac_linesize_patch, vac_hwflush_patch1
829	.globl	vac_hwflush_patch2
830
831	.align	4
832	.globl	sun4c_fault
833
834! %l0 = %psr
835! %l1 = %pc
836! %l2 = %npc
837! %l3 = %wim
838! %l7 = 1 for textfault
839! We want error in %l5, vaddr in %l6
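! The decode below leaves %l7 = (vaddr & ~0xfff) | (write << 1) | textfault,
! the same encoding that sun4c_fault_fromuser unpacks into the arguments
! of do_sun4c_fault().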
840sun4c_fault:
841	sethi	%hi(AC_SYNC_ERR), %l4
842	add	%l4, 0x4, %l6			! AC_SYNC_VA in %l6
843	lda	[%l6] ASI_CONTROL, %l5		! Address
844	lda	[%l4] ASI_CONTROL, %l6		! Error, retained for a bit
845
846	andn	%l5, 0xfff, %l5			! Encode all info into l7
847	srl	%l6, 14, %l4
848
849	and	%l4, 2, %l4
850	or	%l5, %l4, %l4
851
852	or	%l4, %l7, %l7			! l7 = [addr,write,txtfault]
853
854	andcc	%l0, PSR_PS, %g0
855	be	sun4c_fault_fromuser
856	 andcc	%l7, 1, %g0			! Text fault?
857
858	be	1f
859	 sethi	%hi(KERNBASE), %l4
860
861	mov	%l1, %l5			! PC
862
8631:
864	cmp	%l5, %l4
865	blu	sun4c_fault_fromuser
866	 sethi	%hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
867
868	/* If the kernel references a bogus kernel pointer, or a pte that
869	 * points to a nonexistent page in RAM, we would run this code
870	 * _forever_ and lock up the machine!!!!!  So we must check for
871	 * that condition by examining the AC_SYNC_ERR bits; a parity
872	 * error would cause the same thing.  So we just check that we
873	 * are in fact servicing a TLB miss here and not some other type
874	 * of fault for the kernel.
875	 */
876	andcc	%l6, 0x80, %g0
877	be	sun4c_fault_fromuser
878	 and	%l5, %l4, %l5
879
880	/* Test for NULL pte_t * in vmalloc area. */
881	sethi   %hi(VMALLOC_START), %l4
882	cmp     %l5, %l4
883	blu,a   invalid_segment_patch1
884	 lduXa	[%l5] ASI_SEGMAP, %l4
885
886	sethi   %hi(swapper_pg_dir), %l4
887	srl     %l5, SUN4C_PGDIR_SHIFT, %l6
888	or      %l4, %lo(swapper_pg_dir), %l4
889	sll     %l6, 2, %l6
890	ld      [%l4 + %l6], %l4
891	andcc   %l4, PAGE_MASK, %g0
892	be      sun4c_fault_fromuser
893	 lduXa  [%l5] ASI_SEGMAP, %l4
894
895invalid_segment_patch1:
896	cmp	%l4, 0x7f
897	bne	1f
898	 sethi	%hi(sun4c_kfree_ring), %l4
899	or	%l4, %lo(sun4c_kfree_ring), %l4
900	ld	[%l4 + 0x18], %l3
901	deccc	%l3			! do we have a free entry?
902	bcs,a	2f			! no, unmap one.
903	 sethi	%hi(sun4c_kernel_ring), %l4
904
905	st	%l3, [%l4 + 0x18]	! sun4c_kfree_ring.num_entries--
906
907	ld	[%l4 + 0x00], %l6	! entry = sun4c_kfree_ring.ringhd.next
908	st	%l5, [%l6 + 0x08]	! entry->vaddr = address
909
910	ld	[%l6 + 0x00], %l3	! next = entry->next
911	ld	[%l6 + 0x04], %l7	! entry->prev
912
913	st	%l7, [%l3 + 0x04]	! next->prev = entry->prev
914	st	%l3, [%l7 + 0x00]	! entry->prev->next = next
915
916	sethi	%hi(sun4c_kernel_ring), %l4
917	or	%l4, %lo(sun4c_kernel_ring), %l4
918					! head = &sun4c_kernel_ring.ringhd
919
920	ld	[%l4 + 0x00], %l7	! head->next
921
922	st	%l4, [%l6 + 0x04]	! entry->prev = head
923	st	%l7, [%l6 + 0x00]	! entry->next = head->next
924	st	%l6, [%l7 + 0x04]	! head->next->prev = entry
925
926	st	%l6, [%l4 + 0x00]	! head->next = entry
927
928	ld	[%l4 + 0x18], %l3
929	inc	%l3			! sun4c_kernel_ring.num_entries++
930	st	%l3, [%l4 + 0x18]
931	b	4f
932	 ld	[%l6 + 0x08], %l5
933
9342:
935	or	%l4, %lo(sun4c_kernel_ring), %l4
936					! head = &sun4c_kernel_ring.ringhd
937
938	ld	[%l4 + 0x04], %l6	! entry = head->prev
939
940	ld	[%l6 + 0x08], %l3	! tmp = entry->vaddr
941
942	! Flush segment from the cache.
943	sethi	%hi((64 * 1024)), %l7
9449:
945vac_hwflush_patch1:
946vac_linesize_patch:
947	subcc	%l7, 16, %l7
948	bne	9b
949vac_hwflush_patch2:
950	 sta	%g0, [%l3 + %l7] ASI_FLUSHSEG
951
952	st	%l5, [%l6 + 0x08]	! entry->vaddr = address
953
954	ld	[%l6 + 0x00], %l5	! next = entry->next
955	ld	[%l6 + 0x04], %l7	! entry->prev
956
957	st	%l7, [%l5 + 0x04]	! next->prev = entry->prev
958	st	%l5, [%l7 + 0x00]	! entry->prev->next = next
959	st	%l4, [%l6 + 0x04]	! entry->prev = head
960
961	ld	[%l4 + 0x00], %l7	! head->next
962
963	st	%l7, [%l6 + 0x00]	! entry->next = head->next
964	st	%l6, [%l7 + 0x04]	! head->next->prev = entry
965	st	%l6, [%l4 + 0x00]	! head->next = entry
966
967	mov	%l3, %l5		! address = tmp
968
9694:
970num_context_patch1:
971	mov	0x08, %l7
972
973	ld	[%l6 + 0x08], %l4
974	ldub	[%l6 + 0x0c], %l3
975	or	%l4, %l3, %l4		! encode new vaddr/pseg into l4
976
977	sethi	%hi(AC_CONTEXT), %l3
978	lduba	[%l3] ASI_CONTROL, %l6
979
980	/* Invalidate old mapping, instantiate new mapping,
981	 * for each context.  Registers l6/l7 are live across
982	 * this loop.
983	 */
9843:	deccc	%l7
985	sethi	%hi(AC_CONTEXT), %l3
986	stba	%l7, [%l3] ASI_CONTROL
987invalid_segment_patch2:
988	mov	0x7f, %l3
989	stXa	%l3, [%l5] ASI_SEGMAP
990	andn	%l4, 0x1ff, %l3
991	bne	3b
992	 stXa	%l4, [%l3] ASI_SEGMAP
993
994	sethi	%hi(AC_CONTEXT), %l3
995	stba	%l6, [%l3] ASI_CONTROL
996
997	andn	%l4, 0x1ff, %l5
998
9991:
1000	sethi	%hi(VMALLOC_START), %l4
1001	cmp	%l5, %l4
1002
1003	bgeu	1f
1004	 mov	1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7
1005
1006	sethi	%hi(KERNBASE), %l6
1007
1008	sub	%l5, %l6, %l4
1009	srl	%l4, PAGE_SHIFT, %l4
1010	sethi	%hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
1011	or	%l3, %l4, %l3
1012
1013	sethi	%hi(PAGE_SIZE), %l4
1014
10152:
1016	sta	%l3, [%l5] ASI_PTE
1017	deccc	%l7
1018	inc	%l3
1019	bne	2b
1020	 add	%l5, %l4, %l5
1021
1022	b	7f
1023	 sethi	%hi(sun4c_kernel_faults), %l4
1024
10251:
1026	srl	%l5, SUN4C_PGDIR_SHIFT, %l3
1027	sethi	%hi(swapper_pg_dir), %l4
1028	or	%l4, %lo(swapper_pg_dir), %l4
1029	sll	%l3, 2, %l3
1030	ld	[%l4 + %l3], %l4
1031	and	%l4, PAGE_MASK, %l4
1032
1033	srl	%l5, (PAGE_SHIFT - 2), %l6
1034	and	%l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
1035	add	%l6, %l4, %l6
1036
1037	sethi	%hi(PAGE_SIZE), %l4
1038
10392:
1040	ld	[%l6], %l3
1041	deccc	%l7
1042	sta	%l3, [%l5] ASI_PTE
1043	add	%l6, 0x4, %l6
1044	bne	2b
1045	 add	%l5, %l4, %l5
1046
1047	sethi	%hi(sun4c_kernel_faults), %l4
10487:
1049	ld	[%l4 + %lo(sun4c_kernel_faults)], %l3
1050	inc	%l3
1051	st	%l3, [%l4 + %lo(sun4c_kernel_faults)]
1052
1053	/* Restore condition codes */
1054	wr	%l0, 0x0, %psr
1055	WRITE_PAUSE
1056	jmp	%l1
1057	 rett	%l2
1058
1059sun4c_fault_fromuser:
1060	SAVE_ALL
1061	 nop
1062
1063	mov	%l7, %o1		! Decode the info from %l7
1064	mov	%l7, %o2
1065	and	%o1, 1, %o1		! arg2 = text_faultp
1066	mov	%l7, %o3
1067	and	%o2, 2, %o2		! arg3 = writep
1068	andn	%o3, 0xfff, %o3		! arg4 = faulting address
1069
1070	wr	%l0, PSR_ET, %psr
1071	WRITE_PAUSE
1072
1073	call	do_sun4c_fault
1074	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr
1075
1076	RESTORE_ALL
1077
1078	.align	4
1079	.globl	srmmu_fault
1080srmmu_fault:
1081	mov	0x400, %l5
1082	mov	0x300, %l4
1083
1084	lda	[%l5] ASI_M_MMUREGS, %l6	! read sfar first
1085	lda	[%l4] ASI_M_MMUREGS, %l5	! read sfsr last
1086
1087	andn	%l6, 0xfff, %l6
1088	srl	%l5, 6, %l5			! and encode all info into l7
1089
1090	and	%l5, 2, %l5
1091	or	%l5, %l6, %l6
1092
1093	or	%l6, %l7, %l7			! l7 = [addr,write,txtfault]
1094
1095	SAVE_ALL
1096
1097	mov	%l7, %o1
1098	mov	%l7, %o2
1099	and	%o1, 1, %o1		! arg2 = text_faultp
1100	mov	%l7, %o3
1101	and	%o2, 2, %o2		! arg3 = writep
1102	andn	%o3, 0xfff, %o3		! arg4 = faulting address
1103
1104	wr	%l0, PSR_ET, %psr
1105	WRITE_PAUSE
1106
1107	call	do_sparc_fault
1108	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr
1109
1110	RESTORE_ALL
1111
1112	.align	4
1113	.globl	sys_nis_syscall
1114sys_nis_syscall:
1115	mov	%o7, %l5
1116	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
1117	call	c_sys_nis_syscall
1118	 mov	%l5, %o7
1119
1120	.align	4
1121	.globl	sys_execve
1122sys_execve:
1123	mov	%o7, %l5
1124	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
1125	call	sparc_execve
1126	 mov	%l5, %o7
1127
1128	.globl	sunos_execv
1129sunos_execv:
1130	st	%g0, [%sp + STACKFRAME_SZ + PT_I2]
1131
1132	call	sparc_execve
1133	 add	%sp, STACKFRAME_SZ, %o0
1134
1135	b	ret_sys_call
1136	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0
1137
1138	.align	4
1139	.globl	sys_sparc_pipe
1140sys_sparc_pipe:
1141	mov	%o7, %l5
1142	add	%sp, STACKFRAME_SZ, %o0		! pt_regs *regs arg
1143	call	sparc_pipe
1144	 mov	%l5, %o7
1145
1146	.align	4
1147	.globl	sys_sigaltstack
1148sys_sigaltstack:
1149	mov	%o7, %l5
1150	mov	%fp, %o2
1151	call	do_sigaltstack
1152	 mov	%l5, %o7
1153
1154	.align	4
1155	.globl	sys_sigstack
1156sys_sigstack:
1157	mov	%o7, %l5
1158	mov	%fp, %o2
1159	call	do_sys_sigstack
1160	 mov	%l5, %o7
1161
1162	.align	4
1163	.globl	sys_sigreturn
1164sys_sigreturn:
1165	call	do_sigreturn
1166	 add	%sp, STACKFRAME_SZ, %o0
1167
1168	ld	[%curptr + TI_FLAGS], %l5
1169	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1170	be	1f
1171	 nop
1172
1173	call	syscall_trace
1174	 nop
1175
11761:
1177	/* We don't want to muck with user registers like a
1178	 * normal syscall, just return.
1179	 */
1180	RESTORE_ALL
1181
1182	.align	4
1183	.globl	sys_rt_sigreturn
1184sys_rt_sigreturn:
1185	call	do_rt_sigreturn
1186	 add	%sp, STACKFRAME_SZ, %o0
1187
1188	ld	[%curptr + TI_FLAGS], %l5
1189	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1190	be	1f
1191	 nop
1192
1193	add	%sp, STACKFRAME_SZ, %o0
1194	call	syscall_trace
1195	 mov	1, %o1
1196
11971:
1198	/* We are returning to a signal handler. */
1199	RESTORE_ALL
1200
1201	/* Now that we have a real sys_clone, sys_fork() is
1202	 * implemented in terms of it.  Our _real_ implementation
1203	 * of SunOS vfork() will use sys_vfork().
1204	 *
1205	 * XXX These three should be consolidated into mostly shared
1206	 * XXX code just like on sparc64... -DaveM
1207	 */
1208	.align	4
1209	.globl	sys_fork, flush_patch_two
1210sys_fork:
1211	mov	%o7, %l5
1212flush_patch_two:
1213	FLUSH_ALL_KERNEL_WINDOWS;
1214	ld	[%curptr + TI_TASK], %o4
1215	rd	%psr, %g4
1216	WRITE_PAUSE
1217	mov	SIGCHLD, %o0			! arg0:	clone flags
1218	rd	%wim, %g5
1219	WRITE_PAUSE
1220	mov	%fp, %o1			! arg1:	usp
1221	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1222	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
1223	mov	0, %o3
1224	call	sparc_do_fork
1225	 mov	%l5, %o7
1226
1227	/* Whee, kernel threads! */
1228	.globl	sys_clone, flush_patch_three
1229sys_clone:
1230	mov	%o7, %l5
1231flush_patch_three:
1232	FLUSH_ALL_KERNEL_WINDOWS;
1233	ld	[%curptr + TI_TASK], %o4
1234	rd	%psr, %g4
1235	WRITE_PAUSE
1236
1237	/* arg0,1: flags,usp  -- loaded already */
1238	cmp	%o1, 0x0			! Is new_usp NULL?
1239	rd	%wim, %g5
1240	WRITE_PAUSE
1241	be,a	1f
1242	 mov	%fp, %o1			! yes, use callers usp
1243	andn	%o1, 7, %o1			! no, align to 8 bytes
12441:
1245	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1246	add	%sp, STACKFRAME_SZ, %o2		! arg2:	pt_regs ptr
1247	mov	0, %o3
1248	call	sparc_do_fork
1249	 mov	%l5, %o7
1250
1251	/* Whee, real vfork! */
1252	.globl	sys_vfork, flush_patch_four
1253sys_vfork:
1254flush_patch_four:
1255	FLUSH_ALL_KERNEL_WINDOWS;
1256	ld	[%curptr + TI_TASK], %o4
1257	rd	%psr, %g4
1258	WRITE_PAUSE
1259	rd	%wim, %g5
1260	WRITE_PAUSE
1261	std	%g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
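	/* 0x4000 | 0x0100 is CLONE_VFORK | CLONE_VM with SIGCHLD as the
	 * child exit signal, i.e. classic vfork() semantics.
	 */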
1262	sethi	%hi(0x4000 | 0x0100 | SIGCHLD), %o0
1263	mov	%fp, %o1
1264	or	%o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
1265	sethi	%hi(sparc_do_fork), %l1
1266	mov	0, %o3
1267	jmpl	%l1 + %lo(sparc_do_fork), %g0
1268	 add	%sp, STACKFRAME_SZ, %o2
1269
1270        .align  4
1271linux_sparc_ni_syscall:
1272	sethi   %hi(sys_ni_syscall), %l7
1273	b       syscall_is_too_hard
1274	 or     %l7, %lo(sys_ni_syscall), %l7
1275
1276linux_fast_syscall:
1277	andn	%l7, 3, %l7
1278	mov	%i0, %o0
1279	mov	%i1, %o1
1280	mov 	%i2, %o2
1281	jmpl	%l7 + %g0, %g0
1282	 mov	%i3, %o3
1283
1284linux_syscall_trace:
1285	add	%sp, STACKFRAME_SZ, %o0
1286	call	syscall_trace
1287	 mov	0, %o1
1288	cmp	%o0, 0
1289	bne	3f
1290	 mov	-ENOSYS, %o0
1291	mov	%i0, %o0
1292	mov	%i1, %o1
1293	mov	%i2, %o2
1294	mov	%i3, %o3
1295	b	2f
1296	 mov	%i4, %o4
1297
1298	.globl	ret_from_fork
1299ret_from_fork:
1300	call	schedule_tail
1301	 ld	[%g3 + TI_TASK], %o0
1302	b	ret_sys_call
1303	 ld	[%sp + STACKFRAME_SZ + PT_I0], %o0
1304
1305	/* Linux native system calls enter here... */
1306	.align	4
1307	.globl	linux_sparc_syscall
1308linux_sparc_syscall:
1309	sethi	%hi(PSR_SYSCALL), %l4
1310	or	%l0, %l4, %l0
1311	/* Direct access to user regs, much faster. */
1312	cmp	%g1, NR_syscalls
1313	bgeu	linux_sparc_ni_syscall
1314	 sll	%g1, 2, %l4
1315	ld	[%l7 + %l4], %l7
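	/* A syscall table entry with its low bit set marks a "fast"
	 * syscall: it is dispatched via linux_fast_syscall with only
	 * %i0-%i3 copied into %o0-%o3 and no pt_regs frame built.
	 */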
1316	andcc	%l7, 1, %g0
1317	bne	linux_fast_syscall
1318	 /* Just do first insn from SAVE_ALL in the delay slot */
1319
1320syscall_is_too_hard:
1321	SAVE_ALL_HEAD
1322	 rd	%wim, %l3
1323
1324	wr	%l0, PSR_ET, %psr
1325	mov	%i0, %o0
1326	mov	%i1, %o1
1327	mov	%i2, %o2
1328
1329	ld	[%curptr + TI_FLAGS], %l5
1330	mov	%i3, %o3
1331	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
1332	mov	%i4, %o4
1333	bne	linux_syscall_trace
1334	 mov	%i0, %l5
13352:
1336	call	%l7
1337	 mov	%i5, %o5
1338
13393:
1340	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1341
1342ret_sys_call:
1343	ld	[%curptr + TI_FLAGS], %l6
1344	cmp	%o0, -ERESTART_RESTARTBLOCK
1345	ld	[%sp + STACKFRAME_SZ + PT_PSR], %g3
1346	set	PSR_C, %g2
1347	bgeu	1f
1348	 andcc	%l6, _TIF_SYSCALL_TRACE, %g0
1349
1350	/* System call success, clear Carry condition code. */
1351	andn	%g3, %g2, %g3
1352	clr	%l6
1353	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1354	bne	linux_syscall_trace2
1355	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1356	add	%l1, 0x4, %l2			/* npc = npc+4 */
1357	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1358	b	ret_trap_entry
1359	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
13601:
1361	/* System call failure, set Carry condition code.
1362	 * Also, get abs(errno) to return to the process.
1363	 */
1364	sub	%g0, %o0, %o0
1365	or	%g3, %g2, %g3
1366	st	%o0, [%sp + STACKFRAME_SZ + PT_I0]
1367	mov	1, %l6
1368	st	%g3, [%sp + STACKFRAME_SZ + PT_PSR]
1369	bne	linux_syscall_trace2
1370	 ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1371	add	%l1, 0x4, %l2			/* npc = npc+4 */
1372	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1373	b	ret_trap_entry
1374	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1375
1376linux_syscall_trace2:
1377	add	%sp, STACKFRAME_SZ, %o0
1378	mov	1, %o1
1379	call	syscall_trace
1380	 add	%l1, 0x4, %l2			/* npc = npc+4 */
1381	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
1382	b	ret_trap_entry
1383	 st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]
1384
1385
1386/* Saving and restoring the FPU state is best done from lowlevel code.
1387 *
1388 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
1389 *             void *fpqueue, unsigned long *fpqdepth)
1390 */
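	/* Bit 13 (0x2000) of %fsr is qne, "queue not empty": while it
	 * is set we drain the deferred-trap queue one doubleword at a
	 * time with std %fq, and the number of entries drained is
	 * returned in *fpqdepth.
	 */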
1391
1392	.globl	fpsave
1393fpsave:
1394	st	%fsr, [%o1]	! this can trap on us if fpu is in bogon state
1395	ld	[%o1], %g1
1396	set	0x2000, %g4
1397	andcc	%g1, %g4, %g0
1398	be	2f
1399	 mov	0, %g2
1400
1401	/* We have an fpqueue to save. */
14021:
1403	std	%fq, [%o2]
1404fpsave_magic:
1405	st	%fsr, [%o1]
1406	ld	[%o1], %g3
1407	andcc	%g3, %g4, %g0
1408	add	%g2, 1, %g2
1409	bne	1b
1410	 add	%o2, 8, %o2
1411
14122:
1413	st	%g2, [%o3]
1414
1415	std	%f0, [%o0 + 0x00]
1416	std	%f2, [%o0 + 0x08]
1417	std	%f4, [%o0 + 0x10]
1418	std	%f6, [%o0 + 0x18]
1419	std	%f8, [%o0 + 0x20]
1420	std	%f10, [%o0 + 0x28]
1421	std	%f12, [%o0 + 0x30]
1422	std	%f14, [%o0 + 0x38]
1423	std	%f16, [%o0 + 0x40]
1424	std	%f18, [%o0 + 0x48]
1425	std	%f20, [%o0 + 0x50]
1426	std	%f22, [%o0 + 0x58]
1427	std	%f24, [%o0 + 0x60]
1428	std	%f26, [%o0 + 0x68]
1429	std	%f28, [%o0 + 0x70]
1430	retl
1431	 std	%f30, [%o0 + 0x78]
1432
1433	/* Thanks to Theo de Raadt and the authors of the Sprite/NetBSD/OpenBSD
1434	 * code for pointing out this possible deadlock: while we save state
1435	 * above we could trap on the %fsr store, so our low-level FPU trap
1436	 * code has to know how to deal with this.
1437	 */
1438fpsave_catch:
1439	b	fpsave_magic + 4
1440	 st	%fsr, [%o1]
1441
1442fpsave_catch2:
1443	b	fpsave + 4
1444	 st	%fsr, [%o1]
1445
1446	/* void fpload(unsigned long *fpregs, unsigned long *fsr); */
1447
1448	.globl	fpload
1449fpload:
1450	ldd	[%o0 + 0x00], %f0
1451	ldd	[%o0 + 0x08], %f2
1452	ldd	[%o0 + 0x10], %f4
1453	ldd	[%o0 + 0x18], %f6
1454	ldd	[%o0 + 0x20], %f8
1455	ldd	[%o0 + 0x28], %f10
1456	ldd	[%o0 + 0x30], %f12
1457	ldd	[%o0 + 0x38], %f14
1458	ldd	[%o0 + 0x40], %f16
1459	ldd	[%o0 + 0x48], %f18
1460	ldd	[%o0 + 0x50], %f20
1461	ldd	[%o0 + 0x58], %f22
1462	ldd	[%o0 + 0x60], %f24
1463	ldd	[%o0 + 0x68], %f26
1464	ldd	[%o0 + 0x70], %f28
1465	ldd	[%o0 + 0x78], %f30
1466	ld	[%o1], %fsr
1467	retl
1468	 nop
1469
1470	/* __ndelay and __udelay take two arguments:
1471	 * 0 - nsecs or usecs to delay
1472	 * 1 - per_cpu udelay_val (loops per jiffy)
1473	 *
1474	 * Note that ndelay gives HZ times higher resolution but has a 10ms
1475	 * limit.  udelay can handle up to 1s.
1476	 */
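	/* The multipliers below are 32.32 fixed-point scale factors:
	 * 0x1ae is roughly 2**32 / (10**9 / HZ) and 0x10c7 is roughly
	 * 2**32 / 10**6, both rounded up.  Multiplying by udelay_val
	 * and taking the high 32 bits of the product (with an extra
	 * factor of HZ for udelay) yields the delay-loop iteration count.
	 */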
1477	.globl	__ndelay
1478__ndelay:
1479	save	%sp, -STACKFRAME_SZ, %sp
1480	mov	%i0, %o0
1481	call	.umul			! round multiplier up so large ns ok
1482	 mov	0x1ae, %o1		! 2**32 / (1 000 000 000 / HZ)
1483	call	.umul
1484	 mov	%i1, %o1		! udelay_val
1485	ba	delay_continue
1486	 mov	%o1, %o0		! >>32 later for better resolution
1487
1488	.globl	__udelay
1489__udelay:
1490	save	%sp, -STACKFRAME_SZ, %sp
1491	mov	%i0, %o0
1492	sethi	%hi(0x10c7), %o1	! round multiplier up so large us ok
1493	call	.umul
1494	 or	%o1, %lo(0x10c7), %o1	! 2**32 / 1 000 000
1495	call	.umul
1496	 mov	%i1, %o1		! udelay_val
1497	sethi	%hi(0x028f4b62), %l0	! Add in rounding constant * 2**32,
1498	or	%g0, %lo(0x028f4b62), %l0
1499	addcc	%o0, %l0, %o0		! 2**32 * 0.009 999
1500	bcs,a	3f
1501	 add	%o1, 0x01, %o1
15023:
1503	call	.umul
1504	 mov	HZ, %o0			! >>32 earlier for wider range
1505
1506delay_continue:
1507	cmp	%o0, 0x0
15081:
1509	bne	1b
1510	 subcc	%o0, 1, %o0
1511
1512	ret
1513	restore
1514
1515	/* Handle a software breakpoint */
1516	/* We have to inform parent that child has stopped */
1517	.align 4
1518	.globl breakpoint_trap
1519breakpoint_trap:
1520	rd	%wim,%l3
1521	SAVE_ALL
1522	wr 	%l0, PSR_ET, %psr
1523	WRITE_PAUSE
1524
1525	st	%i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
1526	call	sparc_breakpoint
1527	 add	%sp, STACKFRAME_SZ, %o0
1528
1529	RESTORE_ALL
1530
1531#ifdef CONFIG_KGDB
1532	.align	4
1533	.globl	kgdb_trap_low
1534	.type	kgdb_trap_low,#function
1535kgdb_trap_low:
1536	rd	%wim,%l3
1537	SAVE_ALL
1538	wr 	%l0, PSR_ET, %psr
1539	WRITE_PAUSE
1540
1541	call	kgdb_trap
1542	 add	%sp, STACKFRAME_SZ, %o0
1543
1544	RESTORE_ALL
1545	.size	kgdb_trap_low,.-kgdb_trap_low
1546#endif
1547
1548	.align	4
1549	.globl	flush_patch_exception
1550flush_patch_exception:
1551	FLUSH_ALL_KERNEL_WINDOWS;
1552	ldd	[%o0], %o6
1553	jmpl	%o7 + 0xc, %g0			! see asm-sparc/processor.h
1554	 mov	1, %g1				! signal EFAULT condition
1555
1556	.align	4
1557	.globl	kill_user_windows, kuw_patch1_7win
1558	.globl	kuw_patch1
1559kuw_patch1_7win:	sll	%o3, 6, %o3
1560
1561	/* No matter how much overhead this routine has in the worst
1562	 * case scenario, it is several times better than taking the
1563	 * traps with the old method of just doing flush_user_windows().
1564	 */
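	/* Each loop iteration below rotates a shadow %wim one window to
	 * the right ((wim >> 1) | (wim << NWINDOWS-1), patched for 7 or
	 * 8 windows) and clears that window's bit from the user window
	 * mask, simulating the saves that would otherwise have to trap.
	 */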
1565kill_user_windows:
1566	ld	[%g6 + TI_UWINMASK], %o0	! get current umask
1567	orcc	%g0, %o0, %g0			! if no bits set, we are done
1568	be	3f				! nothing to do
1569	 rd	%psr, %o5			! must clear interrupts
1570	or	%o5, PSR_PIL, %o4		! or else that could change
1571	wr	%o4, 0x0, %psr			! the uwinmask state
1572	WRITE_PAUSE				! burn them cycles
15731:
1574	ld	[%g6 + TI_UWINMASK], %o0	! get consistent state
1575	orcc	%g0, %o0, %g0			! did an interrupt come in?
1576	be	4f				! yep, we are done
1577	 rd	%wim, %o3			! get current wim
1578	srl	%o3, 1, %o4			! simulate a save
1579kuw_patch1:
1580	sll	%o3, 7, %o3			! compute next wim
1581	or	%o4, %o3, %o3			! result
1582	andncc	%o0, %o3, %o0			! clean this bit in umask
1583	bne	kuw_patch1			! not done yet
1584	 srl	%o3, 1, %o4			! begin another save simulation
1585	wr	%o3, 0x0, %wim			! set the new wim
1586	st	%g0, [%g6 + TI_UWINMASK]	! clear uwinmask
15874:
1588	wr	%o5, 0x0, %psr			! re-enable interrupts
1589	WRITE_PAUSE				! burn baby burn
15903:
1591	retl					! return
1592	 st	%g0, [%g6 + TI_W_SAVED]		! no windows saved
1593
1594	.align	4
1595	.globl	restore_current
1596restore_current:
1597	LOAD_CURRENT(g6, o0)
1598	retl
1599	 nop
1600
1601#ifdef CONFIG_PCIC_PCI
1602#include <asm/pcic.h>
1603
1604	.align	4
1605	.globl	linux_trap_ipi15_pcic
1606linux_trap_ipi15_pcic:
1607	rd	%wim, %l3
1608	SAVE_ALL
1609
1610	/*
1611	 * First deactivate the NMI, or we cannot drop ET and cannot
1612	 * take window spill traps.  The busy loop is necessary because
1613	 * the PIO error sometimes does not go away quickly and we would
1614	 * trap again.
1615	 */
1616	sethi	%hi(pcic_regs), %o1
1617	ld	[%o1 + %lo(pcic_regs)], %o2
1618
1619	! Get pending status for printouts later.
1620	ld	[%o2 + PCI_SYS_INT_PENDING], %o0
1621
1622	mov	PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1623	stb	%o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
16241:
1625	ld	[%o2 + PCI_SYS_INT_PENDING], %o1
1626	andcc	%o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1627	bne	1b
1628	 nop
1629
1630	or	%l0, PSR_PIL, %l4
1631	wr	%l4, 0x0, %psr
1632	WRITE_PAUSE
1633	wr	%l4, PSR_ET, %psr
1634	WRITE_PAUSE
1635
1636	call	pcic_nmi
1637	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
1638	RESTORE_ALL
1639
1640	.globl	pcic_nmi_trap_patch
1641pcic_nmi_trap_patch:
1642	sethi	%hi(linux_trap_ipi15_pcic), %l3
1643	jmpl	%l3 + %lo(linux_trap_ipi15_pcic), %g0
1644	 rd	%psr, %l0
1645	.word	0
1646
1647#endif /* CONFIG_PCIC_PCI */
1648
1649	.globl	flushw_all
1650flushw_all:
1651	save	%sp, -0x40, %sp
1652	save	%sp, -0x40, %sp
1653	save	%sp, -0x40, %sp
1654	save	%sp, -0x40, %sp
1655	save	%sp, -0x40, %sp
1656	save	%sp, -0x40, %sp
1657	save	%sp, -0x40, %sp
1658	restore
1659	restore
1660	restore
1661	restore
1662	restore
1663	restore
1664	ret
1665	 restore
1666
1667/* End of entry.S */
1668