/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#ifdef CONFIG_64BIT
#define CMPIB           cmpib,*
#define CMPB            cmpb,*
#define COND(x)		*x

	.level 2.0w
#else
#define CMPIB           cmpib,
#define CMPB            cmpb,
#define COND(x)		x

	.level 2.0
#endif

	.import         pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
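
	/* Not from the original source: a rough C model of the macro
	 * above, for illustration only (the function name and masks are
	 * hypothetical; bit arithmetic follows the depd,z/extrd,u
	 * operands above):
	 *
	 *	unsigned long space_to_prot(unsigned long spc)
	 *	{
	 *	#if (SPACEID_SHIFT) == 0
	 *		return (spc & 0x7fffffffUL) << 1;	// depd,z
	 *	#else
	 *		return (spc >> (SPACEID_SHIFT - 1)) & 0xffffffffUL;
	 *	#endif
	 *	}
	 */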

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 * For Faults:
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * For Interrupts:
	 *      If sr7 == 0
	 *          Already using a kernel stack, check to see if r30
	 *          is already pointing to the per processor interrupt
	 *          stack. If it is, call the get_stack_use_r30 macro
	 *          to push a pt_regs structure on the stack, and store
	 *          registers there. Otherwise, call get_stack_use_cr31
	 *          to get a pointer to the base of the interrupt stack
	 *          and push a pt_regs structure on that stack.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *          N.B: We don't use the interrupt stack for the
	 *          first interrupt from userland, because signals/
	 *          resched's are processed when returning to userland,
	 *          and we can sleep in those cases.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

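	/* Illustration only (not in the original source): the fault-path
	 * stack choice described above, sketched in C with hypothetical
	 * mfsp()/mfctl() helpers and __pa() standing in for tophys:
	 *
	 *	if (mfsp(7) == 0) {
	 *		// get_stack_use_r30: already on a kernel stack
	 *		regs = __pa(r30);
	 *		r30 += PT_SZ_ALGN;
	 *	} else {
	 *		// get_stack_use_cr30: use the pt_regs in the task
	 *		ti   = mfctl(30);		// thread_info
	 *		regs = __pa(ti->task) + TASK_REGS;
	 *		r30  = ti + THREAD_SZ_ALGN;
	 *	}
	 */
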
	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl   %cr30, %r1
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r30, PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	STREG   %r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros */
	.macro		EXTR	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	extrd,u		\reg1,32+\start,\length,\reg2
#else
	extrw,u		\reg1,\start,\length,\reg2
#endif
	.endm

	.macro		DEP	reg1,start,length,reg2
#ifdef CONFIG_64BIT
	depd		\reg1,32+\start,\length,\reg2
#else
	depw		\reg1,\start,\length,\reg2
#endif
	.endm

	.macro		DEPI	val,start,length,reg
#ifdef CONFIG_64BIT
	depdi		\val,32+\start,\length,\reg
#else
	depwi		\val,\start,\length,\reg
#endif
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * faulting address.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
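
	/* Illustration only: the macro above is branch-free; the
	 * or,COND(=) nullifies the mfctl when \spc is zero, i.e. roughly
	 *
	 *	pgd = __pa(swapper_pg_dir);
	 *	if (spc != 0)
	 *		pgd = mfctl(25);	// current user pgd
	 */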

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel.

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	EXTR		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	EXTR		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	DEP             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
#ifdef CONFIG_64BIT
	shld		%r9,PxD_VALUE_SHIFT,\pmd
#else
	shlw		%r9,PxD_VALUE_SHIFT,\pmd
#endif
	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm
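
	/* Illustration only: the walk above in rough C (index and mask
	 * arithmetic follows the EXTR/DEP operands; helper and mask
	 * names are hypothetical):
	 *
	 *	pmd = ((u32 *)pmd_base)[pmd_index(va)];		// ldw,s
	 *	if (!(pmd & PxD_FLAG_PRESENT))
	 *		goto fault;
	 *	pte_base = (pmd & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT;
	 *	pte = ((pte_t *)pte_base)[pte_index(va)];
	 *	if (!(pte & _PAGE_PRESENT))
	 *		goto fault;
	 */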

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro		L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm
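
	/* Illustration only: the repeated extrd,u,*= tests simply
	 * nullify each instruction while va lies in the first 4GB
	 * covered by the pmd adjacent to the pgd; in rough C:
	 *
	 *	if (va >> ASM_PGDIR_SHIFT) {		// above first 4GB
	 *		pgd = ((u32 *)pgd_base)[pgd_index(va)];
	 *		if (!(pgd & PxD_FLAG_PRESENT))
	 *			goto fault;
	 *		pmd_base = pgd << PxD_VALUE_SHIFT;
	 *	} else {
	 *		pmd_base = pgd_base + ASM_PGD_PMD_OFFSET;
	 *	}
	 *	// ...then the L2_ptep walk as above
	 */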

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_ptep	ptep,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptep)
	.endm
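
	/* Illustration only, in C: the and,COND(<>) nullifies the store
	 * when the bit is already set, so a hot cache line is never
	 * dirtied needlessly:
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptep = pte | _PAGE_ACCESSED;
	 */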

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptep,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptep)
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
	.endm
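
	/* Illustration only: a rough summary of what the macro leaves in
	 * its two operands (exact bit positions are the ones encoded in
	 * the depd/extrd operands above):
	 *
	 *	prot = space_to_prot(spc)	// protection id
	 *	     | T/D/B and access-rights bits copied from pte
	 *	     | PL1/PL2 privilege levels (user, or 0 for gateway)
	 *	     | U bit if _PAGE_NO_CACHE;
	 *	pte  = physical page number | page-size encoding;
	 */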

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
	extru		\pte,24,25,\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
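
	/* Illustration only, in C terms (the field positions are those
	 * encoded in the extrd operands above; helper names are
	 * hypothetical):
	 *
	 *	if (top_nibble_of_32bit_addr(pte) == 0xf)  // 0xfXXXXXXX I/O
	 *		pte = sign_extend_upward(pte);     // extend the f's
	 */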

	/* The alias region is an 8MB-aligned, 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	DEPI		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	ldi		(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z		\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm
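
	/* Usage sketch (illustration only): pacache.S enters with
	 *
	 *	%r26 = "to" TLB entry (destination physical page)
	 *	%r23 = "from" TLB entry (source page, if any)
	 *
	 * and do_alias picks one based on which half of the alias region
	 * faulted; the bit tested by the extr above selects, roughly,
	 *
	 *	pte = (va & half_region_bit) ? from : to;
	 */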


	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	.export fault_vector_20

	.text

	.align 4096

fault_vector_20:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
#if 0
	naitlb_20	16
#else
	def             16
#endif
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#ifndef CONFIG_64BIT

	.export fault_vector_11

	.align 2048

fault_vector_11:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
#if 0
	naitlb_11	16
#else
	def             16
#endif
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31

#endif

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_from_kernel_thread to properly set up
	 * the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.export __kernel_thread, code
	.import do_fork
__kernel_thread:
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)	     	/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

	.export	ret_from_kernel_thread
ret_from_kernel_thread:

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26

	.import	sys_execve, code
	.export	__execve, code
__execve:
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return    /* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop

	.align 4

	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
	.export	_switch_to, code
_switch_to:
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align 4096

	.export	syscall_exit_rfi
syscall_exit_rfi:
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page).  Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm     PSW_SM_I, %r0

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19
#ifdef CONFIG_SMP
	mfctl   %cr30,%r1
	ldw	TI_CPU(%r1),%r1 /* get cpu # - int */
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
#ifdef CONFIG_64BIT
	shld	%r1, 6, %r20
#else
	shlw	%r1, 5, %r20
#endif
	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

intr_check_resched:

	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_SIGPENDING */
	bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0; otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	CMPIB<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	.import do_signal,code
intr_do_signal:
	/*
		This check is critical to having LWS
		working. The IASQ is zero on the gateway
		page and we cannot deliver any signals until
		we get off the gateway page.

		Only do signals if we are returning to user space
	*/
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB= 0,%r20,intr_restore /* backward */
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB= 0,%r20,intr_restore /* backward */
	nop

	copy	%r0, %r24			/* unsigned long in_syscall */
	copy	%r16, %r25			/* struct pt_regs *regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_signal,%r2
	copy	%r0, %r26			/* sigset_t *oldset = NULL */

	b	intr_check_sig
	nop

	/*
	 * External interrupts.
	 */

intr_extint:
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b,n 3f

1:
#if 0  /* Interrupt Stack support not working yet! */
	mfctl	%cr31,%r1
	copy	%r30,%r17
	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
#ifdef CONFIG_64BIT
	depdi	0,63,15,%r17
#else
	depi	0,31,15,%r17
#endif
	CMPB=,n	%r1,%r17,2f
	get_stack_use_cr31
	b,n 3f
#endif
2:
	get_stack_use_r30

3:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

	.export         intr_save, code /* for os_hpmc */

intr_save:
	mfsp    %sr7,%r16
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c).
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	CMPIB=,n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_flush_20w:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_flush_11:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi           7,7,3,prot
	depi            1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi		0,31,12,pte
	extru		pte,24,25,pte

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_flush_20:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,32,pte
	idtlbt          pte,prot

	rfir
	nop
#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

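	/* Illustration only: the decode below in rough C (iir is %cr19,
	 * field positions follow the extrw operands; "nullify" means
	 * set PSW_N so the faulting insn is skipped on rfir):
	 *
	 *	if ((iir & 0x280) == 0x280) {		// fdc/fdce/pdc/"fic,4f"
	 *		if (m_bit(iir))			// base modification
	 *			base += index;		// emulate the update
	 *		nullify();
	 *	} else if ((iir & 0x80) == 0x80) {	// probe,r / probe,w
	 *		target = 0;			// no translation, no access
	 *		nullify();
	 *	} else
	 *		goto nadtlb_fault;		// slow path
	 */
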
	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw] */
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * An itlb miss is a little different, since we allow users to
	 * fault on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	ldcw            0(t0),t1
	cmpib,=         0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_20w:
#endif

	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	ldcw            0(t0),t1
	cmpib,=         0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_11:
#endif

	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	ldcw            0(t0),t1
	cmpib,=         0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt          pte,prot

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_20:
#endif

	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.export sys_fork_wrapper
	.export child_return
sys_fork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25
	copy	%r1,%r24
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26

	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv %r0(%r2)
	STREG	%r20,PT_GR20(%r1)

	/* Set the return value for the child */
child_return:
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28


	.export sys_clone_wrapper
sys_clone_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2

	.export sys_vfork_wrapper
sys_vfork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2


	.macro  execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so: why would the new thread need the
	 * old thread's registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG %r2,-RP_OFFSET(%r30)
	ldo FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL \execve,%r2
	copy %r1,%arg0

	ldo -FRAME_SIZE(%r30),%r30
	LDREG -RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo -1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy %r2,%r19

error_\execve:
	bv %r0(%r19)
	nop
	.endm

	.export sys_execve_wrapper
	.import sys_execve

sys_execve_wrapper:
	execve_wrapper sys_execve

#ifdef CONFIG_64BIT
	.export sys32_execve_wrapper
	.import sys32_execve

sys32_execve_wrapper:
	execve_wrapper sys32_execve
#endif

	.export sys_rt_sigreturn_wrapper
sys_rt_sigreturn_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */

	.export sys_sigaltstack_wrapper
sys_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	bl	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop

#ifdef CONFIG_64BIT
	.export sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	b,l	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
#endif

	.export sys_rt_sigsuspend_wrapper
sys_rt_sigsuspend_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r24
	reg_save %r24

	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	b,l	sys_rt_sigsuspend,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	bl	sys_rt_sigsuspend,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_restore %r1

	bv	%r0(%r2)
	nop

2065	.export syscall_exit
2066syscall_exit:
2067
2068	/* NOTE: HP-UX syscalls also come through here
2069	 * after hpux_syscall_exit fixes up return
2070	 * values. */
2071
2072	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
2073	 * via syscall_exit_rfi if the signal was received while the process
2074	 * was running.
2075	 */
2076
2077	/* save return value now */
2078
2079	mfctl     %cr30, %r1
2080	LDREG     TI_TASK(%r1),%r1
2081	STREG     %r28,TASK_PT_GR28(%r1)
2082
2083#ifdef CONFIG_HPUX
2084
2085/* <linux/personality.h> cannot be easily included */
2086#define PER_HPUX 0x10
2087	LDREG     TASK_PERSONALITY(%r1),%r19
2088
2089	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
2090	ldo	  -PER_HPUX(%r19), %r19
2091	CMPIB<>,n 0,%r19,1f
2092
2093	/* Save other hpux returns if personality is PER_HPUX */
2094	STREG     %r22,TASK_PT_GR22(%r1)
2095	STREG     %r29,TASK_PT_GR29(%r1)
20961:
2097
2098#endif /* CONFIG_HPUX */
2099
2100	/* Seems to me that dp could be wrong here, if the syscall involved
2101	 * calling a module, and nothing got round to restoring dp on return.
2102	 */
2103	loadgp
2104
2105syscall_check_bh:
2106
2107	/* Check for software interrupts */
2108
2109	.import irq_stat,data
2110
2111	load32	irq_stat,%r19
2112
2113#ifdef CONFIG_SMP
2114	/* sched.h: int processor */
2115	/* %r26 is used as scratch register to index into irq_stat[] */
2116	ldw     TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
2117
2118	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
2119#ifdef CONFIG_64BIT
2120	shld	%r26, 6, %r20
2121#else
2122	shlw	%r26, 5, %r20
2123#endif
2124	add     %r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
2125#endif /* CONFIG_SMP */

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
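	/* bb numbers bit positions from the most significant end (bit 0
	 * is the MSB), so generic flag bit TIF_NEED_RESCHED is tested at
	 * position 31-TIF_NEED_RESCHED.  In effect:
	 *   if (ti->flags & _TIF_NEED_RESCHED) goto syscall_do_resched;
	 * The TIF_SIGPENDING test below uses the same trick. */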

syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19    /* get ti flags */
	bb,<,n	%r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */

syscall_restore:
	/* Are we being ptraced? */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	LDREG	TASK_PTRACE(%r1), %r19
	bb,<	%r19,31,syscall_restore_rfi
	nop
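	/* Position 31 is the least significant bit in bb's numbering,
	 * i.e. PT_PTRACED (0x1) in task->ptrace: a traced task must
	 * leave through syscall_restore_rfi so the step bits can be
	 * planted in the PSW. */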

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: the rsm/ssm pair makes this sequence atomic with
	 * respect to interrupts */
	rsm     PSW_SM_I, %r0
	LDREG   TASK_PT_GR30(%r1),%r30             /* restore user sp */
	mfsp	%sr3,%r1			   /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */
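	/* The low 2 bits of a branch target encode the privilege level,
	 * and a branch may lower but never raise it; the PL 3 deposited
	 * here is what makes the be below drop back to user mode. */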

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if (%r19.PA_SINGLESTEP_BIT) { %r20.27 = 1; } */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if (%r19.PA_BLOCKSTEP_BIT) { %r20.7 = 1; } */
	extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */
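	/* extru,= nullifies the next instruction when the extracted bit
	 * is zero, so each depi above runs only if its ptrace flag is
	 * set.  Roughly: single-step sets PSW R (recovery counter trap,
	 * primed to -1 above), block-step sets PSW T (taken-branch
	 * trap). */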

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if the old D bit is clear, it means we didn't save all
	 * registers on syscall entry, so do that now.  This only happens
	 * on TRACEME calls, or if someone attached to us while we were in
	 * a syscall.  We could make this more efficient by not saving
	 * r3-r18, but then we wouldn't be able to use the common
	 * intr_restore path.  It is only for traced processes anyway, so
	 * performance is not an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
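	/* The ldo above sits in the delay slot of the bb and executes
	 * whether or not the branch is taken, so %r25 points at pt_regs
	 * on both paths (pt_regs_ok copies it into %r16 below). */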
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b       syscall_check_bh  /* if resched, we start over again */
	nop

	.import do_signal,code
syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	   FIXME: After this point the process structure should be
	   consistent with all the relevant state of the process
	   before the syscall.  We need to verify this. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r25		/* struct pt_regs *regs */
	reg_save %r25

	ldi	1, %r24				/* unsigned long in_syscall */

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif
	BL	do_signal,%r2
	copy	%r0, %r26			/* sigset_t *oldset = NULL */
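	/* Arguments as seen by do_signal: oldset (NULL) in %r26, regs in
	 * %r25, in_syscall (1) in %r24, per the PA argument register
	 * convention arg0..arg2 = %r26..%r24.  The copy above executes
	 * in BL's delay slot. */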

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

	/*
	 * get_register is used by the non-access TLB miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */

get_register:
	blr     %r8,%r0
	nop
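	/* blr %r8,%r0 is a computed branch (no link): it jumps to
	 * %r8 * 8 bytes past the delay-slot nop above, so each register
	 * below gets one two-instruction, 8-byte slot: a bv back through
	 * %r25, with the copy/ldi doing the work in the bv's delay
	 * slot. */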
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1

	/*
	 * set_register is used by the non-access TLB miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */

set_register:
	blr     %r8,%r0
	nop
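	/* Same blr dispatch as get_register above: two instructions
	 * (8 bytes) per register, with the work done in the bv delay
	 * slot. */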
	bv      %r0(%r25)    /* r0 (silly, but it is a placeholder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 (no-op: the value is already in r1) */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
