xref: /linux/arch/parisc/kernel/entry.S (revision a2cce7a9f1b8cc3d4edce106fb971529f1d4d9ce)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 *  - handle in assembly and use shadowed registers only
29 *  - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#include <linux/linkage.h>
41
42#ifdef CONFIG_64BIT
43	.level 2.0w
44#else
45	.level 2.0
46#endif
47
48	.import		pa_tlb_lock,data
49
50	/* space_to_prot macro creates a prot id from a space id */
51
52#if (SPACEID_SHIFT) == 0
	/* No space-id shift: form the protection id by depositing the
	 * space id one bit to the left (depd,z zeroes the rest of \prot). */
53	.macro  space_to_prot spc prot
54	depd,z  \spc,62,31,\prot
55	.endm
56#else
	/* Otherwise extract the space id, shifted right by SPACEID_SHIFT,
	 * to form the protection id. */
57	.macro  space_to_prot spc prot
58	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
59	.endm
60#endif
61
62	/* Switch to virtual mapping, trashing only %r1 */
63	.macro  virt_map
	/* Switch from physical-mode trap entry to the virtually mapped
	 * kernel: zero %sr4-%sr6, build KERNEL_PSW in %ipsw, point the
	 * IIAOQ front/back queues at local label 4 below, then rfir
	 * resumes there with translation enabled.  Only %r1 is trashed
	 * (%r29 is converted to a virtual address via tovirt_r1). */
64	/* pcxt_ssm_bug */
65	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" rule */
66	mtsp	%r0, %sr4
67	mtsp	%r0, %sr5
68	mtsp	%r0, %sr6
69	tovirt_r1 %r29
70	load32	KERNEL_PSW, %r1
71
72	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
73	mtctl	%r0, %cr17	/* Clear IIASQ tail */
74	mtctl	%r0, %cr17	/* Clear IIASQ head */
75	mtctl	%r1, %ipsw
76	load32	4f, %r1
77	mtctl	%r1, %cr18	/* Set IIAOQ tail */
78	ldo	4(%r1), %r1
79	mtctl	%r1, %cr18	/* Set IIAOQ head */
80	rfir
81	nop
824:
83	.endm
84
85	/*
86	 * The "get_stack" macros are responsible for determining the
87	 * kernel stack value.
88	 *
89	 *      If sr7 == 0
90	 *          Already using a kernel stack, so call the
91	 *          get_stack_use_r30 macro to push a pt_regs structure
92	 *          on the stack, and store registers there.
93	 *      else
94	 *          Need to set up a kernel stack, so call the
95	 *          get_stack_use_cr30 macro to set up a pointer
96	 *          to the pt_regs structure contained within the
97	 *          task pointer pointed to by cr30. Set the stack
98	 *          pointer to point to the end of the task structure.
99	 *
100	 * Note that we use shadowed registers for temps until
101	 * we can save %r26 and %r29. %r26 is used to preserve
102	 * %r8 (a shadowed register) which temporarily contained
103	 * either the fault type ("code") or the eirr. We need
104	 * to use a non-shadowed register to carry the value over
105	 * the rfir in virt_map. We use %r26 since this value winds
106	 * up being passed as the argument to either do_cpu_irq_mask
107	 * or handle_interruption. %r29 is used to hold a pointer
108	 * to the register save area, and once again, it needs to
109	 * be a non-shadowed register so that it survives the rfir.
110	 *
111	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
112	 */
113
114	.macro  get_stack_use_cr30
115
116	/* we save the registers in the task struct */
117
	/* %cr30 holds the thread_info pointer; the kernel stack starts
	 * THREAD_SZ_ALGN above it.  The interrupted %r30 is parked in
	 * %r17 until it can be stored into pt_regs. */
118	copy	%r30, %r17
119	mfctl   %cr30, %r1
120	ldo	THREAD_SZ_ALGN(%r1), %r30
121	mtsp	%r0,%sr7
122	mtsp	%r16,%sr3
123	tophys  %r1,%r9
124	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
125	tophys  %r1,%r9
126	ldo     TASK_REGS(%r9),%r9
	/* Save the non-shadowed registers that must survive the rfir
	 * in virt_map (see the block comment above). */
127	STREG   %r17,PT_GR30(%r9)
128	STREG   %r29,PT_GR29(%r9)
129	STREG   %r26,PT_GR26(%r9)
130	STREG	%r16,PT_SR7(%r9)
	/* %r29 = physical address of the pt_regs save area */
131	copy    %r9,%r29
132	.endm
133
134	.macro  get_stack_use_r30
135
136	/* we put a struct pt_regs on the stack and save the registers there */
137
	/* Already on a kernel stack (sr7 == 0): push a PT_SZ_ALGN frame
	 * and save into it; %r29 ends up as the physical pt_regs pointer. */
138	tophys  %r30,%r9
139	copy	%r30,%r1
140	ldo	PT_SZ_ALGN(%r30),%r30
141	STREG   %r1,PT_GR30(%r9)
142	STREG   %r29,PT_GR29(%r9)
143	STREG   %r26,PT_GR26(%r9)
144	STREG	%r16,PT_SR7(%r9)
145	copy    %r9,%r29
146	.endm
147
	/* Undo the get_stack_* macros: reload %r1 and %r30 from the
	 * pt_regs area pointed to by %r29, restoring %r29 itself last. */
148	.macro  rest_stack
149	LDREG   PT_GR1(%r29), %r1
150	LDREG   PT_GR30(%r29),%r30
151	LDREG   PT_GR29(%r29),%r29
152	.endm
153
154	/* default interruption handler
155	 * (calls traps.c:handle_interruption) */
156	.macro	def code
	/* Branch to the common trap path; the delay slot loads the trap
	 * number into shadowed %r8 for intr_save.  Each vector entry is
	 * padded to 32 bytes. */
157	b	intr_save
158	ldi     \code, %r8
159	.align	32
160	.endm
161
162	/* Interrupt interruption handler
163	 * (calls irq.c:do_cpu_irq_mask) */
164	.macro	extint code
	/* External interrupt: the delay slot captures %sr7 in %r16 so
	 * intr_extint can tell kernel (0) from user context. */
165	b	intr_extint
166	mfsp    %sr7,%r16
167	.align	32
168	.endm
169
170	.import	os_hpmc, code
171
172	/* HPMC handler */
	/* High Priority Machine Check vector.  The leading nop and the
	 * trailing checksum/length words are patched at runtime (see the
	 * in-line comments); firmware validates the checksum before
	 * dispatching to os_hpmc. */
173	.macro	hpmc code
174	nop			/* must be a NOP, will be patched later */
175	load32	PA(os_hpmc), %r3
176	bv,n	0(%r3)
177	nop
178	.word	0		/* checksum (will be patched) */
179	.word	PA(os_hpmc)	/* address of handler */
180	.word	0		/* length of handler */
181	.endm
182
183	/*
184	 * Performance Note: Instructions will be moved up into
185	 * this part of the code later on, once we are sure
186	 * that the tlb miss handlers are close to final form.
187	 */
188
189	/* Register definitions for tlb miss handler macros */
190
191	va  = r8	/* virtual address for which the trap occurred */
192	spc = r24	/* space for which the trap occurred */
193
194#ifndef CONFIG_64BIT
195
196	/*
197	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
198	 */
199
200	.macro	itlb_11 code
201
	/* Instruction TLB miss (PA 1.1): faulting space/offset come from
	 * the pc queues; the mfctl fills the branch delay slot. */
202	mfctl	%pcsq, spc
203	b	itlb_miss_11
204	mfctl	%pcoq, va
205
206	.align		32
207	.endm
208#endif
209
210	/*
211	 * itlb miss interruption handler (parisc 2.0)
212	 */
213
214	.macro	itlb_20 code
	/* Instruction TLB miss (PA 2.0): dispatch to the wide or narrow
	 * handler; the mfctl of %pcoq fills the delay slot. */
215	mfctl	%pcsq, spc
216#ifdef CONFIG_64BIT
217	b       itlb_miss_20w
218#else
219	b	itlb_miss_20
220#endif
221	mfctl	%pcoq, va
222
223	.align		32
224	.endm
225
226#ifndef CONFIG_64BIT
227	/*
228	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
229	 */
230
231	.macro	naitlb_11 code
232
	/* Non-access instruction TLB miss (PA 1.1): space/offset come
	 * from %isr/%ior; the second mfctl fills the delay slot. */
233	mfctl	%isr,spc
234	b	naitlb_miss_11
235	mfctl 	%ior,va
236
237	.align		32
238	.endm
239#endif
240
241	/*
242	 * naitlb miss interruption handler (parisc 2.0)
243	 */
244
245	.macro	naitlb_20 code
246
	/* Non-access instruction TLB miss (PA 2.0): wide/narrow dispatch. */
247	mfctl	%isr,spc
248#ifdef CONFIG_64BIT
249	b       naitlb_miss_20w
250#else
251	b	naitlb_miss_20
252#endif
253	mfctl 	%ior,va
254
255	.align		32
256	.endm
257
258#ifndef CONFIG_64BIT
259	/*
260	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
261	 */
262
263	.macro	dtlb_11 code
264
	/* Data TLB miss (PA 1.1): faulting space/offset from %isr/%ior. */
265	mfctl	%isr, spc
266	b	dtlb_miss_11
267	mfctl	%ior, va
268
269	.align		32
270	.endm
271#endif
272
273	/*
274	 * dtlb miss interruption handler (parisc 2.0)
275	 */
276
277	.macro	dtlb_20 code
278
	/* Data TLB miss (PA 2.0): wide/narrow dispatch. */
279	mfctl	%isr, spc
280#ifdef CONFIG_64BIT
281	b       dtlb_miss_20w
282#else
283	b	dtlb_miss_20
284#endif
285	mfctl	%ior, va
286
287	.align		32
288	.endm
289
290#ifndef CONFIG_64BIT
291	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
292
293	.macro	nadtlb_11 code
294
	/* Non-access data TLB miss (PA 1.1). */
295	mfctl	%isr,spc
296	b       nadtlb_miss_11
297	mfctl	%ior,va
298
299	.align		32
300	.endm
301#endif
302
303	/* nadtlb miss interruption handler (parisc 2.0) */
304
305	.macro	nadtlb_20 code
306
	/* Non-access data TLB miss (PA 2.0): wide/narrow dispatch. */
307	mfctl	%isr,spc
308#ifdef CONFIG_64BIT
309	b       nadtlb_miss_20w
310#else
311	b       nadtlb_miss_20
312#endif
313	mfctl	%ior,va
314
315	.align		32
316	.endm
317
318#ifndef CONFIG_64BIT
319	/*
320	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
321	 */
322
323	.macro	dbit_11 code
324
	/* TLB dirty-bit trap (PA 1.1). */
325	mfctl	%isr,spc
326	b	dbit_trap_11
327	mfctl	%ior,va
328
329	.align		32
330	.endm
331#endif
332
333	/*
334	 * dirty bit trap interruption handler (parisc 2.0)
335	 */
336
337	.macro	dbit_20 code
338
	/* TLB dirty-bit trap (PA 2.0): wide/narrow dispatch. */
339	mfctl	%isr,spc
340#ifdef CONFIG_64BIT
341	b       dbit_trap_20w
342#else
343	b	dbit_trap_20
344#endif
345	mfctl	%ior,va
346
347	.align		32
348	.endm
349
350	/* In LP64, the space contains part of the upper 32 bits of the
351	 * fault.  We have to extract this and place it in the va,
352	 * zeroing the corresponding bits in the space register */
	/* LP64 only: move the low SPACEID_SHIFT bits of \spc into the top
	 * of \va (they encode the upper fault-address bits) and clear
	 * them from \spc.  No-op on 32-bit builds. */
353	.macro		space_adjust	spc,va,tmp
354#ifdef CONFIG_64BIT
355	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
356	depd		%r0,63,SPACEID_SHIFT,\spc
357	depd		\tmp,31,SPACEID_SHIFT,\va
358#endif
359	.endm
360
361	.import		swapper_pg_dir,code
362
363	/* Get the pgd.  For faults on space zero (kernel space), this
364	 * is simply swapper_pg_dir.  For user space faults, the
365	 * pgd is stored in %cr25 */
366	.macro		get_pgd		spc,reg
	/* Load swapper_pg_dir unconditionally; the or,COND(=) nullifies
	 * the mfctl when \spc == 0 (kernel space), so user faults get the
	 * per-process pgd from %cr25 instead. */
367	ldil		L%PA(swapper_pg_dir),\reg
368	ldo		R%PA(swapper_pg_dir)(\reg),\reg
369	or,COND(=)	%r0,\spc,%r0
370	mfctl		%cr25,\reg
371	.endm
372
373	/*
374		space_check(spc,tmp,fault)
375
376		spc - The space we saw the fault with.
377		tmp - The place to store the current space.
378		fault - Function to call on failure.
379
380		Only allow faults on different spaces from the
381		currently active one if we're the kernel
382
383	*/
384	.macro		space_check	spc,tmp,fault
	/* \tmp = current space (%sr7); if running as kernel (%sr7 == 0)
	 * the compare below is defeated by copying \spc over \tmp, so
	 * only user-mode faults on a foreign space branch to \fault. */
385	mfsp		%sr7,\tmp
386	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
387					 * as kernel, so defeat the space
388					 * check if it is */
389	copy		\spc,\tmp
390	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
391	cmpb,COND(<>),n	\tmp,\spc,\fault
392	.endm
393
394	/* Look up a PTE in a 2-Level scheme (faulting at each
395	 * level if the entry isn't present
396	 *
397	 * NOTE: we use ldw even for LP64, since the short pointers
398	 * can address up to 1TB
399	 */
	/* Walk the last page-table level: index \pmd by the pmd/pgd index
	 * bits of \va, then by the pte index bits, branching to \fault if
	 * either entry is not present.  Leaves the pte in \pte and the
	 * pte's address in \pmd.  Clobbers %r9. */
400	.macro		L2_ptep	pmd,pte,index,va,fault
401#if CONFIG_PGTABLE_LEVELS == 3
402	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
403#else
404# if defined(CONFIG_64BIT)
405	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
406  #else
407  # if PAGE_SIZE > 4096
408	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
409  # else
410	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
411  # endif
412# endif
413#endif
414	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
415	copy		%r0,\pte
416	ldw,s		\index(\pmd),\pmd
417	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
418	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
419	copy		\pmd,%r9
420	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
421	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
422	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
423	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
424	LDREG		%r0(\pmd),\pte
425	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
426	.endm
427
428	/* Look up PTE in a 3-Level scheme.
429	 *
430	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
431	 * first pmd adjacent to the pgd.  This means that we can
432	 * subtract a constant offset to get to it.  The pmd and pgd
433	 * sizes are arranged so that a single pmd covers 4GB (giving
434	 * a full LP64 process access to 8TB) so our lookups are
435	 * effectively L2 for the first 4GB of the kernel (i.e. for
436	 * all ILP32 processes and all the kernel for machines with
437	 * under 4GB of memory) */
438	.macro		L3_ptep pgd,pte,index,va,fault
439#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	/* Each extrd,u,*= tests the pgd-index bits of \va and nullifies
	 * the following instruction when they are zero, i.e. for VAs in
	 * the first 4GB the pgd dereference is skipped; the final ,*<>
	 * instead executes the ldo only in that case, reaching the pmd
	 * that sits at a constant offset from the pgd (hybrid L2/L3 —
	 * see the block comment above). */
440	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
441	copy		%r0,\pte
442	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
443	ldw,s		\index(\pgd),\pgd
444	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
445	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
446	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
447	shld		\pgd,PxD_VALUE_SHIFT,\index
448	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
449	copy		\index,\pgd
450	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
451	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
452#endif
	/* Finish with the common two-level walk. */
453	L2_ptep		\pgd,\pte,\index,\va,\fault
454	.endm
455
456	/* Acquire pa_tlb_lock lock and recheck page is still present. */
457	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
458#ifdef CONFIG_SMP
	/* Kernel-space faults (\spc == 0) skip the lock entirely.
	 * Otherwise spin on pa_tlb_lock with LDCW, then re-read the pte:
	 * if it went non-present while we waited, release the lock (in
	 * the delay slot; \spc is non-zero here) and take \fault. */
459	cmpib,COND(=),n	0,\spc,2f
460	load32		PA(pa_tlb_lock),\tmp
4611:	LDCW		0(\tmp),\tmp1
462	cmpib,COND(=)	0,\tmp1,1b
463	nop
464	LDREG		0(\ptp),\pte
465	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
466	b		\fault
467	stw		 \spc,0(\tmp)
4682:
469#endif
	.endm
471
472	/* Release pa_tlb_lock lock without reloading lock address. */
473	.macro		tlb_unlock0	spc,tmp
474#ifdef CONFIG_SMP
	/* Store a non-zero value (\spc) to free the lock; nullified when
	 * \spc == 0 since kernel faults never took the lock. */
475	or,COND(=)	%r0,\spc,%r0
476	stw             \spc,0(\tmp)
477#endif
478	.endm
479
480	/* Release pa_tlb_lock lock. */
481	.macro		tlb_unlock1	spc,tmp
482#ifdef CONFIG_SMP
	/* Same as tlb_unlock0, but reloads the lock address first. */
483	load32		PA(pa_tlb_lock),\tmp
484	tlb_unlock0	\spc,\tmp
485#endif
486	.endm
487
488	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
489	 * don't needlessly dirty the cache line if it was already set */
490	.macro		update_accessed	ptp,pte,tmp,tmp1
	/* The and,COND(<>) nullifies the store when _PAGE_ACCESSED was
	 * already set, avoiding a needless cache-line dirty. */
491	ldi		_PAGE_ACCESSED,\tmp1
492	or		\tmp1,\pte,\tmp
493	and,COND(<>)	\tmp1,\pte,%r0
494	STREG		\tmp,0(\ptp)
495	.endm
496
497	/* Set the dirty bit (and accessed bit).  No need to be
498	 * clever, this is only used from the dirty fault */
499	.macro		update_dirty	ptp,pte,tmp
	/* Unconditionally set ACCESSED|DIRTY and write the pte back. */
500	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
501	or		\tmp,\pte,\pte
502	STREG		\pte,0(\ptp)
503	.endm
504
505	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
506	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
507	#define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
508
509	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
510	.macro		convert_for_tlb_insert20 pte
	/* Extract the PFN (scaled to the CPU's 4k TLB granularity via
	 * PAGE_ADD_SHIFT) and deposit the default page-size encoding in
	 * the low bits expected by iitlbt/idtlbt. */
511	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
512				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
513	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
514				(63-58)+PAGE_ADD_SHIFT,\pte
515	.endm
516
517	/* Convert the pte and prot to tlb insertion values.  How
518	 * this happens is quite subtle, read below */
519	.macro		make_insert_tlb	spc,pte,prot
520	space_to_prot   \spc \prot        /* create prot id from space */
521	/* The following is the real subtlety.  This is depositing
522	 * T <-> _PAGE_REFTRAP
523	 * D <-> _PAGE_DIRTY
524	 * B <-> _PAGE_DMB (memory break)
525	 *
526	 * Then incredible subtlety: The access rights are
527	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
528	 * See 3-14 of the parisc 2.0 manual
529	 *
530	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
531	 * trigger an access rights trap in user space if the user
532	 * tries to read an unreadable page */
533	depd            \pte,8,7,\prot
534
535	/* PAGE_USER indicates the page can be read with user privileges,
536	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
537	 * contains _PAGE_READ) */
538	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
539	depdi		7,11,3,\prot
540	/* If we're a gateway page, drop PL2 back to zero for promotion
541	 * to kernel privilege (so we can execute the page as kernel).
542	 * Any privilege promotion page always denies read and write */
543	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
544	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
545
546	/* Enforce uncacheable pages.
547	 * This should ONLY be used for MMIO on PA 2.0 machines.
548	 * Memory/DMA is cache coherent on all PA2.0 machines we support
549	 * (that means T-class is NOT supported) and the memory controllers
550	 * on most of those machines only handle cache transactions.
551	 */
552	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
553	depdi		1,12,1,\prot
554
555	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
556	convert_for_tlb_insert20 \pte
557	.endm
558
559	/* Identical macro to make_insert_tlb above, except it
560	 * makes the tlb entry for the differently formatted pa11
561	 * insertion instructions */
	/* PA 1.1 variant of make_insert_tlb: same prot construction, but
	 * formatted for the word-wide iitlba/iitlbp insert instructions. */
562	.macro		make_insert_tlb_11	spc,pte,prot
563	zdep		\spc,30,15,\prot
564	dep		\pte,8,7,\prot
565	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
566	depi		1,12,1,\prot
567	extru,=         \pte,_PAGE_USER_BIT,1,%r0
568	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
569	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
570	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
571
572	/* Get rid of prot bits and convert to page addr for iitlba */
573
574	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
575	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
576	.endm
577
578	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
579	 * to extend into I/O space if the address is 0xfXXXXXXX
580	 * so we extend the f's into the top word of the pte in
581	 * this case */
582	.macro		f_extend	pte,tmp
	/* If the top nibble tested here is all ones (0xf... I/O range),
	 * the addi,<> is nullified and the sign-extending extract runs,
	 * propagating the f's into the upper word of \pte. */
583	extrd,s		\pte,42,4,\tmp
584	addi,<>		1,\tmp,%r0
585	extrd,s		\pte,63,25,\pte
586	.endm
587
588	/* The alias region is an 8MB aligned 16MB to do clear and
589	 * copy user pages at addresses congruent with the user
590	 * virtual address.
591	 *
592	 * To use the alias page, you set %r26 up with the to TLB
593	 * entry (identifying the physical page) and %r23 up with
594	 * the from tlb entry (or nothing if only a to entry---for
595	 * clear_user_page_asm) */
	/* Handle a fault in the tmpalias region: build a fixed T-bit/
	 * dirty prot value (no pte walk) and pick the physical page from
	 * %r23 ("from") or %r26 ("to") based on the VA.  \patype selects
	 * PA 1.1 or 2.0 prot formatting; anything else is a build error.
	 * Faults outside the region, or from kernel space, go to \fault. */
596	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
597	cmpib,COND(<>),n 0,\spc,\fault
598	ldil		L%(TMPALIAS_MAP_START),\tmp
599#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
600	/* on LP64, ldi will sign extend into the upper 32 bits,
601	 * which is behaviour we don't want */
602	depdi		0,31,32,\tmp
603#endif
604	copy		\va,\tmp1
605	depi		0,31,23,\tmp1
606	cmpb,COND(<>),n	\tmp,\tmp1,\fault
607	mfctl		%cr19,\tmp	/* iir */
608	/* get the opcode (first six bits) into \tmp */
609	extrw,u		\tmp,5,6,\tmp
610	/*
611	 * Only setting the T bit prevents data cache movein
612	 * Setting access rights to zero prevents instruction cache movein
613	 *
614	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
615	 * to type field and _PAGE_READ goes to top bit of PL1
616	 */
617	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
618	/*
619	 * so if the opcode is one (i.e. this is a memory management
620	 * instruction) nullify the next load so \prot is only T.
621	 * Otherwise this is a normal data operation
622	 */
623	cmpiclr,=	0x01,\tmp,%r0
624	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
625.ifc \patype,20
626	depd,z		\prot,8,7,\prot
627.else
628.ifc \patype,11
629	depw,z		\prot,8,7,\prot
630.else
631	.error "undefined PA type to do_alias"
632.endif
633.endif
634	/*
635	 * OK, it is in the temp alias region, check whether "from" or "to".
636	 * Check "subtle" note in pacache.S re: r23/r26.
637	 */
638#ifdef CONFIG_64BIT
639	extrd,u,*=	\va,41,1,%r0
640#else
641	extrw,u,=	\va,9,1,%r0
642#endif
643	or,COND(tr)	%r23,%r0,\pte
644	or		%r26,%r0,\pte
645	.endm
646
647
648	/*
649	 * Align fault_vector_20 on 4K boundary so that both
650	 * fault_vector_11 and fault_vector_20 are on the
651	 * same page. This is only necessary as long as we
652	 * write protect the kernel text, which we may stop
653	 * doing once we use large page translations to cover
654	 * the static part of the kernel address space.
655	 */
656
657	.text
658
659	.align 4096
660
	/* PA 2.0 interruption vector table: 32 entries of 32 bytes each,
	 * indexed by trap number.  Unhandled traps use the generic "def"
	 * macro; TLB misses and external interrupts get dedicated stubs. */
661ENTRY(fault_vector_20)
662	/* First vector is invalid (0) */
663	.ascii	"cows can fly"
664	.byte 0
665	.align 32
666
667	hpmc		 1
668	def		 2
669	def		 3
670	extint		 4
671	def		 5
672	itlb_20		 6
673	def		 7
674	def		 8
675	def              9
676	def		10
677	def		11
678	def		12
679	def		13
680	def		14
681	dtlb_20		15
682	naitlb_20	16
683	nadtlb_20	17
684	def		18
685	def		19
686	dbit_20		20
687	def		21
688	def		22
689	def		23
690	def		24
691	def		25
692	def		26
693	def		27
694	def		28
695	def		29
696	def		30
697	def		31
698END(fault_vector_20)
699
700#ifndef CONFIG_64BIT
701
702	.align 2048
703
	/* PA 1.1 interruption vector table (32-bit kernels only); same
	 * layout as fault_vector_20 but with the PA 1.1 TLB stubs. */
704ENTRY(fault_vector_11)
705	/* First vector is invalid (0) */
706	.ascii	"cows can fly"
707	.byte 0
708	.align 32
709
710	hpmc		 1
711	def		 2
712	def		 3
713	extint		 4
714	def		 5
715	itlb_11		 6
716	def		 7
717	def		 8
718	def              9
719	def		10
720	def		11
721	def		12
722	def		13
723	def		14
724	dtlb_11		15
725	naitlb_11	16
726	nadtlb_11	17
727	def		18
728	def		19
729	dbit_11		20
730	def		21
731	def		22
732	def		23
733	def		24
734	def		25
735	def		26
736	def		27
737	def		28
738	def		29
739	def		30
740	def		31
741END(fault_vector_11)
742
743#endif
744	/* Fault vector is separately protected and *must* be on its own page */
745	.align		PAGE_SIZE
746ENTRY(end_fault_vector)
747
748	.import		handle_interruption,code
749	.import		do_cpu_irq_mask,code
750
751	/*
752	 * Child Returns here
753	 *
754	 * copy_thread moved args into task save area.
755	 */
756
757ENTRY(ret_from_kernel_thread)
758
759	/* Call schedule_tail first though */
760	BL	schedule_tail, %r2
761	nop
762
	/* Fetch the thread function (pt_gr26) and its argument (pt_gr25)
	 * from the child's save area; call it via ble through %sr7, then
	 * fall into the common child-return path. */
763	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
764	LDREG	TASK_PT_GR25(%r1), %r26
765#ifdef CONFIG_64BIT
766	LDREG	TASK_PT_GR27(%r1), %r27
767#endif
768	LDREG	TASK_PT_GR26(%r1), %r1
769	ble	0(%sr7, %r1)
770	copy	%r31, %r2
771	b	finish_child_return
772	nop
773ENDPROC(ret_from_kernel_thread)
774
775
776	/*
777	 * struct task_struct *_switch_to(struct task_struct *prev,
778	 *	struct task_struct *next)
779	 *
780	 * switch kernel stacks and return prev */
781ENTRY(_switch_to)
	/* %r26 = prev task, %r25 = next task.  Save callee state and the
	 * resume PC/SP into prev's thread area, load next's, and resume
	 * next at its saved KPC (newly forked tasks resume elsewhere). */
782	STREG	 %r2, -RP_OFFSET(%r30)
783
784	callee_save_float
785	callee_save
786
787	load32	_switch_to_ret, %r2
788
789	STREG	%r2, TASK_PT_KPC(%r26)
790	LDREG	TASK_PT_KPC(%r25), %r2
791
792	STREG	%r30, TASK_PT_KSP(%r26)
793	LDREG	TASK_PT_KSP(%r25), %r30
794	LDREG	TASK_THREAD_INFO(%r25), %r25
	/* delay slot installs next's thread_info in %cr30 */
795	bv	%r0(%r2)
796	mtctl   %r25,%cr30
797
798_switch_to_ret:
799	mtctl	%r0, %cr0		/* Needed for single stepping */
800	callee_rest
801	callee_rest_float
802
	/* return prev in %r28 per the C calling convention */
803	LDREG	-RP_OFFSET(%r30), %r2
804	bv	%r0(%r2)
805	copy	%r26, %r28
806ENDPROC(_switch_to)
807
808	/*
809	 * Common rfi return path for interruptions, kernel execve, and
810	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
811	 * return via this path if the signal was received when the process
812	 * was running; if the process was blocked on a syscall then the
813	 * normal syscall_exit path is used.  All syscalls for traced
814	 * processes exit via intr_restore.
815	 *
816	 * XXX If any syscalls that change a processes space id ever exit
817	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
818	 * adjust IASQ[0..1].
819	 *
820	 */
821
822	.align	PAGE_SIZE
823
824ENTRY(syscall_exit_rfi)
	/* %r16 = current task's pt_regs for the rest of the return path. */
825	mfctl   %cr30,%r16
826	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
827	ldo	TASK_REGS(%r16),%r16
828	/* Force iaoq to userspace, as the user has had access to our current
829	 * context via sigcontext. Also Filter the PSW for the same reason.
830	 */
	/* depi 3,31,2 forces privilege level 3 (user) into both IAOQ words */
831	LDREG	PT_IAOQ0(%r16),%r19
832	depi	3,31,2,%r19
833	STREG	%r19,PT_IAOQ0(%r16)
834	LDREG	PT_IAOQ1(%r16),%r19
835	depi	3,31,2,%r19
836	STREG	%r19,PT_IAOQ1(%r16)
837	LDREG   PT_PSW(%r16),%r19
838	load32	USER_PSW_MASK,%r1
839#ifdef CONFIG_64BIT
840	load32	USER_PSW_HI_MASK,%r20
841	depd    %r20,31,32,%r1
842#endif
843	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
844	load32	USER_PSW,%r1
845	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
846	STREG   %r19,PT_PSW(%r16)
847
848	/*
849	 * If we aren't being traced, we never saved space registers
850	 * (we don't store them in the sigcontext), so set them
851	 * to "proper" values now (otherwise we'll wind up restoring
852	 * whatever was last stored in the task structure, which might
853	 * be inconsistent if an interrupt occurred while on the gateway
854	 * page). Note that we may be "trashing" values the user put in
855	 * them, but we don't support the user changing them.
856	 */
857
858	STREG   %r0,PT_SR2(%r16)
859	mfsp    %sr3,%r19
860	STREG   %r19,PT_SR0(%r16)
861	STREG   %r19,PT_SR1(%r16)
862	STREG   %r19,PT_SR3(%r16)
863	STREG   %r19,PT_SR4(%r16)
864	STREG   %r19,PT_SR5(%r16)
865	STREG   %r19,PT_SR6(%r16)
866	STREG   %r19,PT_SR7(%r16)
867
	/* Common return point: reschedule if needed, else fall through to
	 * the signal check. */
868intr_return:
869	/* check for reschedule */
870	mfctl   %cr30,%r1
871	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
872	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
873
874	.import do_notify_resume,code
875intr_check_sig:
876	/* As above */
877	mfctl   %cr30,%r1
878	LDREG	TI_FLAGS(%r1),%r19
879	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
880	and,COND(<>)	%r19, %r20, %r0
881	b,n	intr_restore	/* skip past if we've nothing to do */
882
883	/* This check is critical to having LWS
884	 * working. The IASQ is zero on the gateway
885	 * page and we cannot deliver any signals until
886	 * we get off the gateway page.
887	 *
888	 * Only do signals if we are returning to user space
889	 */
890	LDREG	PT_IASQ0(%r16), %r20
891	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
892	LDREG	PT_IASQ1(%r16), %r20
893	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
894
895	/* NOTE: We need to enable interrupts if we have to deliver
896	 * signals. We used to do this earlier but it caused kernel
897	 * stack overflows. */
898	ssm     PSW_SM_I, %r0
899
900	copy	%r0, %r25			/* long in_syscall = 0 */
901#ifdef CONFIG_64BIT
902	ldo	-16(%r30),%r29			/* Reference param save area */
903#endif
904
905	BL	do_notify_resume,%r2
906	copy	%r16, %r26			/* struct pt_regs *regs */
907
	/* loop: more work may have become pending while we handled this */
908	b,n	intr_check_sig
909
	/* Restore all state from the pt_regs at %r16 and rfi back to the
	 * interrupted context. */
910intr_restore:
911	copy            %r16,%r29
912	ldo             PT_FR31(%r29),%r1
913	rest_fp         %r1
914	rest_general    %r29
915
916	/* inverse of virt_map */
917	pcxt_ssm_bug
918	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
919	tophys_r1       %r29
920
921	/* Restore space id's and special cr's from PT_REGS
922	 * structure pointed to by r29
923	 */
924	rest_specials	%r29
925
926	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
927	 * It also restores r1 and r30.
928	 */
929	rest_stack
930
931	rfi
932	nop
933
	/* Without CONFIG_PREEMPT, "preempt" on kernel return is just a
	 * plain restore. */
934#ifndef CONFIG_PREEMPT
935# define intr_do_preempt	intr_restore
936#endif /* !CONFIG_PREEMPT */
937
938	.import schedule,code
939intr_do_resched:
940	/* Only call schedule on return to userspace. If we're returning
941	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
942	 * we jump back to intr_restore.
943	 */
	/* IASQ == 0 means kernel context (gateway/kernel space). */
944	LDREG	PT_IASQ0(%r16), %r20
945	cmpib,COND(=)	0, %r20, intr_do_preempt
946	nop
947	LDREG	PT_IASQ1(%r16), %r20
948	cmpib,COND(=)	0, %r20, intr_do_preempt
949	nop
950
951	/* NOTE: We need to enable interrupts if we schedule.  We used
952	 * to do this earlier but it caused kernel stack overflows. */
953	ssm     PSW_SM_I, %r0
954
955#ifdef CONFIG_64BIT
956	ldo	-16(%r30),%r29		/* Reference param save area */
957#endif

	/* tail-call schedule with %r2 = intr_check_sig so we re-check
	 * pending work when it returns */
958
959	ldil	L%intr_check_sig, %r2
960#ifndef CONFIG_64BIT
961	b	schedule
962#else
963	load32	schedule, %r20
964	bv	%r0(%r20)
965#endif
966	ldo	R%intr_check_sig(%r2), %r2
967
968	/* preempt the current task on returning to kernel
969	 * mode from an interrupt, iff need_resched is set,
970	 * and preempt_count is 0. otherwise, we continue on
971	 * our merry way back to the current running task.
972	 */
973#ifdef CONFIG_PREEMPT
974	.import preempt_schedule_irq,code
975intr_do_preempt:
976	rsm	PSW_SM_I, %r0		/* disable interrupts */
977
978	/* current_thread_info()->preempt_count */
979	mfctl	%cr30, %r1
980	LDREG	TI_PRE_COUNT(%r1), %r19
981	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
982	nop				/* prev insn branched backwards */
983
	/* Don't preempt if the interrupted context itself had interrupts
	 * disabled (PSW I bit clear in the saved PSW). */
984	/* check if we interrupted a critical path */
985	LDREG	PT_PSW(%r16), %r20
986	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
987	nop
988
989	BL	preempt_schedule_irq, %r2
990	nop
991
992	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
993#endif /* CONFIG_PREEMPT */
994
995	/*
996	 * External interrupts.
997	 */
998
	/* External-interrupt path (from the extint vector macro, which
	 * left %sr7 in %r16: 0 = already on a kernel stack). */
999intr_extint:
1000	cmpib,COND(=),n 0,%r16,1f
1001
1002	get_stack_use_cr30
1003	b,n 2f
1004
10051:
1006	get_stack_use_r30
10072:
1008	save_specials	%r29
1009	virt_map
1010	save_general	%r29
1011
1012	ldo	PT_FR0(%r29), %r24
1013	save_fp	%r24
1014
1015	loadgp
1016
1017	copy	%r29, %r26	/* arg0 is pt_regs */
1018	copy	%r29, %r16	/* save pt_regs */
1019
1020	ldil	L%intr_return, %r2
1021
1022#ifdef CONFIG_64BIT
1023	ldo	-16(%r30),%r29	/* Reference param save area */
1024#endif
1025
1026	b	do_cpu_irq_mask
1027	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
1028ENDPROC(syscall_exit_rfi)
1029
1030
1031	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1032
	/* Generic interruption entry: trap number arrives in shadowed %r8
	 * (moved to %r26), full state is saved, then handle_interruption
	 * is called with %r2 set to return via intr_check_sig. */
1033ENTRY(intr_save)		/* for os_hpmc */
1034	mfsp    %sr7,%r16
1035	cmpib,COND(=),n 0,%r16,1f
1036	get_stack_use_cr30
1037	b	2f
1038	copy    %r8,%r26
1039
10401:
1041	get_stack_use_r30
1042	copy    %r8,%r26
1043
10442:
1045	save_specials	%r29
1046
1047	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1048
1049	/*
1050	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1051	 *           traps.c.
1052	 *        2) Once we start executing code above 4 Gb, we need
1053	 *           to adjust iasq/iaoq here in the same way we
1054	 *           adjust isr/ior below.
1055	 */
1056
1057	cmpib,COND(=),n        6,%r26,skip_save_ior
1058
1059
1060	mfctl           %cr20, %r16 /* isr */
1061	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1062	mfctl           %cr21, %r17 /* ior */
1063
1064
1065#ifdef CONFIG_64BIT
1066	/*
1067	 * If the interrupted code was running with W bit off (32 bit),
1068	 * clear the b bits (bits 0 & 1) in the ior.
1069	 * save_specials left ipsw value in r8 for us to test.
1070	 */
1071	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1072	depdi           0,1,2,%r17
1073
1074	/*
1075	 * FIXME: This code has hardwired assumptions about the split
1076	 *        between space bits and offset bits. This will change
1077	 *        when we allow alternate page sizes.
1078	 */
1079
1080	/* adjust isr/ior. */
1081	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
1082	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
1083	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
1084#endif
1085	STREG           %r16, PT_ISR(%r29)
1086	STREG           %r17, PT_IOR(%r29)
1087
1088
1089skip_save_ior:
1090	virt_map
1091	save_general	%r29
1092
1093	ldo		PT_FR0(%r29), %r25
1094	save_fp		%r25
1095
1096	loadgp
1097
1098	copy		%r29, %r25	/* arg1 is pt_regs */
1099#ifdef CONFIG_64BIT
1100	ldo		-16(%r30),%r29	/* Reference param save area */
1101#endif
1102
1103	ldil		L%intr_check_sig, %r2
1104	copy		%r25, %r16	/* save pt_regs */
1105
1106	b		handle_interruption
1107	ldo		R%intr_check_sig(%r2), %r2
1108ENDPROC(intr_save)
1109
1110
1111	/*
1112	 * Note for all tlb miss handlers:
1113	 *
1114	 * cr24 contains a pointer to the kernel address space
1115	 * page directory.
1116	 *
1117	 * cr25 contains a pointer to the current user address
1118	 * space page directory.
1119	 *
1120	 * sr3 will contain the space id of the user address space
1121	 * of the current running thread while that thread is
1122	 * running in the kernel.
1123	 */
1124
1125	/*
1126	 * register number allocations.  Note that these are all
1127	 * in the shadowed registers
1128	 */
1129
1130	t0 = r1		/* temporary register 0 */
1131	va = r8		/* virtual address for which the trap occurred */
1132	t1 = r9		/* temporary register 1 */
1133	pte  = r16	/* pte/phys page # */
1134	prot = r17	/* prot bits */
1135	spc  = r24	/* space for which the trap occurred */
1136	ptp = r25	/* page directory/page table pointer */
1137
1138#ifdef CONFIG_64BIT
1139
	/* 64-bit data TLB miss: walk the page table, mark the pte
	 * accessed under pa_tlb_lock, and insert it with idtlbt.  Misses
	 * in the tmpalias region fall out to the alias handler below. */
1140dtlb_miss_20w:
1141	space_adjust	spc,va,t0
1142	get_pgd		spc,ptp
1143	space_check	spc,t0,dtlb_fault
1144
1145	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
1146
1147	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1148	update_accessed	ptp,pte,t0,t1
1149
1150	make_insert_tlb	spc,pte,prot
1151
1152	idtlbt          pte,prot
1153
1154	tlb_unlock1	spc,t0
1155	rfir
1156	nop
1157
1158dtlb_check_alias_20w:
1159	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
1160
1161	idtlbt          pte,prot
1162
1163	rfir
1164	nop
1165
/*
 * 64-bit non-access data TLB miss handler.  Same structure as
 * dtlb_miss_20w above, but on failure the alias path falls back
 * to nadtlb_emulate instead of taking a fault.
 */
1166nadtlb_miss_20w:
1167	space_adjust	spc,va,t0
1168	get_pgd		spc,ptp
1169	space_check	spc,t0,nadtlb_fault
1170
1171	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
1172
1173	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1174	update_accessed	ptp,pte,t0,t1
1175
1176	make_insert_tlb	spc,pte,prot
1177
1178	idtlbt          pte,prot
1179
1180	tlb_unlock1	spc,t0
1181	rfir
1182	nop
1183
/* Alias check; fall-back target is nadtlb_emulate (see below). */
1184nadtlb_check_alias_20w:
1185	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1186
1187	idtlbt          pte,prot
1188
1189	rfir
1190	nop
1191
1192#else
1193
/*
 * PA 1.1 data TLB miss handler: two-level walk (L2_ptep).
 * PA 1.1 has no combined insert, so the translation goes in via
 * the separate idtlba/idtlbp pair, addressed through %sr1.
 */
1194dtlb_miss_11:
1195	get_pgd		spc,ptp
1196
1197	space_check	spc,t0,dtlb_fault
1198
1199	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11
1200
1201	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
1202	update_accessed	ptp,pte,t0,t1
1203
1204	make_insert_tlb_11	spc,pte,prot
1205
1206	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1207	mtsp		spc,%sr1
1208
1209	idtlba		pte,(%sr1,va)
1210	idtlbp		prot,(%sr1,va)
1211
1212	mtsp		t1, %sr1	/* Restore sr1 */
1213
1214	tlb_unlock1	spc,t0
1215	rfir
1216	nop
1217
/* Alias path: insert directly, without the %sr1 space switch. */
1218dtlb_check_alias_11:
1219	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11
1220
1221	idtlba          pte,(va)
1222	idtlbp          prot,(va)
1223
1224	rfir
1225	nop
1226
/*
 * PA 1.1 non-access data TLB miss handler.  Same structure as
 * dtlb_miss_11 above; the alias fall-back is nadtlb_emulate.
 */
1227nadtlb_miss_11:
1228	get_pgd		spc,ptp
1229
1230	space_check	spc,t0,nadtlb_fault
1231
1232	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
1233
1234	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1235	update_accessed	ptp,pte,t0,t1
1236
1237	make_insert_tlb_11	spc,pte,prot
1238
1239	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1240	mtsp		spc,%sr1
1241
1242	idtlba		pte,(%sr1,va)
1243	idtlbp		prot,(%sr1,va)
1244
1245	mtsp		t1, %sr1	/* Restore sr1 */
1246
1247	tlb_unlock1	spc,t0
1248	rfir
1249	nop
1250
/* Alias check; fall-back target is nadtlb_emulate (see below). */
1251nadtlb_check_alias_11:
1252	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1253
1254	idtlba          pte,(va)
1255	idtlbp          prot,(va)
1256
1257	rfir
1258	nop
1259
/*
 * PA 2.0 (narrow kernel) data TLB miss handler: two-level walk,
 * f_extend on the pte, then a single combined idtlbt insert.
 */
1260dtlb_miss_20:
1261	space_adjust	spc,va,t0
1262	get_pgd		spc,ptp
1263	space_check	spc,t0,dtlb_fault
1264
1265	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
1266
1267	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
1268	update_accessed	ptp,pte,t0,t1
1269
1270	make_insert_tlb	spc,pte,prot
1271
1272	f_extend	pte,t1
1273
1274	idtlbt          pte,prot
1275
1276	tlb_unlock1	spc,t0
1277	rfir
1278	nop
1279
/* Alias check; fall-back fault target is dtlb_fault. */
1280dtlb_check_alias_20:
1281	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
1282
1283	idtlbt          pte,prot
1284
1285	rfir
1286	nop
1287
/*
 * PA 2.0 (narrow kernel) non-access data TLB miss handler.
 * Same structure as dtlb_miss_20 above; the alias fall-back is
 * nadtlb_emulate.
 */
1288nadtlb_miss_20:
1289	get_pgd		spc,ptp
1290
1291	space_check	spc,t0,nadtlb_fault
1292
1293	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
1294
1295	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1296	update_accessed	ptp,pte,t0,t1
1297
1298	make_insert_tlb	spc,pte,prot
1299
1300	f_extend	pte,t1
1301
1302	idtlbt		pte,prot
1303
1304	tlb_unlock1	spc,t0
1305	rfir
1306	nop
1307
/* Alias check; fall-back target is nadtlb_emulate (see below). */
1308nadtlb_check_alias_20:
1309	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1310
1311	idtlbt          pte,prot
1312
1313	rfir
1314	nop
1315
1316#endif
1317
1318nadtlb_emulate:
1319
1320	/*
1321	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1322	 * probei instructions. We don't want to fault for these
1323	 * instructions (not only does it not make sense, it can cause
1324	 * deadlocks, since some flushes are done with the mmap
1325	 * semaphore held). If the translation doesn't exist, we can't
1326	 * insert a translation, so have to emulate the side effects
1327	 * of the instruction. Since we don't insert a translation
1328	 * we can get a lot of faults during a flush loop, so it makes
1329	 * sense to try to do it here with minimum overhead. We only
1330	 * emulate fdc,fic,pdc,probew,prober instructions whose base
1331	 * and index registers are not shadowed. We defer everything
1332	 * else to the "slow" path.
1333	 */
1334
1335	mfctl           %cr19,%r9 /* Get iir */
1336
1337	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1338	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1339
	/* get_register takes the register number in %r8 and returns its
	 * value in %r1, or -1 if the register is shadowed (see the
	 * get_register/set_register helpers at the end of this file). */
1340	/* Checks for fdc,fdce,pdc,"fic,4f" only */
1341	ldi             0x280,%r16
1342	and             %r9,%r16,%r17
1343	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
1344	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
1345	BL		get_register,%r25
1346	extrw,u         %r9,15,5,%r8           /* Get index register # */
1347	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1348	copy            %r1,%r24
1349	BL		get_register,%r25
1350	extrw,u         %r9,10,5,%r8           /* Get base register # */
1351	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1352	BL		set_register,%r25
1353	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */
1354
	/*
	 * Skip the interrupted instruction: set the PSW N (nullify) bit
	 * in the saved IPSW so the insn is nullified when we rfir back.
	 */
1355nadtlb_nullify:
1356	mfctl           %ipsw,%r8
1357	ldil            L%PSW_N,%r9
1358	or              %r8,%r9,%r8            /* Set PSW_N */
1359	mtctl           %r8,%ipsw
1360
1361	rfir
1362	nop
1363
1364	/*
1365		When there is no translation for the probe address then we
1366		must nullify the insn and return zero in the target register.
1367		This will indicate to the calling code that it does not have
1368		write/read privileges to this address.
1369
1370		This should technically work for prober and probew in PA 1.1,
1371		and also probe,r and probe,w in PA 2.0
1372
1373		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1374		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1375
1376	*/
1377nadtlb_probe_check:
1378	ldi             0x80,%r16
1379	and             %r9,%r16,%r17
1380	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1381	BL              get_register,%r25      /* Find the target register */
1382	extrw,u         %r9,31,5,%r8           /* Get target register */
1383	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1384	BL		set_register,%r25
1385	copy            %r0,%r1                /* Write zero to target register */
1386	b nadtlb_nullify                       /* Nullify return insn */
1387	nop
1388
1389
1390#ifdef CONFIG_64BIT
/*
 * 64-bit instruction TLB miss handler: three-level walk, then a
 * combined iitlbt insert.  Note there is no alias path here; all
 * failures go straight to itlb_fault.
 */
1391itlb_miss_20w:
1392
1393	/*
1394	 * I miss is a little different, since we allow users to fault
1395	 * on the gateway page which is in the kernel address space.
1396	 */
1397
1398	space_adjust	spc,va,t0
1399	get_pgd		spc,ptp
1400	space_check	spc,t0,itlb_fault
1401
1402	L3_ptep		ptp,pte,t0,va,itlb_fault
1403
1404	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
1405	update_accessed	ptp,pte,t0,t1
1406
1407	make_insert_tlb	spc,pte,prot
1408
1409	iitlbt          pte,prot
1410
1411	tlb_unlock1	spc,t0
1412	rfir
1413	nop
1414
/*
 * 64-bit non-access instruction TLB miss handler.  Same structure
 * as itlb_miss_20w, but with an alias check before faulting.
 */
1415naitlb_miss_20w:
1416
1417	/*
1418	 * I miss is a little different, since we allow users to fault
1419	 * on the gateway page which is in the kernel address space.
1420	 */
1421
1422	space_adjust	spc,va,t0
1423	get_pgd		spc,ptp
1424	space_check	spc,t0,naitlb_fault
1425
1426	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
1427
1428	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1429	update_accessed	ptp,pte,t0,t1
1430
1431	make_insert_tlb	spc,pte,prot
1432
1433	iitlbt          pte,prot
1434
1435	tlb_unlock1	spc,t0
1436	rfir
1437	nop
1438
/* Alias check; fall-back fault target is naitlb_fault. */
1439naitlb_check_alias_20w:
1440	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
1441
1442	iitlbt		pte,prot
1443
1444	rfir
1445	nop
1446
1447#else
1448
/*
 * PA 1.1 instruction TLB miss handler: two-level walk, insert via
 * the separate iitlba/iitlbp pair through %sr1.  No alias path;
 * all failures go to itlb_fault.
 */
1449itlb_miss_11:
1450	get_pgd		spc,ptp
1451
1452	space_check	spc,t0,itlb_fault
1453
1454	L2_ptep		ptp,pte,t0,va,itlb_fault
1455
1456	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
1457	update_accessed	ptp,pte,t0,t1
1458
1459	make_insert_tlb_11	spc,pte,prot
1460
1461	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1462	mtsp		spc,%sr1
1463
1464	iitlba		pte,(%sr1,va)
1465	iitlbp		prot,(%sr1,va)
1466
1467	mtsp		t1, %sr1	/* Restore sr1 */
1468
1469	tlb_unlock1	spc,t0
1470	rfir
1471	nop
1472
/*
 * PA 1.1 non-access instruction TLB miss handler.  Same structure
 * as itlb_miss_11 above, with an alias check before faulting.
 */
1473naitlb_miss_11:
1474	get_pgd		spc,ptp
1475
1476	space_check	spc,t0,naitlb_fault
1477
1478	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
1479
1480	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
1481	update_accessed	ptp,pte,t0,t1
1482
1483	make_insert_tlb_11	spc,pte,prot
1484
1485	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1486	mtsp		spc,%sr1
1487
1488	iitlba		pte,(%sr1,va)
1489	iitlbp		prot,(%sr1,va)
1490
1491	mtsp		t1, %sr1	/* Restore sr1 */
1492
1493	tlb_unlock1	spc,t0
1494	rfir
1495	nop
1496
/* NOTE(review): the fall-back here is itlb_fault, while the 20/20w
 * variants use naitlb_fault — confirm this asymmetry is intended. */
1497naitlb_check_alias_11:
1498	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11
1499
1500	iitlba          pte,(%sr0, va)
1501	iitlbp          prot,(%sr0, va)
1502
1503	rfir
1504	nop
1505
1506
/*
 * PA 2.0 (narrow kernel) instruction TLB miss handler: two-level
 * walk, f_extend the pte, combined iitlbt insert.  No alias path.
 */
1507itlb_miss_20:
1508	get_pgd		spc,ptp
1509
1510	space_check	spc,t0,itlb_fault
1511
1512	L2_ptep		ptp,pte,t0,va,itlb_fault
1513
1514	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
1515	update_accessed	ptp,pte,t0,t1
1516
1517	make_insert_tlb	spc,pte,prot
1518
1519	f_extend	pte,t1
1520
1521	iitlbt          pte,prot
1522
1523	tlb_unlock1	spc,t0
1524	rfir
1525	nop
1526
/*
 * PA 2.0 (narrow kernel) non-access instruction TLB miss handler.
 * Same structure as itlb_miss_20, with an alias check first.
 */
1527naitlb_miss_20:
1528	get_pgd		spc,ptp
1529
1530	space_check	spc,t0,naitlb_fault
1531
1532	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
1533
1534	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
1535	update_accessed	ptp,pte,t0,t1
1536
1537	make_insert_tlb	spc,pte,prot
1538
1539	f_extend	pte,t1
1540
1541	iitlbt          pte,prot
1542
1543	tlb_unlock1	spc,t0
1544	rfir
1545	nop
1546
/* Alias check; fall-back fault target is naitlb_fault. */
1547naitlb_check_alias_20:
1548	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
1549
1550	iitlbt          pte,prot
1551
1552	rfir
1553	nop
1554
1555#endif
1556
1557#ifdef CONFIG_64BIT
1558
/*
 * 64-bit dirty-bit trap: the PTE exists but its dirty bit is
 * clear; update_dirty sets it and the translation is re-inserted.
 * Note the unlock here is tlb_unlock0 (cf. tlb_unlock1 in the
 * miss handlers; both macros are defined earlier in this file).
 */
1559dbit_trap_20w:
1560	space_adjust	spc,va,t0
1561	get_pgd		spc,ptp
1562	space_check	spc,t0,dbit_fault
1563
1564	L3_ptep		ptp,pte,t0,va,dbit_fault
1565
1566	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
1567	update_dirty	ptp,pte,t1
1568
1569	make_insert_tlb	spc,pte,prot
1570
1571	idtlbt          pte,prot
1572
1573	tlb_unlock0	spc,t0
1574	rfir
1575	nop
1576#else
1577
/*
 * PA 1.1 dirty-bit trap: set the PTE dirty bit (update_dirty) and
 * re-insert the translation via idtlba/idtlbp through %sr1.
 */
1578dbit_trap_11:
1579
1580	get_pgd		spc,ptp
1581
1582	space_check	spc,t0,dbit_fault
1583
1584	L2_ptep		ptp,pte,t0,va,dbit_fault
1585
1586	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
1587	update_dirty	ptp,pte,t1
1588
1589	make_insert_tlb_11	spc,pte,prot
1590
1591	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1592	mtsp		spc,%sr1
1593
1594	idtlba		pte,(%sr1,va)
1595	idtlbp		prot,(%sr1,va)
1596
1597	mtsp            t1, %sr1     /* Restore sr1 */
1598
1599	tlb_unlock0	spc,t0
1600	rfir
1601	nop
1602
/*
 * PA 2.0 (narrow kernel) dirty-bit trap: set the PTE dirty bit,
 * f_extend the pte, and re-insert with idtlbt.
 */
1603dbit_trap_20:
1604	get_pgd		spc,ptp
1605
1606	space_check	spc,t0,dbit_fault
1607
1608	L2_ptep		ptp,pte,t0,va,dbit_fault
1609
1610	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
1611	update_dirty	ptp,pte,t1
1612
1613	make_insert_tlb	spc,pte,prot
1614
1615	f_extend	pte,t1
1616
1617	idtlbt		pte,prot
1618
1619	tlb_unlock0	spc,t0
1620	rfir
1621	nop
1622#endif
1623
1624	.import handle_interruption,code
1625
/*
 * Common fault exits for the handlers above.  Each stub loads its
 * interruption code into %r8 (in the branch delay slot) and enters
 * the shared intr_save path.
 */
1626kernel_bad_space:
1627	b               intr_save
1628	ldi             31,%r8  /* Use an unused code */
1629
1630dbit_fault:
1631	b               intr_save
1632	ldi             20,%r8
1633
1634itlb_fault:
1635	b               intr_save
1636	ldi             6,%r8
1637
1638nadtlb_fault:
1639	b               intr_save
1640	ldi             17,%r8
1641
1642naitlb_fault:
1643	b               intr_save
1644	ldi             16,%r8
1645
1646dtlb_fault:
1647	b               intr_save
1648	ldi             15,%r8
1649
1650	/* Register saving semantics for system calls:
1651
1652	   %r1		   clobbered by system call macro in userspace
1653	   %r2		   saved in PT_REGS by gateway page
1654	   %r3  - %r18	   preserved by C code (saved by signal code)
1655	   %r19 - %r20	   saved in PT_REGS by gateway page
1656	   %r21 - %r22	   non-standard syscall args
1657			   stored in kernel stack by gateway page
1658	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1659	   %r27 - %r30	   saved in PT_REGS by gateway page
1660	   %r31		   syscall return pointer
1661	 */
1662
1663	/* Floating point registers (FIXME: what do we do with these?)
1664
1665	   %fr0  - %fr3	   status/exception, not preserved
1666	   %fr4  - %fr7	   arguments
1667	   %fr8	 - %fr11   not preserved by C code
1668	   %fr12 - %fr21   preserved by C code
1669	   %fr22 - %fr31   not preserved by C code
1670	 */
1671
	/* Save the callee-save registers %r3-%r18 ("preserved by C
	 * code", see the register-saving notes above) into the pt_regs
	 * area pointed to by \regs. */
1672	.macro	reg_save regs
1673	STREG	%r3, PT_GR3(\regs)
1674	STREG	%r4, PT_GR4(\regs)
1675	STREG	%r5, PT_GR5(\regs)
1676	STREG	%r6, PT_GR6(\regs)
1677	STREG	%r7, PT_GR7(\regs)
1678	STREG	%r8, PT_GR8(\regs)
1679	STREG	%r9, PT_GR9(\regs)
1680	STREG   %r10,PT_GR10(\regs)
1681	STREG   %r11,PT_GR11(\regs)
1682	STREG   %r12,PT_GR12(\regs)
1683	STREG   %r13,PT_GR13(\regs)
1684	STREG   %r14,PT_GR14(\regs)
1685	STREG   %r15,PT_GR15(\regs)
1686	STREG   %r16,PT_GR16(\regs)
1687	STREG   %r17,PT_GR17(\regs)
1688	STREG   %r18,PT_GR18(\regs)
1689	.endm
1690
	/* Restore the callee-save registers %r3-%r18 from the pt_regs
	 * area pointed to by \regs (inverse of reg_save). */
1691	.macro	reg_restore regs
1692	LDREG	PT_GR3(\regs), %r3
1693	LDREG	PT_GR4(\regs), %r4
1694	LDREG	PT_GR5(\regs), %r5
1695	LDREG	PT_GR6(\regs), %r6
1696	LDREG	PT_GR7(\regs), %r7
1697	LDREG	PT_GR8(\regs), %r8
1698	LDREG	PT_GR9(\regs), %r9
1699	LDREG   PT_GR10(\regs),%r10
1700	LDREG   PT_GR11(\regs),%r11
1701	LDREG   PT_GR12(\regs),%r12
1702	LDREG   PT_GR13(\regs),%r13
1703	LDREG   PT_GR14(\regs),%r14
1704	LDREG   PT_GR15(\regs),%r15
1705	LDREG   PT_GR16(\regs),%r16
1706	LDREG   PT_GR17(\regs),%r17
1707	LDREG   PT_GR18(\regs),%r18
1708	.endm
1709
	/* Generate sys_<name>_wrapper: save %r3-%r18 and %cr27 into the
	 * task's pt_regs (the %cr27 store rides in the branch delay
	 * slot), then branch externally to the real sys_<name>. */
1710	.macro	fork_like name
1711ENTRY(sys_\name\()_wrapper)
1712	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1713	ldo	TASK_REGS(%r1),%r1
1714	reg_save %r1
1715	mfctl	%cr27, %r28
1716	ldil	L%sys_\name, %r31
1717	be	R%sys_\name(%sr4,%r31)
1718	STREG	%r28, PT_CR27(%r1)
1719ENDPROC(sys_\name\()_wrapper)
1720	.endm
1721
1722fork_like clone
1723fork_like fork
1724fork_like vfork
1725
1726	/* Set the return value for the child */
/*
 * First code run by a newly forked/cloned task: call
 * schedule_tail, restore %cr27 and the callee-saves from pt_regs,
 * and exit through syscall_exit with a return value of 0 for the
 * child (copy %r0,%r28 in the branch delay slot).
 */
1727ENTRY(child_return)
1728	BL	schedule_tail, %r2
1729	nop
1730finish_child_return:
1731	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1732	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */
1733
1734	LDREG	PT_CR27(%r1), %r3
1735	mtctl	%r3, %cr27
1736	reg_restore %r1
1737	b	syscall_exit
1738	copy	%r0,%r28
1739ENDPROC(child_return)
1740
/*
 * rt_sigreturn wrapper: call sys_rt_sigreturn with the task's
 * pt_regs, then restore callee-saves from the (now rewritten)
 * pt_regs and jump back through the restored %r2.
 */
1741ENTRY(sys_rt_sigreturn_wrapper)
1742	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1743	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
1744	/* Don't save regs, we are going to restore them from sigcontext. */
1745	STREG	%r2, -RP_OFFSET(%r30)
1746#ifdef CONFIG_64BIT
1747	ldo	FRAME_SIZE(%r30), %r30
1748	BL	sys_rt_sigreturn,%r2
1749	ldo	-16(%r30),%r29		/* Reference param save area */
1750#else
1751	BL	sys_rt_sigreturn,%r2
1752	ldo	FRAME_SIZE(%r30), %r30
1753#endif
1754
	/* pop the frame allocated above and recover the saved rp */
1755	ldo	-FRAME_SIZE(%r30), %r30
1756	LDREG	-RP_OFFSET(%r30), %r2
1757
1758	/* FIXME: I think we need to restore a few more things here. */
1759	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1760	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
1761	reg_restore %r1
1762
1763	/* If the signal was received while the process was blocked on a
1764	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1765	 * take us to syscall_exit_rfi and on to intr_return.
1766	 */
1767	bv	%r0(%r2)
1768	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
1769ENDPROC(sys_rt_sigreturn_wrapper)
1770
1771ENTRY(syscall_exit)
1772	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
1773	 * via syscall_exit_rfi if the signal was received while the process
1774	 * was running.
1775	 */
1776
1777	/* save return value now */
1778
1779	mfctl     %cr30, %r1
1780	LDREG     TI_TASK(%r1),%r1
1781	STREG     %r28,TASK_PT_GR28(%r1)
1782
1783	/* Seems to me that dp could be wrong here, if the syscall involved
1784	 * calling a module, and nothing got round to restoring dp on return.
1785	 */
1786	loadgp
1787
1788syscall_check_resched:
1789
1790	/* check for reschedule */
1791
1792	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
1793	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1794
1795	.import do_signal,code
1796syscall_check_sig:
1797	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
1798	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	/* conditional nullify: if any of the tested flag bits are set,
	 * the following branch is nullified and we fall through into
	 * syscall_do_signal */
1799	and,COND(<>)	%r19, %r26, %r0
1800	b,n	syscall_restore	/* skip past if we've nothing to do */
1801
1802syscall_do_signal:
1803	/* Save callee-save registers (for sigcontext).
1804	 * FIXME: After this point the process structure should be
1805	 * consistent with all the relevant state of the process
1806	 * before the syscall.  We need to verify this.
1807	 */
1808	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1809	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
1810	reg_save %r26
1811
1812#ifdef CONFIG_64BIT
1813	ldo	-16(%r30),%r29			/* Reference param save area */
1814#endif
1815
1816	BL	do_notify_resume,%r2
1817	ldi	1, %r25				/* long in_syscall = 1 */
1818
1819	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1820	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
1821	reg_restore %r20
1822
	/* re-check the flags; more work may have become pending */
1823	b,n     syscall_check_sig
1824
1825syscall_restore:
1826	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1827
1828	/* Are we being ptraced? */
1829	ldw	TASK_FLAGS(%r1),%r19
1830	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	/* if no trace flags are set the branch below is nullified and
	 * we take the fast, direct return path */
1831	and,COND(=)	%r19,%r2,%r0
1832	b,n	syscall_restore_rfi
1833
1834	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
1835	rest_fp	%r19
1836
1837	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
1838	mtsar	%r19
1839
1840	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
1841	LDREG	TASK_PT_GR19(%r1),%r19
1842	LDREG   TASK_PT_GR20(%r1),%r20
1843	LDREG	TASK_PT_GR21(%r1),%r21
1844	LDREG	TASK_PT_GR22(%r1),%r22
1845	LDREG	TASK_PT_GR23(%r1),%r23
1846	LDREG	TASK_PT_GR24(%r1),%r24
1847	LDREG	TASK_PT_GR25(%r1),%r25
1848	LDREG	TASK_PT_GR26(%r1),%r26
1849	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
1850	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
1851	LDREG	TASK_PT_GR29(%r1),%r29
1852	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
1853
1854	/* NOTE: We use rsm/ssm pair to make this operation atomic */
1855	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
1856	rsm     PSW_SM_I, %r0
1857	copy    %r1,%r30                           /* Restore user sp */
1858	mfsp    %sr3,%r1                           /* Get user space id */
1859	mtsp    %r1,%sr7                           /* Restore sr7 */
1860	ssm     PSW_SM_I, %r0
1861
1862	/* Set sr2 to zero for userspace syscalls to work. */
1863	mtsp	%r0,%sr2
1864	mtsp	%r1,%sr4			   /* Restore sr4 */
1865	mtsp	%r1,%sr5			   /* Restore sr5 */
1866	mtsp	%r1,%sr6			   /* Restore sr6 */
1867
1868	depi	3,31,2,%r31			   /* ensure return to user mode. */
1869
1870#ifdef CONFIG_64BIT
1871	/* decide whether to reset the wide mode bit
1872	 *
1873	 * For a syscall, the W bit is stored in the lowest bit
1874	 * of sp.  Extract it and reset W if it is zero */
1875	extrd,u,*<>	%r30,63,1,%r1
1876	rsm	PSW_SM_W, %r0
1877	/* now reset the lowest bit of sp if it was set */
1878	xor	%r30,%r1,%r30
1879#endif
1880	be,n    0(%sr3,%r31)                       /* return to user space */

1881	/* We have to return via an RFI, so that PSW T and R bits can be set
1882	 * appropriately.
1883	 * This sets up pt_regs so we can return via intr_restore, which is not
1884	 * the most efficient way of doing things, but it works.
1885	 */
1886syscall_restore_rfi:
1887	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
1888	mtctl	%r2,%cr0			   /*   for immediate trap */
1889	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
1890	ldi	0x0b,%r20			   /* Create new PSW */
1891	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */
1892
1893	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1894	 * set in thread_info.h and converted to PA bitmap
1895	 * numbers in asm-offsets.c */
1896
1897	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1898	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1899	depi	-1,27,1,%r20			   /* R bit */
1900
1901	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1902	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1903	depi	-1,7,1,%r20			   /* T bit */
1904
1905	STREG	%r20,TASK_PT_PSW(%r1)
1906
1907	/* Always store space registers, since sr3 can be changed (e.g. fork) */
1908
1909	mfsp    %sr3,%r25
1910	STREG   %r25,TASK_PT_SR3(%r1)
1911	STREG   %r25,TASK_PT_SR4(%r1)
1912	STREG   %r25,TASK_PT_SR5(%r1)
1913	STREG   %r25,TASK_PT_SR6(%r1)
1914	STREG   %r25,TASK_PT_SR7(%r1)
1915	STREG   %r25,TASK_PT_IASQ0(%r1)
1916	STREG   %r25,TASK_PT_IASQ1(%r1)
1917
1918	/* XXX W bit??? */
1919	/* Now if old D bit is clear, it means we didn't save all registers
1920	 * on syscall entry, so do that now.  This only happens on TRACEME
1921	 * calls, or if someone attached to us while we were on a syscall.
1922	 * We could make this more efficient by not saving r3-r18, but
1923	 * then we wouldn't be able to use the common intr_restore path.
1924	 * It is only for traced processes anyway, so performance is not
1925	 * an issue.
1926	 */
1927	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
1928	ldo	TASK_REGS(%r1),%r25
1929	reg_save %r25				   /* Save r3 to r18 */
1930
1931	/* Save the current sr */
1932	mfsp	%sr0,%r2
1933	STREG	%r2,TASK_PT_SR0(%r1)
1934
1935	/* Save the scratch sr */
1936	mfsp	%sr1,%r2
1937	STREG	%r2,TASK_PT_SR1(%r1)
1938
1939	/* sr2 should be set to zero for userspace syscalls */
1940	STREG	%r0,TASK_PT_SR2(%r1)
1941
	/* return address becomes the interruption IAOQ pair */
1942	LDREG	TASK_PT_GR31(%r1),%r2
1943	depi	3,31,2,%r2		   /* ensure return to user mode. */
1944	STREG   %r2,TASK_PT_IAOQ0(%r1)
1945	ldo	4(%r2),%r2
1946	STREG	%r2,TASK_PT_IAOQ1(%r1)
1947	b	intr_restore
1948	copy	%r25,%r16
1949
1950pt_regs_ok:
1951	LDREG	TASK_PT_IAOQ0(%r1),%r2
1952	depi	3,31,2,%r2		   /* ensure return to user mode. */
1953	STREG	%r2,TASK_PT_IAOQ0(%r1)
1954	LDREG	TASK_PT_IAOQ1(%r1),%r2
1955	depi	3,31,2,%r2
1956	STREG	%r2,TASK_PT_IAOQ1(%r1)
1957	b	intr_restore
1958	copy	%r25,%r16
1959
1960	.import schedule,code
1961syscall_do_resched:
1962	BL	schedule,%r2
1963#ifdef CONFIG_64BIT
1964	ldo	-16(%r30),%r29		/* Reference param save area */
1965#else
1966	nop
1967#endif
1968	b	syscall_check_resched	/* if resched, we start over again */
1969	nop
1970ENDPROC(syscall_exit)
1972
1973
1974#ifdef CONFIG_FUNCTION_TRACER
1975	.import ftrace_function_trampoline,code
/*
 * ftrace hook: tail-call into ftrace_function_trampoline, passing
 * %r3 as arg2 (presumably the caller's saved state expected by the
 * trampoline — confirm against ftrace_function_trampoline).
 */
1976ENTRY(_mcount)
1977	copy	%r3, %arg2
1978	b	ftrace_function_trampoline
1979	nop
1980ENDPROC(_mcount)
1981
/*
 * ftrace graph return path: forward the function's return values
 * (%ret0/%ret1) to ftrace_return_to_handler, with %rp aimed at
 * return_trampoline.  The trampoline then branches to the address
 * ftrace_return_to_handler returned in %ret0, after moving %r23/
 * %r24 back into %ret0/%ret1 (presumably the original return
 * values stashed by the trampoline — confirm against the C side).
 */
1982ENTRY(return_to_handler)
1983	load32	return_trampoline, %rp
1984	copy	%ret0, %arg0
1985	copy	%ret1, %arg1
1986	b	ftrace_return_to_handler
1987	nop
1988return_trampoline:
1989	copy	%ret0, %rp
1990	copy	%r23, %ret0
1991	copy	%r24, %ret1
1992
1993.globl ftrace_stub
1994ftrace_stub:
1995	bv	%r0(%rp)
1996	nop
1997ENDPROC(return_to_handler)
1998#endif	/* CONFIG_FUNCTION_TRACER */
1999
2000#ifdef CONFIG_IRQSTACKS
2001/* void call_on_stack(unsigned long param1, void *func,
2002		      unsigned long new_stack) */
/*
 * Run func(param1) on the supplied stack, then restore the
 * original %sp/%rp and return (see the prototype comment above).
 */
2003ENTRY(call_on_stack)
2004	copy	%sp, %r1	/* remember the old stack pointer */
2005
2006	/* Regarding the HPPA calling conventions for function pointers,
2007	   we assume the PIC register is not changed across call.  For
2008	   CONFIG_64BIT, the argument pointer is left to point at the
2009	   argument region allocated for the call to call_on_stack. */
2010# ifdef CONFIG_64BIT
2011	/* Switch to new stack.  We allocate two 128 byte frames.  */
2012	ldo	256(%arg2), %sp
2013	/* Save previous stack pointer and return pointer in frame marker */
2014	STREG	%rp, -144(%sp)
2015	/* Calls always use function descriptor */
2016	LDREG	16(%arg1), %arg1
2017	bve,l	(%arg1), %rp
2018	STREG	%r1, -136(%sp)
2019	LDREG	-144(%sp), %rp
2020	bve	(%rp)
2021	LDREG	-136(%sp), %sp
2022# else
2023	/* Switch to new stack.  We allocate two 64 byte frames.  */
2024	ldo	128(%arg2), %sp
2025	/* Save previous stack pointer and return pointer in frame marker */
2026	STREG	%r1, -68(%sp)
2027	STREG	%rp, -84(%sp)
2028	/* Calls use function descriptor if PLABEL bit is set */
2029	bb,>=,n	%arg1, 30, 1f
2030	depwi	0,31,2, %arg1
2031	LDREG	0(%arg1), %arg1
20321:
2033	be,l	0(%sr4,%arg1), %sr0, %r31
2034	copy	%r31, %rp
2035	LDREG	-84(%sp), %rp
2036	bv	(%rp)
2037	LDREG	-68(%sp), %sp
2038# endif /* CONFIG_64BIT */
2039ENDPROC(call_on_stack)
2040#endif /* CONFIG_IRQSTACKS */
2041
2042get_register:
2043	/*
2044	 * get_register is used by the non access tlb miss handlers to
2045	 * copy the value of the general register specified in r8 into
2046	 * r1. This routine can't be used for shadowed registers, since
2047	 * the rfir will restore the original value. So, for the shadowed
2048	 * registers we put a -1 into r1 to indicate that the register
2049	 * should not be used (the register being copied could also have
2050	 * a -1 in it, but that is OK, it just means that we will have
2051	 * to use the slow path instead).
	 *
	 * The blr below vectors into the following table of
	 * two-instruction (8-byte) entries, indexed by the register
	 * number in %r8; each entry returns via bv %r0(%r25) with the
	 * result placed in %r1 by the delay-slot instruction.
2052	 */
2053	blr     %r8,%r0
2054	nop
2055	bv      %r0(%r25)    /* r0 */
2056	copy    %r0,%r1
2057	bv      %r0(%r25)    /* r1 - shadowed */
2058	ldi     -1,%r1
2059	bv      %r0(%r25)    /* r2 */
2060	copy    %r2,%r1
2061	bv      %r0(%r25)    /* r3 */
2062	copy    %r3,%r1
2063	bv      %r0(%r25)    /* r4 */
2064	copy    %r4,%r1
2065	bv      %r0(%r25)    /* r5 */
2066	copy    %r5,%r1
2067	bv      %r0(%r25)    /* r6 */
2068	copy    %r6,%r1
2069	bv      %r0(%r25)    /* r7 */
2070	copy    %r7,%r1
2071	bv      %r0(%r25)    /* r8 - shadowed */
2072	ldi     -1,%r1
2073	bv      %r0(%r25)    /* r9 - shadowed */
2074	ldi     -1,%r1
2075	bv      %r0(%r25)    /* r10 */
2076	copy    %r10,%r1
2077	bv      %r0(%r25)    /* r11 */
2078	copy    %r11,%r1
2079	bv      %r0(%r25)    /* r12 */
2080	copy    %r12,%r1
2081	bv      %r0(%r25)    /* r13 */
2082	copy    %r13,%r1
2083	bv      %r0(%r25)    /* r14 */
2084	copy    %r14,%r1
2085	bv      %r0(%r25)    /* r15 */
2086	copy    %r15,%r1
2087	bv      %r0(%r25)    /* r16 - shadowed */
2088	ldi     -1,%r1
2089	bv      %r0(%r25)    /* r17 - shadowed */
2090	ldi     -1,%r1
2091	bv      %r0(%r25)    /* r18 */
2092	copy    %r18,%r1
2093	bv      %r0(%r25)    /* r19 */
2094	copy    %r19,%r1
2095	bv      %r0(%r25)    /* r20 */
2096	copy    %r20,%r1
2097	bv      %r0(%r25)    /* r21 */
2098	copy    %r21,%r1
2099	bv      %r0(%r25)    /* r22 */
2100	copy    %r22,%r1
2101	bv      %r0(%r25)    /* r23 */
2102	copy    %r23,%r1
2103	bv      %r0(%r25)    /* r24 - shadowed */
2104	ldi     -1,%r1
2105	bv      %r0(%r25)    /* r25 - shadowed */
2106	ldi     -1,%r1
2107	bv      %r0(%r25)    /* r26 */
2108	copy    %r26,%r1
2109	bv      %r0(%r25)    /* r27 */
2110	copy    %r27,%r1
2111	bv      %r0(%r25)    /* r28 */
2112	copy    %r28,%r1
2113	bv      %r0(%r25)    /* r29 */
2114	copy    %r29,%r1
2115	bv      %r0(%r25)    /* r30 */
2116	copy    %r30,%r1
2117	bv      %r0(%r25)    /* r31 */
2118	copy    %r31,%r1
2119
2120
2121set_register:
2122	/*
2123	 * set_register is used by the non access tlb miss handlers to
2124	 * copy the value of r1 into the general register specified in
2125	 * r8.
	 *
	 * Like get_register, the blr below vectors into a table of
	 * two-instruction (8-byte) entries indexed by the register
	 * number in %r8; each entry returns via bv %r0(%r25) with the
	 * write performed in the delay slot.
2126	 */
2127	blr     %r8,%r0
2128	nop
2129	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
2130	copy    %r1,%r0
2131	bv      %r0(%r25)    /* r1 */
2132	copy    %r1,%r1
2133	bv      %r0(%r25)    /* r2 */
2134	copy    %r1,%r2
2135	bv      %r0(%r25)    /* r3 */
2136	copy    %r1,%r3
2137	bv      %r0(%r25)    /* r4 */
2138	copy    %r1,%r4
2139	bv      %r0(%r25)    /* r5 */
2140	copy    %r1,%r5
2141	bv      %r0(%r25)    /* r6 */
2142	copy    %r1,%r6
2143	bv      %r0(%r25)    /* r7 */
2144	copy    %r1,%r7
2145	bv      %r0(%r25)    /* r8 */
2146	copy    %r1,%r8
2147	bv      %r0(%r25)    /* r9 */
2148	copy    %r1,%r9
2149	bv      %r0(%r25)    /* r10 */
2150	copy    %r1,%r10
2151	bv      %r0(%r25)    /* r11 */
2152	copy    %r1,%r11
2153	bv      %r0(%r25)    /* r12 */
2154	copy    %r1,%r12
2155	bv      %r0(%r25)    /* r13 */
2156	copy    %r1,%r13
2157	bv      %r0(%r25)    /* r14 */
2158	copy    %r1,%r14
2159	bv      %r0(%r25)    /* r15 */
2160	copy    %r1,%r15
2161	bv      %r0(%r25)    /* r16 */
2162	copy    %r1,%r16
2163	bv      %r0(%r25)    /* r17 */
2164	copy    %r1,%r17
2165	bv      %r0(%r25)    /* r18 */
2166	copy    %r1,%r18
2167	bv      %r0(%r25)    /* r19 */
2168	copy    %r1,%r19
2169	bv      %r0(%r25)    /* r20 */
2170	copy    %r1,%r20
2171	bv      %r0(%r25)    /* r21 */
2172	copy    %r1,%r21
2173	bv      %r0(%r25)    /* r22 */
2174	copy    %r1,%r22
2175	bv      %r0(%r25)    /* r23 */
2176	copy    %r1,%r23
2177	bv      %r0(%r25)    /* r24 */
2178	copy    %r1,%r24
2179	bv      %r0(%r25)    /* r25 */
2180	copy    %r1,%r25
2181	bv      %r0(%r25)    /* r26 */
2182	copy    %r1,%r26
2183	bv      %r0(%r25)    /* r27 */
2184	copy    %r1,%r27
2185	bv      %r0(%r25)    /* r28 */
2186	copy    %r1,%r28
2187	bv      %r0(%r25)    /* r29 */
2188	copy    %r1,%r29
2189	bv      %r0(%r25)    /* r30 */
2190	copy    %r1,%r30
2191	bv      %r0(%r25)    /* r31 */
2192	copy    %r1,%r31
2193
2194