1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 *  - handle in assembly and use shadowed registers only
29 *  - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#include <linux/linkage.h>
41
42#ifdef CONFIG_64BIT
43	.level 2.0w
44#else
45	.level 2.0
46#endif
47
48	.import		pa_tlb_lock,data
49
50	/* space_to_prot macro creates a prot id from a space id */
51
52#if (SPACEID_SHIFT) == 0
53	.macro  space_to_prot spc prot
54	depd,z  \spc,62,31,\prot
55	.endm
56#else
57	.macro  space_to_prot spc prot
58	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
59	.endm
60#endif
61
62	/* Switch to virtual mapping, trashing only %r1 */
63	.macro  virt_map
64	/* pcxt_ssm_bug */
65	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
66	mtsp	%r0, %sr4
67	mtsp	%r0, %sr5
68	mtsp	%r0, %sr6
69	tovirt_r1 %r29
70	load32	KERNEL_PSW, %r1
71
72	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
73	mtctl	%r0, %cr17	/* Clear IIASQ tail */
74	mtctl	%r0, %cr17	/* Clear IIASQ head */
75	mtctl	%r1, %ipsw
76	load32	4f, %r1
77	mtctl	%r1, %cr18	/* Set IIAOQ tail */
78	ldo	4(%r1), %r1
79	mtctl	%r1, %cr18	/* Set IIAOQ head */
80	rfir
81	nop
824:
83	.endm
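
	/* A rough C-style outline of virt_map (illustrative only; the
	 * helper-style names below are hypothetical, not kernel APIs):
	 *
	 *	sr4 = sr5 = sr6 = 0;		// kernel space ids
	 *	ipsw = KERNEL_PSW;		// PSW that rfir will load
	 *	iiasq_tail = iiasq_head = 0;	// virtual, space 0
	 *	iiaoq_tail = &&resume;		// the "4:" label above
	 *	iiaoq_head = &&resume + 4;
	 *	rfir();				// "return" into virtual mode
	 * resume:	;
	 */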
84
85	/*
86	 * The "get_stack" macros are responsible for determining the
87	 * kernel stack value.
88	 *
89	 *      If sr7 == 0
90	 *          Already using a kernel stack, so call the
91	 *          get_stack_use_r30 macro to push a pt_regs structure
92	 *          on the stack, and store registers there.
93	 *      else
94	 *          Need to set up a kernel stack, so call the
95	 *          get_stack_use_cr30 macro to set up a pointer
96	 *          to the pt_regs structure contained within the
97	 *          task pointer pointed to by cr30. Set the stack
98	 *          pointer to point to the end of the task structure.
99	 *
100	 * Note that we use shadowed registers for temps until
101	 * we can save %r26 and %r29. %r26 is used to preserve
102	 * %r8 (a shadowed register) which temporarily contained
103	 * either the fault type ("code") or the eirr. We need
104	 * to use a non-shadowed register to carry the value over
105	 * the rfir in virt_map. We use %r26 since this value winds
106	 * up being passed as the argument to either do_cpu_irq_mask
107	 * or handle_interruption. %r29 is used to hold a pointer to
108	 * the register save area, and once again, it needs to
109	 * be a non-shadowed register so that it survives the rfir.
110	 *
111	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
112	 */
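
	/* A minimal C sketch of the two cases above (task_regs() and the
	 * physical/virtual translation are simplified, hypothetical names):
	 *
	 *	if (sr7 == 0) {			// already on a kernel stack
	 *		regs = (struct pt_regs *)sp;	// push a pt_regs frame
	 *		sp += PT_SZ_ALGN;
	 *	} else {			// coming from user space
	 *		regs = task_regs(cr30);	// pt_regs inside task struct
	 *		sp = cr30 + THREAD_SZ_ALGN;
	 *	}
	 */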
113
114	.macro  get_stack_use_cr30
115
116	/* we save the registers in the task struct */
117
118	copy	%r30, %r17
119	mfctl   %cr30, %r1
120	ldo	THREAD_SZ_ALGN(%r1), %r30
121	mtsp	%r0,%sr7
122	mtsp	%r16,%sr3
123	tophys  %r1,%r9
124	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
125	tophys  %r1,%r9
126	ldo     TASK_REGS(%r9),%r9
127	STREG   %r17,PT_GR30(%r9)
128	STREG   %r29,PT_GR29(%r9)
129	STREG   %r26,PT_GR26(%r9)
130	STREG	%r16,PT_SR7(%r9)
131	copy    %r9,%r29
132	.endm
133
134	.macro  get_stack_use_r30
135
136	/* we put a struct pt_regs on the stack and save the registers there */
137
138	tophys  %r30,%r9
139	copy	%r30,%r1
140	ldo	PT_SZ_ALGN(%r30),%r30
141	STREG   %r1,PT_GR30(%r9)
142	STREG   %r29,PT_GR29(%r9)
143	STREG   %r26,PT_GR26(%r9)
144	STREG	%r16,PT_SR7(%r9)
145	copy    %r9,%r29
146	.endm
147
148	.macro  rest_stack
149	LDREG   PT_GR1(%r29), %r1
150	LDREG   PT_GR30(%r29),%r30
151	LDREG   PT_GR29(%r29),%r29
152	.endm
153
154	/* default interruption handler
155	 * (calls traps.c:handle_interruption) */
156	.macro	def code
157	b	intr_save
158	ldi     \code, %r8
159	.align	32
160	.endm
161
162	/* Interrupt interruption handler
163	 * (calls irq.c:do_cpu_irq_mask) */
164	.macro	extint code
165	b	intr_extint
166	mfsp    %sr7,%r16
167	.align	32
168	.endm
169
170	.import	os_hpmc, code
171
172	/* HPMC handler */
173	.macro	hpmc code
174	nop			/* must be a NOP, will be patched later */
175	load32	PA(os_hpmc), %r3
176	bv,n	0(%r3)
177	nop
178	.word	0		/* checksum (will be patched) */
179	.word	PA(os_hpmc)	/* address of handler */
180	.word	0		/* length of handler */
181	.endm
182
183	/*
184	 * Performance Note: Instructions will be moved up into
185	 * this part of the code later on, once we are sure
186	 * that the tlb miss handlers are close to final form.
187	 */
188
189	/* Register definitions for tlb miss handler macros */
190
191	va  = r8	/* virtual address for which the trap occurred */
192	spc = r24	/* space for which the trap occurred */
193
194#ifndef CONFIG_64BIT
195
196	/*
197	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
198	 */
199
200	.macro	itlb_11 code
201
202	mfctl	%pcsq, spc
203	b	itlb_miss_11
204	mfctl	%pcoq, va
205
206	.align		32
207	.endm
208#endif
209
210	/*
211	 * itlb miss interruption handler (parisc 2.0)
212	 */
213
214	.macro	itlb_20 code
215	mfctl	%pcsq, spc
216#ifdef CONFIG_64BIT
217	b       itlb_miss_20w
218#else
219	b	itlb_miss_20
220#endif
221	mfctl	%pcoq, va
222
223	.align		32
224	.endm
225
226#ifndef CONFIG_64BIT
227	/*
228	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
229	 */
230
231	.macro	naitlb_11 code
232
233	mfctl	%isr,spc
234	b	naitlb_miss_11
235	mfctl 	%ior,va
236
237	.align		32
238	.endm
239#endif
240
241	/*
242	 * naitlb miss interruption handler (parisc 2.0)
243	 */
244
245	.macro	naitlb_20 code
246
247	mfctl	%isr,spc
248#ifdef CONFIG_64BIT
249	b       naitlb_miss_20w
250#else
251	b	naitlb_miss_20
252#endif
253	mfctl 	%ior,va
254
255	.align		32
256	.endm
257
258#ifndef CONFIG_64BIT
259	/*
260	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
261	 */
262
263	.macro	dtlb_11 code
264
265	mfctl	%isr, spc
266	b	dtlb_miss_11
267	mfctl	%ior, va
268
269	.align		32
270	.endm
271#endif
272
273	/*
274	 * dtlb miss interruption handler (parisc 2.0)
275	 */
276
277	.macro	dtlb_20 code
278
279	mfctl	%isr, spc
280#ifdef CONFIG_64BIT
281	b       dtlb_miss_20w
282#else
283	b	dtlb_miss_20
284#endif
285	mfctl	%ior, va
286
287	.align		32
288	.endm
289
290#ifndef CONFIG_64BIT
291	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
292
293	.macro	nadtlb_11 code
294
295	mfctl	%isr,spc
296	b       nadtlb_miss_11
297	mfctl	%ior,va
298
299	.align		32
300	.endm
301#endif
302
303	/* nadtlb miss interruption handler (parisc 2.0) */
304
305	.macro	nadtlb_20 code
306
307	mfctl	%isr,spc
308#ifdef CONFIG_64BIT
309	b       nadtlb_miss_20w
310#else
311	b       nadtlb_miss_20
312#endif
313	mfctl	%ior,va
314
315	.align		32
316	.endm
317
318#ifndef CONFIG_64BIT
319	/*
320	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
321	 */
322
323	.macro	dbit_11 code
324
325	mfctl	%isr,spc
326	b	dbit_trap_11
327	mfctl	%ior,va
328
329	.align		32
330	.endm
331#endif
332
333	/*
334	 * dirty bit trap interruption handler (parisc 2.0)
335	 */
336
337	.macro	dbit_20 code
338
339	mfctl	%isr,spc
340#ifdef CONFIG_64BIT
341	b       dbit_trap_20w
342#else
343	b	dbit_trap_20
344#endif
345	mfctl	%ior,va
346
347	.align		32
348	.endm
349
350	/* In LP64, the space contains part of the upper 32 bits of the
351	 * fault.  We have to extract this and place it in the va,
352	 * zeroing the corresponding bits in the space register */
353	.macro		space_adjust	spc,va,tmp
354#ifdef CONFIG_64BIT
355	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
356	depd		%r0,63,SPACEID_SHIFT,\spc
357	depd		\tmp,31,SPACEID_SHIFT,\va
358#endif
359	.endm
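
	/* Equivalently, in C (64-bit only; the SPACEID_SHIFT low bits of
	 * the space register are really address bits 32 and up):
	 *
	 *	tmp  = spc & ((1UL << SPACEID_SHIFT) - 1);
	 *	spc &= ~((1UL << SPACEID_SHIFT) - 1);
	 *	va  |= tmp << 32;		// fold the bits back into va
	 */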
360
361	.import		swapper_pg_dir,code
362
363	/* Get the pgd.  For faults on space zero (kernel space), this
364	 * is simply swapper_pg_dir.  For user space faults, the
365	 * pgd is stored in %cr25 */
366	.macro		get_pgd		spc,reg
367	ldil		L%PA(swapper_pg_dir),\reg
368	ldo		R%PA(swapper_pg_dir)(\reg),\reg
369	or,COND(=)	%r0,\spc,%r0
370	mfctl		%cr25,\reg
371	.endm
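
	/* In C terms, roughly (cr25 holds the current user pgd):
	 *
	 *	pgd = (spc == 0) ? PA(swapper_pg_dir) : mfctl(25);
	 */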
372
373	/*
374		space_check(spc,tmp,fault)
375
376		spc - The space we saw the fault with.
377		tmp - The place to store the current space.
378		fault - Function to call on failure.
379
380		Only allow faults on different spaces from the
381		currently active one if we're the kernel
382
383	*/
384	.macro		space_check	spc,tmp,fault
385	mfsp		%sr7,\tmp
386	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
387					 * as kernel, so defeat the space
388					 * check if it is */
389	copy		\spc,\tmp
390	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
391	cmpb,COND(<>),n	\tmp,\spc,\fault
392	.endm
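
	/* The nullification dance above boils down to, approximately:
	 *
	 *	if (spc != 0 && sr7 != 0 && spc != sr7)
	 *		goto fault;	// user-mode fault on a foreign space
	 */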
393
394	/* Look up a PTE in a 2-Level scheme (faulting at each
395	 * level if the entry isn't present
396	 *
397	 * NOTE: we use ldw even for LP64, since the short pointers
398	 * can address up to 1TB
399	 */
400	.macro		L2_ptep	pmd,pte,index,va,fault
401#if CONFIG_PGTABLE_LEVELS == 3
402	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
403#else
404# if defined(CONFIG_64BIT)
405	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
406  #else
407  # if PAGE_SIZE > 4096
408	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
409  # else
410	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
411  # endif
412# endif
413#endif
414	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
415	copy		%r0,\pte
416	ldw,s		\index(\pmd),\pmd
417	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
418	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
419	copy		\pmd,%r9
420	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
421	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
422	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
423	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
424	LDREG		%r0(\pmd),\pte
425	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
426	.endm
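
	/* The walk above is a conventional two-level lookup; in rough C
	 * (pxd_page() is an illustrative stand-in for the flag-stripping
	 * and shifting done on the pmd entry):
	 *
	 *	pmd = base[(va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)];
	 *	if (!(pmd & _PxD_PRESENT))
	 *		goto fault;
	 *	pte = pxd_page(pmd)[(va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
	 *	if (!(pte & _PAGE_PRESENT))
	 *		goto fault;
	 */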
427
428	/* Look up PTE in a 3-Level scheme.
429	 *
430	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
431	 * first pmd adjacent to the pgd.  This means that we can
432	 * subtract a constant offset to get to it.  The pmd and pgd
433	 * sizes are arranged so that a single pmd covers 4GB (giving
434	 * a full LP64 process access to 8TB) so our lookups are
435	 * effectively L2 for the first 4GB of the kernel (i.e. for
436	 * all ILP32 processes and all the kernel for machines with
437	 * under 4GB of memory) */
438	.macro		L3_ptep pgd,pte,index,va,fault
439#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
440	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
441	copy		%r0,\pte
442	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
443	ldw,s		\index(\pgd),\pgd
444	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
445	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
446	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
447	shld		\pgd,PxD_VALUE_SHIFT,\index
448	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
449	copy		\index,\pgd
450	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
451	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
452#endif
453	L2_ptep		\pgd,\pte,\index,\va,\fault
454	.endm
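
	/* The hybrid trick, approximately: if every va bit above
	 * ASM_PGDIR_SHIFT is zero (the first 4GB), the first pmd sits at a
	 * fixed offset from the pgd and no pgd entry is loaded at all:
	 *
	 *	if ((va >> PGDIR_SHIFT) == 0)
	 *		pmd = pgd + ASM_PGD_PMD_OFFSET;	// adjacent first pmd
	 *	else
	 *		pmd = pxd_page(pgd[va >> PGDIR_SHIFT]); // + present check
	 *	// ...then fall into the 2-level walk above
	 */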
455
456	/* Acquire pa_tlb_lock lock and recheck page is still present. */
457	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
458#ifdef CONFIG_SMP
459	cmpib,COND(=),n	0,\spc,2f
460	load32		PA(pa_tlb_lock),\tmp
4611:	LDCW		0(\tmp),\tmp1
462	cmpib,COND(=)	0,\tmp1,1b
463	nop
464	LDREG		0(\ptp),\pte
465	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
466	b		\fault
467	stw		 \spc,0(\tmp)
4682:
469#endif
470	.endm
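
	/* Approximate C for the SMP path (ldcw() stands for the PA-RISC
	 * load-and-clear-word primitive; reading zero means somebody else
	 * holds the lock):
	 *
	 *	if (spc != 0) {			// kernel faults skip the lock
	 *		while (ldcw(&pa_tlb_lock) == 0)
	 *			;		// spin
	 *		pte = *ptp;		// re-read under the lock
	 *		if (!(pte & _PAGE_PRESENT)) {
	 *			pa_tlb_lock = spc;	// nonzero store unlocks
	 *			goto fault;
	 *		}
	 *	}
	 */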
471
472	/* Release pa_tlb_lock lock without reloading lock address. */
473	.macro		tlb_unlock0	spc,tmp
474#ifdef CONFIG_SMP
475	or,COND(=)	%r0,\spc,%r0
476	stw             \spc,0(\tmp)
477#endif
478	.endm
479
480	/* Release pa_tlb_lock lock. */
481	.macro		tlb_unlock1	spc,tmp
482#ifdef CONFIG_SMP
483	load32		PA(pa_tlb_lock),\tmp
484	tlb_unlock0	\spc,\tmp
485#endif
486	.endm
487
488	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
489	 * don't needlessly dirty the cache line if it was already set */
490	.macro		update_accessed	ptp,pte,tmp,tmp1
491	ldi		_PAGE_ACCESSED,\tmp1
492	or		\tmp1,\pte,\tmp
493	and,COND(<>)	\tmp1,\pte,%r0
494	STREG		\tmp,0(\ptp)
495	.endm
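
	/* i.e., roughly:
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptp = pte | _PAGE_ACCESSED;	// store only on change
	 */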
496
497	/* Set the dirty bit (and accessed bit).  No need to be
498	 * clever, this is only used from the dirty fault */
499	.macro		update_dirty	ptp,pte,tmp
500	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
501	or		\tmp,\pte,\pte
502	STREG		\pte,0(\ptp)
503	.endm
504
505	/* bitshift difference from a PFN (based on kernel's PAGE_SIZE)
506	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
507	#define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
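	/* e.g. with 4kB kernel pages PAGE_ADD_SHIFT is 0 (a no-op); with
	 * 16kB pages PAGE_SHIFT is 14, so PAGE_ADD_SHIFT is 2 and each
	 * kernel page spans 1 << 2 = 4 of the CPU's 4k TLB frames. */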
508
509	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
510	.macro		convert_for_tlb_insert20 pte
511	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
512				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
513	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
514				(63-58)+PAGE_ADD_SHIFT,\pte
515	.endm
516
517	/* Convert the pte and prot to tlb insertion values.  How
518	 * this happens is quite subtle, read below */
519	.macro		make_insert_tlb	spc,pte,prot
520	space_to_prot   \spc \prot        /* create prot id from space */
521	/* The following is the real subtlety.  This is depositing
522	 * T <-> _PAGE_REFTRAP
523	 * D <-> _PAGE_DIRTY
524	 * B <-> _PAGE_DMB (memory break)
525	 *
526	 * Then incredible subtlety: The access rights are
527	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
528	 * See 3-14 of the parisc 2.0 manual
529	 *
530	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
531	 * trigger an access rights trap in user space if the user
532	 * tries to read an unreadable page) */
533	depd            \pte,8,7,\prot
534
535	/* PAGE_USER indicates the page can be read with user privileges,
536	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
537	 * contains _PAGE_READ) */
538	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
539	depdi		7,11,3,\prot
540	/* If we're a gateway page, drop PL2 back to zero for promotion
541	 * to kernel privilege (so we can execute the page as kernel).
542	 * Any privilege promotion page always denies read and write */
543	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
544	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
545
546	/* Enforce uncacheable pages.
547	 * This should ONLY be used for MMIO on PA 2.0 machines.
548	 * Memory/DMA is cache coherent on all PA2.0 machines we support
549	 * (that means T-class is NOT supported) and the memory controllers
550	 * on most of those machines only handle cache transactions.
551	 */
552	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
553	depdi		1,12,1,\prot
554
555	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
556	convert_for_tlb_insert20 \pte
557	.endm
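
	/* Condensed summary of the prot construction above (bit positions
	 * elided and the OR-style names purely illustrative; see the PA 2.0
	 * insertion format for the exact layout):
	 *
	 *	prot  = space_to_prot(spc);		// protection id
	 *	prot |= T_D_B_and_access_rights(pte);
	 *	if (pte & _PAGE_USER)
	 *		prot |= PL1_PL2_USER;	// readable at user privilege
	 *	if (pte & _PAGE_GATEWAY)
	 *		prot &= ~PL2;		// promote to kernel privilege
	 *	if (pte & _PAGE_NO_CACHE)
	 *		prot |= UNCACHEABLE;	// MMIO only on PA 2.0
	 */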
558
559	/* Identical macro to make_insert_tlb above, except it
560	 * makes the tlb entry for the differently formatted pa11
561	 * insertion instructions */
562	.macro		make_insert_tlb_11	spc,pte,prot
563	zdep		\spc,30,15,\prot
564	dep		\pte,8,7,\prot
565	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
566	depi		1,12,1,\prot
567	extru,=         \pte,_PAGE_USER_BIT,1,%r0
568	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
569	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
570	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
571
572	/* Get rid of prot bits and convert to page addr for iitlba */
573
574	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
575	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
576	.endm
577
578	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
579	 * to extend into I/O space if the address is 0xfXXXXXXX
580	 * so we extend the f's into the top word of the pte in
581	 * this case */
582	.macro		f_extend	pte,tmp
583	extrd,s		\pte,42,4,\tmp
584	addi,<>		1,\tmp,%r0
585	extrd,s		\pte,63,25,\pte
586	.endm
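
	/* Roughly (top_nibble()/sign_extend() are hypothetical helpers):
	 *
	 *	if (top_nibble(pte) == 0xf)	// address is 0xfXXXXXXX
	 *		pte = sign_extend(pte);	// reach into I/O space
	 */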
587
588	/* The alias region is an 8MB-aligned, 16MB region used to clear
589	 * and copy user pages at addresses congruent with the user
590	 * virtual address.
591	 *
592	 * To use the alias page, you set %r26 up with the "to" TLB
593	 * entry (identifying the physical page) and %r23 up with
594	 * the "from" TLB entry (or nothing if only a "to" entry---for
595	 * clear_user_page_asm) */
596	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
597	cmpib,COND(<>),n 0,\spc,\fault
598	ldil		L%(TMPALIAS_MAP_START),\tmp
599#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
600	/* on LP64, ldi will sign extend into the upper 32 bits,
601	 * which is behaviour we don't want */
602	depdi		0,31,32,\tmp
603#endif
604	copy		\va,\tmp1
605	depi		0,31,23,\tmp1
606	cmpb,COND(<>),n	\tmp,\tmp1,\fault
607	mfctl		%cr19,\tmp	/* iir */
608	/* get the opcode (first six bits) into \tmp */
609	extrw,u		\tmp,5,6,\tmp
610	/*
611	 * Only setting the T bit prevents data cache movein
612	 * Setting access rights to zero prevents instruction cache movein
613	 *
614	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
615	 * to type field and _PAGE_READ goes to top bit of PL1
616	 */
617	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
618	/*
619	 * so if the opcode is one (i.e. this is a memory management
620	 * instruction) nullify the next load so \prot is only T.
621	 * Otherwise this is a normal data operation
622	 */
623	cmpiclr,=	0x01,\tmp,%r0
624	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
625.ifc \patype,20
626	depd,z		\prot,8,7,\prot
627.else
628.ifc \patype,11
629	depw,z		\prot,8,7,\prot
630.else
631	.error "undefined PA type to do_alias"
632.endif
633.endif
634	/*
635	 * OK, it is in the temp alias region, check whether "from" or "to".
636	 * Check "subtle" note in pacache.S re: r23/r26.
637	 */
638#ifdef CONFIG_64BIT
639	extrd,u,*=	\va,41,1,%r0
640#else
641	extrw,u,=	\va,9,1,%r0
642#endif
643	or,COND(tr)	%r23,%r0,\pte
644	or		%r26,%r0,\pte
645	.endm
646
647
648	/*
649	 * Fault_vectors are architecturally required to be aligned on a 2K
650	 * boundary
651	 */
652
653	.text
654	.align 2048
655
656ENTRY(fault_vector_20)
657	/* First vector is invalid (0) */
658	.ascii	"cows can fly"
659	.byte 0
660	.align 32
661
662	hpmc		 1
663	def		 2
664	def		 3
665	extint		 4
666	def		 5
667	itlb_20		 6
668	def		 7
669	def		 8
670	def              9
671	def		10
672	def		11
673	def		12
674	def		13
675	def		14
676	dtlb_20		15
677	naitlb_20	16
678	nadtlb_20	17
679	def		18
680	def		19
681	dbit_20		20
682	def		21
683	def		22
684	def		23
685	def		24
686	def		25
687	def		26
688	def		27
689	def		28
690	def		29
691	def		30
692	def		31
693END(fault_vector_20)
694
695#ifndef CONFIG_64BIT
696
697	.align 2048
698
699ENTRY(fault_vector_11)
700	/* First vector is invalid (0) */
701	.ascii	"cows can fly"
702	.byte 0
703	.align 32
704
705	hpmc		 1
706	def		 2
707	def		 3
708	extint		 4
709	def		 5
710	itlb_11		 6
711	def		 7
712	def		 8
713	def              9
714	def		10
715	def		11
716	def		12
717	def		13
718	def		14
719	dtlb_11		15
720	naitlb_11	16
721	nadtlb_11	17
722	def		18
723	def		19
724	dbit_11		20
725	def		21
726	def		22
727	def		23
728	def		24
729	def		25
730	def		26
731	def		27
732	def		28
733	def		29
734	def		30
735	def		31
736END(fault_vector_11)
737
738#endif
739	/* Fault vector is separately protected and *must* be on its own page */
740	.align		PAGE_SIZE
741ENTRY(end_fault_vector)
742
743	.import		handle_interruption,code
744	.import		do_cpu_irq_mask,code
745
746	/*
747	 * Child Returns here
748	 *
749	 * copy_thread moved args into task save area.
750	 */
751
752ENTRY(ret_from_kernel_thread)
753
754	/* Call schedule_tail first though */
755	BL	schedule_tail, %r2
756	nop
757
758	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
759	LDREG	TASK_PT_GR25(%r1), %r26
760#ifdef CONFIG_64BIT
761	LDREG	TASK_PT_GR27(%r1), %r27
762#endif
763	LDREG	TASK_PT_GR26(%r1), %r1
764	ble	0(%sr7, %r1)
765	copy	%r31, %r2
766	b	finish_child_return
767	nop
768ENDPROC(ret_from_kernel_thread)
769
770
771	/*
772	 * struct task_struct *_switch_to(struct task_struct *prev,
773	 *	struct task_struct *next)
774	 *
775	 * switch kernel stacks and return prev */
776ENTRY(_switch_to)
777	STREG	 %r2, -RP_OFFSET(%r30)
778
779	callee_save_float
780	callee_save
781
782	load32	_switch_to_ret, %r2
783
784	STREG	%r2, TASK_PT_KPC(%r26)
785	LDREG	TASK_PT_KPC(%r25), %r2
786
787	STREG	%r30, TASK_PT_KSP(%r26)
788	LDREG	TASK_PT_KSP(%r25), %r30
789	LDREG	TASK_THREAD_INFO(%r25), %r25
790	bv	%r0(%r2)
791	mtctl   %r25,%cr30
792
793_switch_to_ret:
794	mtctl	%r0, %cr0		/* Needed for single stepping */
795	callee_rest
796	callee_rest_float
797
798	LDREG	-RP_OFFSET(%r30), %r2
799	bv	%r0(%r2)
800	copy	%r26, %r28
801ENDPROC(_switch_to)
802
803	/*
804	 * Common rfi return path for interruptions, kernel execve, and
805	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
806	 * return via this path if the signal was received when the process
807	 * was running; if the process was blocked on a syscall then the
808	 * normal syscall_exit path is used.  All syscalls for traced
809	 * processes exit via intr_restore.
810	 *
811	 * XXX If any syscalls that change a process's space id ever exit
812	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
813	 * adjust IASQ[0..1].
814	 *
815	 */
816
817	.align	PAGE_SIZE
818
819ENTRY(syscall_exit_rfi)
820	mfctl   %cr30,%r16
821	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
822	ldo	TASK_REGS(%r16),%r16
823	/* Force iaoq to userspace, as the user has had access to our current
824	 * context via sigcontext. Also filter the PSW for the same reason.
825	 */
826	LDREG	PT_IAOQ0(%r16),%r19
827	depi	3,31,2,%r19
828	STREG	%r19,PT_IAOQ0(%r16)
829	LDREG	PT_IAOQ1(%r16),%r19
830	depi	3,31,2,%r19
831	STREG	%r19,PT_IAOQ1(%r16)
832	LDREG   PT_PSW(%r16),%r19
833	load32	USER_PSW_MASK,%r1
834#ifdef CONFIG_64BIT
835	load32	USER_PSW_HI_MASK,%r20
836	depd    %r20,31,32,%r1
837#endif
838	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
839	load32	USER_PSW,%r1
840	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
841	STREG   %r19,PT_PSW(%r16)
842
843	/*
844	 * If we aren't being traced, we never saved space registers
845	 * (we don't store them in the sigcontext), so set them
846	 * to "proper" values now (otherwise we'll wind up restoring
847	 * whatever was last stored in the task structure, which might
848	 * be inconsistent if an interrupt occurred while on the gateway
849	 * page). Note that we may be "trashing" values the user put in
850	 * them, but we don't support the user changing them.
851	 */
852
853	STREG   %r0,PT_SR2(%r16)
854	mfsp    %sr3,%r19
855	STREG   %r19,PT_SR0(%r16)
856	STREG   %r19,PT_SR1(%r16)
857	STREG   %r19,PT_SR3(%r16)
858	STREG   %r19,PT_SR4(%r16)
859	STREG   %r19,PT_SR5(%r16)
860	STREG   %r19,PT_SR6(%r16)
861	STREG   %r19,PT_SR7(%r16)
862
863intr_return:
864	/* check for reschedule */
865	mfctl   %cr30,%r1
866	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
867	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
868
869	.import do_notify_resume,code
870intr_check_sig:
871	/* As above */
872	mfctl   %cr30,%r1
873	LDREG	TI_FLAGS(%r1),%r19
874	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
875	and,COND(<>)	%r19, %r20, %r0
876	b,n	intr_restore	/* skip past if we've nothing to do */
877
878	/* This check is critical to having LWS
879	 * working. The IASQ is zero on the gateway
880	 * page and we cannot deliver any signals until
881	 * we get off the gateway page.
882	 *
883	 * Only do signals if we are returning to user space
884	 */
885	LDREG	PT_IASQ0(%r16), %r20
886	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
887	LDREG	PT_IASQ1(%r16), %r20
888	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
889
890	/* NOTE: We need to enable interrupts if we have to deliver
891	 * signals. We used to do this earlier but it caused kernel
892	 * stack overflows. */
893	ssm     PSW_SM_I, %r0
894
895	copy	%r0, %r25			/* long in_syscall = 0 */
896#ifdef CONFIG_64BIT
897	ldo	-16(%r30),%r29			/* Reference param save area */
898#endif
899
900	BL	do_notify_resume,%r2
901	copy	%r16, %r26			/* struct pt_regs *regs */
902
903	b,n	intr_check_sig
904
905intr_restore:
906	copy            %r16,%r29
907	ldo             PT_FR31(%r29),%r1
908	rest_fp         %r1
909	rest_general    %r29
910
911	/* inverse of virt_map */
912	pcxt_ssm_bug
913	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
914	tophys_r1       %r29
915
916	/* Restore space id's and special cr's from PT_REGS
917	 * structure pointed to by r29
918	 */
919	rest_specials	%r29
920
921	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
922	 * It also restores r1 and r30.
923	 */
924	rest_stack
925
926	rfi
927	nop
928
929#ifndef CONFIG_PREEMPT
930# define intr_do_preempt	intr_restore
931#endif /* !CONFIG_PREEMPT */
932
933	.import schedule,code
934intr_do_resched:
935	/* Only call schedule on return to userspace. If we're returning
936	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
937	 * we jump back to intr_restore.
938	 */
939	LDREG	PT_IASQ0(%r16), %r20
940	cmpib,COND(=)	0, %r20, intr_do_preempt
941	nop
942	LDREG	PT_IASQ1(%r16), %r20
943	cmpib,COND(=)	0, %r20, intr_do_preempt
944	nop
945
946	/* NOTE: We need to enable interrupts if we schedule.  We used
947	 * to do this earlier but it caused kernel stack overflows. */
948	ssm     PSW_SM_I, %r0
949
950#ifdef CONFIG_64BIT
951	ldo	-16(%r30),%r29		/* Reference param save area */
952#endif
953
954	ldil	L%intr_check_sig, %r2
955#ifndef CONFIG_64BIT
956	b	schedule
957#else
958	load32	schedule, %r20
959	bv	%r0(%r20)
960#endif
961	ldo	R%intr_check_sig(%r2), %r2
962
963	/* preempt the current task on returning to kernel
964	 * mode from an interrupt, iff need_resched is set,
965	 * and preempt_count is 0. Otherwise, we continue on
966	 * our merry way back to the current running task.
967	 */
968#ifdef CONFIG_PREEMPT
969	.import preempt_schedule_irq,code
970intr_do_preempt:
971	rsm	PSW_SM_I, %r0		/* disable interrupts */
972
973	/* current_thread_info()->preempt_count */
974	mfctl	%cr30, %r1
975	LDREG	TI_PRE_COUNT(%r1), %r19
976	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
977	nop				/* prev insn branched backwards */
978
979	/* check if we interrupted a critical path */
980	LDREG	PT_PSW(%r16), %r20
981	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
982	nop
983
984	BL	preempt_schedule_irq, %r2
985	nop
986
987	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
988#endif /* CONFIG_PREEMPT */
989
990	/*
991	 * External interrupts.
992	 */
993
994intr_extint:
995	cmpib,COND(=),n 0,%r16,1f
996
997	get_stack_use_cr30
998	b,n 2f
999
10001:
1001	get_stack_use_r30
10022:
1003	save_specials	%r29
1004	virt_map
1005	save_general	%r29
1006
1007	ldo	PT_FR0(%r29), %r24
1008	save_fp	%r24
1009
1010	loadgp
1011
1012	copy	%r29, %r26	/* arg0 is pt_regs */
1013	copy	%r29, %r16	/* save pt_regs */
1014
1015	ldil	L%intr_return, %r2
1016
1017#ifdef CONFIG_64BIT
1018	ldo	-16(%r30),%r29	/* Reference param save area */
1019#endif
1020
1021	b	do_cpu_irq_mask
1022	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
1023ENDPROC(syscall_exit_rfi)
1024
1025
1026	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1027
1028ENTRY(intr_save)		/* for os_hpmc */
1029	mfsp    %sr7,%r16
1030	cmpib,COND(=),n 0,%r16,1f
1031	get_stack_use_cr30
1032	b	2f
1033	copy    %r8,%r26
1034
10351:
1036	get_stack_use_r30
1037	copy    %r8,%r26
1038
10392:
1040	save_specials	%r29
1041
1042	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
1043
1044	/*
1045	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1046	 *           traps.c).
1047	 *        2) Once we start executing code above 4 Gb, we need
1048	 *           to adjust iasq/iaoq here in the same way we
1049	 *           adjust isr/ior below.
1050	 */
1051
1052	cmpib,COND(=),n        6,%r26,skip_save_ior
1053
1054
1055	mfctl           %cr20, %r16 /* isr */
1056	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1057	mfctl           %cr21, %r17 /* ior */
1058
1059
1060#ifdef CONFIG_64BIT
1061	/*
1062	 * If the interrupted code was running with W bit off (32 bit),
1063	 * clear the b bits (bits 0 & 1) in the ior.
1064	 * save_specials left ipsw value in r8 for us to test.
1065	 */
1066	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1067	depdi           0,1,2,%r17
1068
1069	/*
1070	 * FIXME: This code has hardwired assumptions about the split
1071	 *        between space bits and offset bits. This will change
1072	 *        when we allow alternate page sizes.
1073	 */
1074
1075	/* adjust isr/ior. */
1076	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
1077	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
1078	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
1079#endif
1080	STREG           %r16, PT_ISR(%r29)
1081	STREG           %r17, PT_IOR(%r29)
1082
1083
1084skip_save_ior:
1085	virt_map
1086	save_general	%r29
1087
1088	ldo		PT_FR0(%r29), %r25
1089	save_fp		%r25
1090
1091	loadgp
1092
1093	copy		%r29, %r25	/* arg1 is pt_regs */
1094#ifdef CONFIG_64BIT
1095	ldo		-16(%r30),%r29	/* Reference param save area */
1096#endif
1097
1098	ldil		L%intr_check_sig, %r2
1099	copy		%r25, %r16	/* save pt_regs */
1100
1101	b		handle_interruption
1102	ldo		R%intr_check_sig(%r2), %r2
1103ENDPROC(intr_save)
1104
1105
1106	/*
1107	 * Note for all tlb miss handlers:
1108	 *
1109	 * cr24 contains a pointer to the kernel address space
1110	 * page directory.
1111	 *
1112	 * cr25 contains a pointer to the current user address
1113	 * space page directory.
1114	 *
1115	 * sr3 will contain the space id of the user address space
1116	 * of the current running thread while that thread is
1117	 * running in the kernel.
1118	 */
1119
1120	/*
1121	 * register number allocations.  Note that these are all
1122	 * in the shadowed registers
1123	 */
1124
1125	t0 = r1		/* temporary register 0 */
1126	va = r8		/* virtual address for which the trap occurred */
1127	t1 = r9		/* temporary register 1 */
1128	pte  = r16	/* pte/phys page # */
1129	prot = r17	/* prot bits */
1130	spc  = r24	/* space for which the trap occurred */
1131	ptp = r25	/* page directory/page table pointer */
1132
1133#ifdef CONFIG_64BIT
1134
1135dtlb_miss_20w:
1136	space_adjust	spc,va,t0
1137	get_pgd		spc,ptp
1138	space_check	spc,t0,dtlb_fault
1139
1140	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
1141
1142	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1143	update_accessed	ptp,pte,t0,t1
1144
1145	make_insert_tlb	spc,pte,prot
1146
1147	idtlbt          pte,prot
1148
1149	tlb_unlock1	spc,t0
1150	rfir
1151	nop
1152
1153dtlb_check_alias_20w:
1154	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
1155
1156	idtlbt          pte,prot
1157
1158	rfir
1159	nop
1160
1161nadtlb_miss_20w:
1162	space_adjust	spc,va,t0
1163	get_pgd		spc,ptp
1164	space_check	spc,t0,nadtlb_fault
1165
1166	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
1167
1168	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1169	update_accessed	ptp,pte,t0,t1
1170
1171	make_insert_tlb	spc,pte,prot
1172
1173	idtlbt          pte,prot
1174
1175	tlb_unlock1	spc,t0
1176	rfir
1177	nop
1178
1179nadtlb_check_alias_20w:
1180	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1181
1182	idtlbt          pte,prot
1183
1184	rfir
1185	nop
1186
1187#else
1188
1189dtlb_miss_11:
1190	get_pgd		spc,ptp
1191
1192	space_check	spc,t0,dtlb_fault
1193
1194	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11
1195
1196	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
1197	update_accessed	ptp,pte,t0,t1
1198
1199	make_insert_tlb_11	spc,pte,prot
1200
1201	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1202	mtsp		spc,%sr1
1203
1204	idtlba		pte,(%sr1,va)
1205	idtlbp		prot,(%sr1,va)
1206
1207	mtsp		t1, %sr1	/* Restore sr1 */
1208
1209	tlb_unlock1	spc,t0
1210	rfir
1211	nop
1212
1213dtlb_check_alias_11:
1214	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11
1215
1216	idtlba          pte,(va)
1217	idtlbp          prot,(va)
1218
1219	rfir
1220	nop
1221
1222nadtlb_miss_11:
1223	get_pgd		spc,ptp
1224
1225	space_check	spc,t0,nadtlb_fault
1226
1227	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
1228
1229	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1230	update_accessed	ptp,pte,t0,t1
1231
1232	make_insert_tlb_11	spc,pte,prot
1233
1234	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1235	mtsp		spc,%sr1
1236
1237	idtlba		pte,(%sr1,va)
1238	idtlbp		prot,(%sr1,va)
1239
1240	mtsp		t1, %sr1	/* Restore sr1 */
1241
1242	tlb_unlock1	spc,t0
1243	rfir
1244	nop
1245
1246nadtlb_check_alias_11:
1247	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1248
1249	idtlba          pte,(va)
1250	idtlbp          prot,(va)
1251
1252	rfir
1253	nop
1254
1255dtlb_miss_20:
1256	space_adjust	spc,va,t0
1257	get_pgd		spc,ptp
1258	space_check	spc,t0,dtlb_fault
1259
1260	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
1261
1262	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
1263	update_accessed	ptp,pte,t0,t1
1264
1265	make_insert_tlb	spc,pte,prot
1266
1267	f_extend	pte,t1
1268
1269	idtlbt          pte,prot
1270
1271	tlb_unlock1	spc,t0
1272	rfir
1273	nop
1274
1275dtlb_check_alias_20:
1276	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
1277
1278	idtlbt          pte,prot
1279
1280	rfir
1281	nop
1282
1283nadtlb_miss_20:
1284	get_pgd		spc,ptp
1285
1286	space_check	spc,t0,nadtlb_fault
1287
1288	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
1289
1290	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1291	update_accessed	ptp,pte,t0,t1
1292
1293	make_insert_tlb	spc,pte,prot
1294
1295	f_extend	pte,t1
1296
1297	idtlbt		pte,prot
1298
1299	tlb_unlock1	spc,t0
1300	rfir
1301	nop
1302
1303nadtlb_check_alias_20:
1304	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1305
1306	idtlbt          pte,prot
1307
1308	rfir
1309	nop
1310
1311#endif
1312
1313nadtlb_emulate:
1314
1315	/*
1316	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1317	 * probei instructions. We don't want to fault for these
1318	 * instructions (not only does it not make sense, it can cause
1319	 * deadlocks, since some flushes are done with the mmap
1320	 * semaphore held). If the translation doesn't exist, we can't
1321	 * insert a translation, so have to emulate the side effects
1322	 * of the instruction. Since we don't insert a translation
1323	 * we can get a lot of faults during a flush loop, so it makes
1324	 * sense to try to do it here with minimum overhead. We only
1325	 * emulate fdc,fic,pdc,probew,prober instructions whose base
1326	 * and index registers are not shadowed. We defer everything
1327	 * else to the "slow" path.
1328	 */
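
	/* A C sketch of the decode below (the field accessors are
	 * illustrative names; the masks come straight from the code):
	 *
	 *	iir = mfctl(19);
	 *	if ((iir & 0x280) != 0x280)
	 *		goto nadtlb_probe_check;	// not fdc/fdce/pdc/fic,4f
	 *	if (!m_bit(iir))
	 *		goto nadtlb_nullify;		// no base update to do
	 *	x = get_register(x_field(iir));		// -1 => shadowed reg
	 *	b = get_register(b_field(iir));
	 *	if (x == -1 || b == -1)
	 *		goto nadtlb_fault;		// take the slow path
	 *	set_register(b_field(iir), b + x);	// emulate base update
	 *	goto nadtlb_nullify;
	 */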
1329
1330	mfctl           %cr19,%r9 /* Get iir */
1331
1332	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1333	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1334
1335	/* Checks for fdc,fdce,pdc,"fic,4f" only */
1336	ldi             0x280,%r16
1337	and             %r9,%r16,%r17
1338	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
1339	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
1340	BL		get_register,%r25
1341	extrw,u         %r9,15,5,%r8           /* Get index register # */
1342	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1343	copy            %r1,%r24
1344	BL		get_register,%r25
1345	extrw,u         %r9,10,5,%r8           /* Get base register # */
1346	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1347	BL		set_register,%r25
1348	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */
1349
1350nadtlb_nullify:
1351	mfctl           %ipsw,%r8
1352	ldil            L%PSW_N,%r9
1353	or              %r8,%r9,%r8            /* Set PSW_N */
1354	mtctl           %r8,%ipsw
1355
1356	rfir
1357	nop
1358
1359	/*
1360		When there is no translation for the probe address then we
1361		must nullify the insn and return zero in the target register.
1362		This will indicate to the calling code that it does not have
1363		write/read privileges to this address.
1364
1365		This should technically work for prober and probew in PA 1.1,
1366		and also probe,r and probe,w in PA 2.0
1367
1368		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1369		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1370
1371	*/
1372nadtlb_probe_check:
1373	ldi             0x80,%r16
1374	and             %r9,%r16,%r17
1375	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1376	BL              get_register,%r25      /* Find the target register */
1377	extrw,u         %r9,31,5,%r8           /* Get target register */
1378	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1379	BL		set_register,%r25
1380	copy            %r0,%r1                /* Write zero to target register */
1381	b nadtlb_nullify                       /* Nullify return insn */
1382	nop
1383
1384
1385#ifdef CONFIG_64BIT
1386itlb_miss_20w:
1387
1388	/*
1389	 * The instruction TLB miss is a little different: we allow users to
1390	 * fault on the gateway page, which is in the kernel address space.
1391	 */
1392
1393	space_adjust	spc,va,t0
1394	get_pgd		spc,ptp
1395	space_check	spc,t0,itlb_fault
1396
1397	L3_ptep		ptp,pte,t0,va,itlb_fault
1398
1399	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
1400	update_accessed	ptp,pte,t0,t1
1401
1402	make_insert_tlb	spc,pte,prot
1403
1404	iitlbt          pte,prot
1405
1406	tlb_unlock1	spc,t0
1407	rfir
1408	nop
1409
1410naitlb_miss_20w:
1411
1412	/*
1413	 * The instruction TLB miss is a little different: we allow users to
1414	 * fault on the gateway page, which is in the kernel address space.
1415	 */
1416
1417	space_adjust	spc,va,t0
1418	get_pgd		spc,ptp
1419	space_check	spc,t0,naitlb_fault
1420
1421	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
1422
1423	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1424	update_accessed	ptp,pte,t0,t1
1425
1426	make_insert_tlb	spc,pte,prot
1427
1428	iitlbt          pte,prot
1429
1430	tlb_unlock1	spc,t0
1431	rfir
1432	nop
1433
1434naitlb_check_alias_20w:
1435	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
1436
1437	iitlbt		pte,prot
1438
1439	rfir
1440	nop
1441
1442#else
1443
1444itlb_miss_11:
1445	get_pgd		spc,ptp
1446
1447	space_check	spc,t0,itlb_fault
1448
1449	L2_ptep		ptp,pte,t0,va,itlb_fault
1450
1451	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
1452	update_accessed	ptp,pte,t0,t1
1453
1454	make_insert_tlb_11	spc,pte,prot
1455
1456	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1457	mtsp		spc,%sr1
1458
1459	iitlba		pte,(%sr1,va)
1460	iitlbp		prot,(%sr1,va)
1461
1462	mtsp		t1, %sr1	/* Restore sr1 */
1463
1464	tlb_unlock1	spc,t0
1465	rfir
1466	nop
1467
1468naitlb_miss_11:
1469	get_pgd		spc,ptp
1470
1471	space_check	spc,t0,naitlb_fault
1472
1473	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
1474
1475	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
1476	update_accessed	ptp,pte,t0,t1
1477
1478	make_insert_tlb_11	spc,pte,prot
1479
1480	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1481	mtsp		spc,%sr1
1482
1483	iitlba		pte,(%sr1,va)
1484	iitlbp		prot,(%sr1,va)
1485
1486	mtsp		t1, %sr1	/* Restore sr1 */
1487
1488	tlb_unlock1	spc,t0
1489	rfir
1490	nop
1491
1492naitlb_check_alias_11:
1493	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11
1494
1495	iitlba          pte,(%sr0, va)
1496	iitlbp          prot,(%sr0, va)
1497
1498	rfir
1499	nop
1500
1501
1502itlb_miss_20:
1503	get_pgd		spc,ptp
1504
1505	space_check	spc,t0,itlb_fault
1506
1507	L2_ptep		ptp,pte,t0,va,itlb_fault
1508
1509	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
1510	update_accessed	ptp,pte,t0,t1
1511
1512	make_insert_tlb	spc,pte,prot
1513
1514	f_extend	pte,t1
1515
1516	iitlbt          pte,prot
1517
1518	tlb_unlock1	spc,t0
1519	rfir
1520	nop
1521
1522naitlb_miss_20:
1523	get_pgd		spc,ptp
1524
1525	space_check	spc,t0,naitlb_fault
1526
1527	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
1528
1529	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
1530	update_accessed	ptp,pte,t0,t1
1531
1532	make_insert_tlb	spc,pte,prot
1533
1534	f_extend	pte,t1
1535
1536	iitlbt          pte,prot
1537
1538	tlb_unlock1	spc,t0
1539	rfir
1540	nop
1541
1542naitlb_check_alias_20:
1543	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
1544
1545	iitlbt          pte,prot
1546
1547	rfir
1548	nop
1549
1550#endif
1551
1552#ifdef CONFIG_64BIT
1553
1554dbit_trap_20w:
1555	space_adjust	spc,va,t0
1556	get_pgd		spc,ptp
1557	space_check	spc,t0,dbit_fault
1558
1559	L3_ptep		ptp,pte,t0,va,dbit_fault
1560
1561	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
1562	update_dirty	ptp,pte,t1
1563
1564	make_insert_tlb	spc,pte,prot
1565
1566	idtlbt          pte,prot
1567
1568	tlb_unlock0	spc,t0
1569	rfir
1570	nop
1571#else
1572
1573dbit_trap_11:
1574
1575	get_pgd		spc,ptp
1576
1577	space_check	spc,t0,dbit_fault
1578
1579	L2_ptep		ptp,pte,t0,va,dbit_fault
1580
1581	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
1582	update_dirty	ptp,pte,t1
1583
1584	make_insert_tlb_11	spc,pte,prot
1585
1586	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1587	mtsp		spc,%sr1
1588
1589	idtlba		pte,(%sr1,va)
1590	idtlbp		prot,(%sr1,va)
1591
1592	mtsp            t1, %sr1     /* Restore sr1 */
1593
1594	tlb_unlock0	spc,t0
1595	rfir
1596	nop
1597
1598dbit_trap_20:
1599	get_pgd		spc,ptp
1600
1601	space_check	spc,t0,dbit_fault
1602
1603	L2_ptep		ptp,pte,t0,va,dbit_fault
1604
1605	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
1606	update_dirty	ptp,pte,t1
1607
1608	make_insert_tlb	spc,pte,prot
1609
1610	f_extend	pte,t1
1611
1612	idtlbt		pte,prot
1613
1614	tlb_unlock0	spc,t0
1615	rfir
1616	nop
1617#endif
1618
1619	.import handle_interruption,code
1620
1621kernel_bad_space:
1622	b               intr_save
1623	ldi             31,%r8  /* Use an unused code */
1624
1625dbit_fault:
1626	b               intr_save
1627	ldi             20,%r8
1628
1629itlb_fault:
1630	b               intr_save
1631	ldi             6,%r8
1632
1633nadtlb_fault:
1634	b               intr_save
1635	ldi             17,%r8
1636
1637naitlb_fault:
1638	b               intr_save
1639	ldi             16,%r8
1640
1641dtlb_fault:
1642	b               intr_save
1643	ldi             15,%r8
1644
1645	/* Register saving semantics for system calls:
1646
1647	   %r1		   clobbered by system call macro in userspace
1648	   %r2		   saved in PT_REGS by gateway page
1649	   %r3  - %r18	   preserved by C code (saved by signal code)
1650	   %r19 - %r20	   saved in PT_REGS by gateway page
1651	   %r21 - %r22	   non-standard syscall args
1652			   stored in kernel stack by gateway page
1653	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1654	   %r27 - %r30	   saved in PT_REGS by gateway page
1655	   %r31		   syscall return pointer
1656	 */
1657
1658	/* Floating point registers (FIXME: what do we do with these?)
1659
1660	   %fr0  - %fr3	   status/exception, not preserved
1661	   %fr4  - %fr7	   arguments
1662	   %fr8	 - %fr11   not preserved by C code
1663	   %fr12 - %fr21   preserved by C code
1664	   %fr22 - %fr31   not preserved by C code
1665	 */
1666
1667	.macro	reg_save regs
1668	STREG	%r3, PT_GR3(\regs)
1669	STREG	%r4, PT_GR4(\regs)
1670	STREG	%r5, PT_GR5(\regs)
1671	STREG	%r6, PT_GR6(\regs)
1672	STREG	%r7, PT_GR7(\regs)
1673	STREG	%r8, PT_GR8(\regs)
1674	STREG	%r9, PT_GR9(\regs)
1675	STREG   %r10,PT_GR10(\regs)
1676	STREG   %r11,PT_GR11(\regs)
1677	STREG   %r12,PT_GR12(\regs)
1678	STREG   %r13,PT_GR13(\regs)
1679	STREG   %r14,PT_GR14(\regs)
1680	STREG   %r15,PT_GR15(\regs)
1681	STREG   %r16,PT_GR16(\regs)
1682	STREG   %r17,PT_GR17(\regs)
1683	STREG   %r18,PT_GR18(\regs)
1684	.endm
1685
1686	.macro	reg_restore regs
1687	LDREG	PT_GR3(\regs), %r3
1688	LDREG	PT_GR4(\regs), %r4
1689	LDREG	PT_GR5(\regs), %r5
1690	LDREG	PT_GR6(\regs), %r6
1691	LDREG	PT_GR7(\regs), %r7
1692	LDREG	PT_GR8(\regs), %r8
1693	LDREG	PT_GR9(\regs), %r9
1694	LDREG   PT_GR10(\regs),%r10
1695	LDREG   PT_GR11(\regs),%r11
1696	LDREG   PT_GR12(\regs),%r12
1697	LDREG   PT_GR13(\regs),%r13
1698	LDREG   PT_GR14(\regs),%r14
1699	LDREG   PT_GR15(\regs),%r15
1700	LDREG   PT_GR16(\regs),%r16
1701	LDREG   PT_GR17(\regs),%r17
1702	LDREG   PT_GR18(\regs),%r18
1703	.endm
1704
1705	.macro	fork_like name
1706ENTRY(sys_\name\()_wrapper)
1707	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1708	ldo	TASK_REGS(%r1),%r1
1709	reg_save %r1
1710	mfctl	%cr27, %r28
1711	ldil	L%sys_\name, %r31
1712	be	R%sys_\name(%sr4,%r31)
1713	STREG	%r28, PT_CR27(%r1)
1714ENDPROC(sys_\name\()_wrapper)
1715	.endm
1716
1717fork_like clone
1718fork_like fork
1719fork_like vfork
1720
1721	/* Set the return value for the child */
1722ENTRY(child_return)
1723	BL	schedule_tail, %r2
1724	nop
1725finish_child_return:
1726	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1727	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */
1728
1729	LDREG	PT_CR27(%r1), %r3
1730	mtctl	%r3, %cr27
1731	reg_restore %r1
1732	b	syscall_exit
1733	copy	%r0,%r28
1734ENDPROC(child_return)
1735
1736ENTRY(sys_rt_sigreturn_wrapper)
1737	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1738	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
1739	/* Don't save regs, we are going to restore them from sigcontext. */
1740	STREG	%r2, -RP_OFFSET(%r30)
1741#ifdef CONFIG_64BIT
1742	ldo	FRAME_SIZE(%r30), %r30
1743	BL	sys_rt_sigreturn,%r2
1744	ldo	-16(%r30),%r29		/* Reference param save area */
1745#else
1746	BL	sys_rt_sigreturn,%r2
1747	ldo	FRAME_SIZE(%r30), %r30
1748#endif
1749
1750	ldo	-FRAME_SIZE(%r30), %r30
1751	LDREG	-RP_OFFSET(%r30), %r2
1752
1753	/* FIXME: I think we need to restore a few more things here. */
1754	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1755	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
1756	reg_restore %r1
1757
1758	/* If the signal was received while the process was blocked on a
1759	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1760	 * take us to syscall_exit_rfi and on to intr_return.
1761	 */
1762	bv	%r0(%r2)
1763	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
1764ENDPROC(sys_rt_sigreturn_wrapper)
1765
1766ENTRY(syscall_exit)
1767	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
1768	 * via syscall_exit_rfi if the signal was received while the process
1769	 * was running.
1770	 */
1771
1772	/* save return value now */
1773
1774	mfctl     %cr30, %r1
1775	LDREG     TI_TASK(%r1),%r1
1776	STREG     %r28,TASK_PT_GR28(%r1)
1777
1778	/* Seems to me that dp could be wrong here, if the syscall involved
1779	 * calling a module, and nothing got round to restoring dp on return.
1780	 */
1781	loadgp
1782
1783syscall_check_resched:
1784
1785	/* check for reschedule */
1786
1787	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
1788	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1789
1790	.import do_signal,code
1791syscall_check_sig:
1792	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
1793	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
1794	and,COND(<>)	%r19, %r26, %r0
1795	b,n	syscall_restore	/* skip past if we've nothing to do */
1796
1797syscall_do_signal:
1798	/* Save callee-save registers (for sigcontext).
1799	 * FIXME: After this point the process structure should be
1800	 * consistent with all the relevant state of the process
1801	 * before the syscall.  We need to verify this.
1802	 */
1803	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1804	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
1805	reg_save %r26
1806
1807#ifdef CONFIG_64BIT
1808	ldo	-16(%r30),%r29			/* Reference param save area */
1809#endif
1810
1811	BL	do_notify_resume,%r2
1812	ldi	1, %r25				/* long in_syscall = 1 */
1813
1814	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1815	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
1816	reg_restore %r20
1817
1818	b,n     syscall_check_sig
1819
1820syscall_restore:
1821	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1822
1823	/* Are we being ptraced? */
1824	ldw	TASK_FLAGS(%r1),%r19
1825	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
1826	and,COND(=)	%r19,%r2,%r0
1827	b,n	syscall_restore_rfi
1828
1829	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
1830	rest_fp	%r19
1831
1832	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
1833	mtsar	%r19
1834
1835	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
1836	LDREG	TASK_PT_GR19(%r1),%r19
1837	LDREG   TASK_PT_GR20(%r1),%r20
1838	LDREG	TASK_PT_GR21(%r1),%r21
1839	LDREG	TASK_PT_GR22(%r1),%r22
1840	LDREG	TASK_PT_GR23(%r1),%r23
1841	LDREG	TASK_PT_GR24(%r1),%r24
1842	LDREG	TASK_PT_GR25(%r1),%r25
1843	LDREG	TASK_PT_GR26(%r1),%r26
1844	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
1845	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
1846	LDREG	TASK_PT_GR29(%r1),%r29
1847	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
1848
1849	/* NOTE: We use rsm/ssm pair to make this operation atomic */
1850	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
1851	rsm     PSW_SM_I, %r0
1852	copy    %r1,%r30                           /* Restore user sp */
1853	mfsp    %sr3,%r1                           /* Get user space id */
1854	mtsp    %r1,%sr7                           /* Restore sr7 */
1855	ssm     PSW_SM_I, %r0
1856
1857	/* Set sr2 to zero for userspace syscalls to work. */
1858	mtsp	%r0,%sr2
1859	mtsp	%r1,%sr4			   /* Restore sr4 */
1860	mtsp	%r1,%sr5			   /* Restore sr5 */
1861	mtsp	%r1,%sr6			   /* Restore sr6 */
1862
1863	depi	3,31,2,%r31			   /* ensure return to user mode. */
1864
1865#ifdef CONFIG_64BIT
1866	/* decide whether to reset the wide mode bit
1867	 *
1868	 * For a syscall, the W bit is stored in the lowest bit
1869	 * of sp.  Extract it and reset W if it is zero */
1870	extrd,u,*<>	%r30,63,1,%r1
1871	rsm	PSW_SM_W, %r0
1872	/* now reset the lowest bit of sp if it was set */
1873	xor	%r30,%r1,%r30
1874#endif
1875	be,n    0(%sr3,%r31)                       /* return to user space */
1876
1877	/* We have to return via an RFI, so that PSW T and R bits can be set
1878	 * appropriately.
1879	 * This sets up pt_regs so we can return via intr_restore, which is not
1880	 * the most efficient way of doing things, but it works.
1881	 */
1882syscall_restore_rfi:
1883	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
1884	mtctl	%r2,%cr0			   /*   for immediate trap */
1885	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
1886	ldi	0x0b,%r20			   /* Create new PSW */
1887	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */
1888
1889	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1890	 * set in thread_info.h and converted to PA bitmap
1891	 * numbers in asm-offsets.c */
1892
1893	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1894	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1895	depi	-1,27,1,%r20			   /* R bit */
1896
1897	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1898	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1899	depi	-1,7,1,%r20			   /* T bit */
1900
1901	STREG	%r20,TASK_PT_PSW(%r1)
1902
1903	/* Always store space registers, since sr3 can be changed (e.g. fork) */
1904
1905	mfsp    %sr3,%r25
1906	STREG   %r25,TASK_PT_SR3(%r1)
1907	STREG   %r25,TASK_PT_SR4(%r1)
1908	STREG   %r25,TASK_PT_SR5(%r1)
1909	STREG   %r25,TASK_PT_SR6(%r1)
1910	STREG   %r25,TASK_PT_SR7(%r1)
1911	STREG   %r25,TASK_PT_IASQ0(%r1)
1912	STREG   %r25,TASK_PT_IASQ1(%r1)
1913
1914	/* XXX W bit??? */
1915	 * Now if the old D bit is clear, it means we didn't save all registers
1916	 * on syscall entry, so do that now.  This only happens on TRACEME
1917	 * calls, or if someone attached to us while we were on a syscall.
1918	 * We could make this more efficient by not saving r3-r18, but
1919	 * then we wouldn't be able to use the common intr_restore path.
1920	 * It is only for traced processes anyway, so performance is not
1921	 * an issue.
1922	 */
1923	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
1924	ldo	TASK_REGS(%r1),%r25
1925	reg_save %r25				   /* Save r3 to r18 */
1926
1927	/* Save the current sr */
1928	mfsp	%sr0,%r2
1929	STREG	%r2,TASK_PT_SR0(%r1)
1930
1931	/* Save the scratch sr */
1932	mfsp	%sr1,%r2
1933	STREG	%r2,TASK_PT_SR1(%r1)
1934
1935	/* sr2 should be set to zero for userspace syscalls */
1936	STREG	%r0,TASK_PT_SR2(%r1)
1937
1938	LDREG	TASK_PT_GR31(%r1),%r2
1939	depi	3,31,2,%r2		   /* ensure return to user mode. */
1940	STREG   %r2,TASK_PT_IAOQ0(%r1)
1941	ldo	4(%r2),%r2
1942	STREG	%r2,TASK_PT_IAOQ1(%r1)
1943	b	intr_restore
1944	copy	%r25,%r16
1945
1946pt_regs_ok:
1947	LDREG	TASK_PT_IAOQ0(%r1),%r2
1948	depi	3,31,2,%r2		   /* ensure return to user mode. */
1949	STREG	%r2,TASK_PT_IAOQ0(%r1)
1950	LDREG	TASK_PT_IAOQ1(%r1),%r2
1951	depi	3,31,2,%r2
1952	STREG	%r2,TASK_PT_IAOQ1(%r1)
1953	b	intr_restore
1954	copy	%r25,%r16
1955
1956	.import schedule,code
1957syscall_do_resched:
1958	BL	schedule,%r2
1959#ifdef CONFIG_64BIT
1960	ldo	-16(%r30),%r29		/* Reference param save area */
1961#else
1962	nop
1963#endif
1964	b	syscall_check_resched	/* if resched, we start over again */
1965	nop
1966ENDPROC(syscall_exit)
1967
1968
1969#ifdef CONFIG_FUNCTION_TRACER
1970	.import ftrace_function_trampoline,code
1971ENTRY(_mcount)
1972	copy	%r3, %arg2
1973	b	ftrace_function_trampoline
1974	nop
1975ENDPROC(_mcount)
1976
1977ENTRY(return_to_handler)
1978	load32	return_trampoline, %rp
1979	copy	%ret0, %arg0
1980	copy	%ret1, %arg1
1981	b	ftrace_return_to_handler
1982	nop
1983return_trampoline:
1984	copy	%ret0, %rp
1985	copy	%r23, %ret0
1986	copy	%r24, %ret1
1987
1988.globl ftrace_stub
1989ftrace_stub:
1990	bv	%r0(%rp)
1991	nop
1992ENDPROC(return_to_handler)
1993#endif	/* CONFIG_FUNCTION_TRACER */
1994
1995#ifdef CONFIG_IRQSTACKS
1996/* void call_on_stack(unsigned long param1, void *func,
1997		      unsigned long new_stack) */
1998ENTRY(call_on_stack)
1999	copy	%sp, %r1
2000
2001	/* Regarding the HPPA calling conventions for function pointers,
2002	   we assume the PIC register is not changed across the call.  For
2003	   CONFIG_64BIT, the argument pointer is left to point at the
2004	   argument region allocated for the call to call_on_stack. */
2005# ifdef CONFIG_64BIT
2006	/* Switch to new stack.  We allocate two 128 byte frames.  */
2007	ldo	256(%arg2), %sp
2008	/* Save previous stack pointer and return pointer in frame marker */
2009	STREG	%rp, -144(%sp)
2010	/* Calls always use function descriptor */
2011	LDREG	16(%arg1), %arg1
2012	bve,l	(%arg1), %rp
2013	STREG	%r1, -136(%sp)
2014	LDREG	-144(%sp), %rp
2015	bve	(%rp)
2016	LDREG	-136(%sp), %sp
2017# else
2018	/* Switch to new stack.  We allocate two 64 byte frames.  */
2019	ldo	128(%arg2), %sp
2020	/* Save previous stack pointer and return pointer in frame marker */
2021	STREG	%r1, -68(%sp)
2022	STREG	%rp, -84(%sp)
2023	/* Calls use function descriptor if PLABEL bit is set */
2024	bb,>=,n	%arg1, 30, 1f
2025	depwi	0,31,2, %arg1
2026	LDREG	0(%arg1), %arg1
20271:
2028	be,l	0(%sr4,%arg1), %sr0, %r31
2029	copy	%r31, %rp
2030	LDREG	-84(%sp), %rp
2031	bv	(%rp)
2032	LDREG	-68(%sp), %sp
2033# endif /* CONFIG_64BIT */
2034ENDPROC(call_on_stack)
2035#endif /* CONFIG_IRQSTACKS */
2036
2037get_register:
2038	/*
2039	 * get_register is used by the non access tlb miss handlers to
2040	 * copy the value of the general register specified in r8 into
2041	 * r1. This routine can't be used for shadowed registers, since
2042	 * the rfir will restore the original value. So, for the shadowed
2043	 * registers we put a -1 into r1 to indicate that the register
2044	 * should not be used (the register being copied could also have
2045	 * a -1 in it, but that is OK, it just means that we will have
2046	 * to use the slow path instead).
2047	 */
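	/* In C terms, roughly:
	 *
	 *	long get_register(int rnum)	// rnum arrives in %r8
	 *	{
	 *		return is_shadowed(rnum) ? -1 : gr[rnum];
	 *	}
	 *
	 * The blr below jumps into a table of 8-byte (two-instruction)
	 * slots, one per general register, each returning through %r25.
	 */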
2048	blr     %r8,%r0
2049	nop
2050	bv      %r0(%r25)    /* r0 */
2051	copy    %r0,%r1
2052	bv      %r0(%r25)    /* r1 - shadowed */
2053	ldi     -1,%r1
2054	bv      %r0(%r25)    /* r2 */
2055	copy    %r2,%r1
2056	bv      %r0(%r25)    /* r3 */
2057	copy    %r3,%r1
2058	bv      %r0(%r25)    /* r4 */
2059	copy    %r4,%r1
2060	bv      %r0(%r25)    /* r5 */
2061	copy    %r5,%r1
2062	bv      %r0(%r25)    /* r6 */
2063	copy    %r6,%r1
2064	bv      %r0(%r25)    /* r7 */
2065	copy    %r7,%r1
2066	bv      %r0(%r25)    /* r8 - shadowed */
2067	ldi     -1,%r1
2068	bv      %r0(%r25)    /* r9 - shadowed */
2069	ldi     -1,%r1
2070	bv      %r0(%r25)    /* r10 */
2071	copy    %r10,%r1
2072	bv      %r0(%r25)    /* r11 */
2073	copy    %r11,%r1
2074	bv      %r0(%r25)    /* r12 */
2075	copy    %r12,%r1
2076	bv      %r0(%r25)    /* r13 */
2077	copy    %r13,%r1
2078	bv      %r0(%r25)    /* r14 */
2079	copy    %r14,%r1
2080	bv      %r0(%r25)    /* r15 */
2081	copy    %r15,%r1
2082	bv      %r0(%r25)    /* r16 - shadowed */
2083	ldi     -1,%r1
2084	bv      %r0(%r25)    /* r17 - shadowed */
2085	ldi     -1,%r1
2086	bv      %r0(%r25)    /* r18 */
2087	copy    %r18,%r1
2088	bv      %r0(%r25)    /* r19 */
2089	copy    %r19,%r1
2090	bv      %r0(%r25)    /* r20 */
2091	copy    %r20,%r1
2092	bv      %r0(%r25)    /* r21 */
2093	copy    %r21,%r1
2094	bv      %r0(%r25)    /* r22 */
2095	copy    %r22,%r1
2096	bv      %r0(%r25)    /* r23 */
2097	copy    %r23,%r1
2098	bv      %r0(%r25)    /* r24 - shadowed */
2099	ldi     -1,%r1
2100	bv      %r0(%r25)    /* r25 - shadowed */
2101	ldi     -1,%r1
2102	bv      %r0(%r25)    /* r26 */
2103	copy    %r26,%r1
2104	bv      %r0(%r25)    /* r27 */
2105	copy    %r27,%r1
2106	bv      %r0(%r25)    /* r28 */
2107	copy    %r28,%r1
2108	bv      %r0(%r25)    /* r29 */
2109	copy    %r29,%r1
2110	bv      %r0(%r25)    /* r30 */
2111	copy    %r30,%r1
2112	bv      %r0(%r25)    /* r31 */
2113	copy    %r31,%r1
2114
2115
2116set_register:
2117	/*
2118	 * set_register is used by the non access tlb miss handlers to
2119	 * copy the value of r1 into the general register specified in
2120	 * r8.
2121	 */
2122	blr     %r8,%r0
2123	nop
2124	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
2125	copy    %r1,%r0
2126	bv      %r0(%r25)    /* r1 */
2127	copy    %r1,%r1
2128	bv      %r0(%r25)    /* r2 */
2129	copy    %r1,%r2
2130	bv      %r0(%r25)    /* r3 */
2131	copy    %r1,%r3
2132	bv      %r0(%r25)    /* r4 */
2133	copy    %r1,%r4
2134	bv      %r0(%r25)    /* r5 */
2135	copy    %r1,%r5
2136	bv      %r0(%r25)    /* r6 */
2137	copy    %r1,%r6
2138	bv      %r0(%r25)    /* r7 */
2139	copy    %r1,%r7
2140	bv      %r0(%r25)    /* r8 */
2141	copy    %r1,%r8
2142	bv      %r0(%r25)    /* r9 */
2143	copy    %r1,%r9
2144	bv      %r0(%r25)    /* r10 */
2145	copy    %r1,%r10
2146	bv      %r0(%r25)    /* r11 */
2147	copy    %r1,%r11
2148	bv      %r0(%r25)    /* r12 */
2149	copy    %r1,%r12
2150	bv      %r0(%r25)    /* r13 */
2151	copy    %r1,%r13
2152	bv      %r0(%r25)    /* r14 */
2153	copy    %r1,%r14
2154	bv      %r0(%r25)    /* r15 */
2155	copy    %r1,%r15
2156	bv      %r0(%r25)    /* r16 */
2157	copy    %r1,%r16
2158	bv      %r0(%r25)    /* r17 */
2159	copy    %r1,%r17
2160	bv      %r0(%r25)    /* r18 */
2161	copy    %r1,%r18
2162	bv      %r0(%r25)    /* r19 */
2163	copy    %r1,%r19
2164	bv      %r0(%r25)    /* r20 */
2165	copy    %r1,%r20
2166	bv      %r0(%r25)    /* r21 */
2167	copy    %r1,%r21
2168	bv      %r0(%r25)    /* r22 */
2169	copy    %r1,%r22
2170	bv      %r0(%r25)    /* r23 */
2171	copy    %r1,%r23
2172	bv      %r0(%r25)    /* r24 */
2173	copy    %r1,%r24
2174	bv      %r0(%r25)    /* r25 */
2175	copy    %r1,%r25
2176	bv      %r0(%r25)    /* r26 */
2177	copy    %r1,%r26
2178	bv      %r0(%r25)    /* r27 */
2179	copy    %r1,%r27
2180	bv      %r0(%r25)    /* r28 */
2181	copy    %r1,%r28
2182	bv      %r0(%r25)    /* r29 */
2183	copy    %r1,%r29
2184	bv      %r0(%r25)    /* r30 */
2185	copy    %r1,%r30
2186	bv      %r0(%r25)    /* r31 */
2187	copy    %r1,%r31
2188
2189