xref: /linux/arch/parisc/kernel/entry.S (revision 6eb2fb3170549737207974c2c6ad34bcc2f3025e)
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import         pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
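
	/* Illustrative sketch (added in editing, not part of the original
	 * source): virt_map queues a "return" to the virtual-mode label 4:
	 * by loading both words of the IIAOQ instruction queue and the
	 * kernel PSW into IPSW, then executes rfir.  Roughly:
	 *
	 *	ipsw  = KERNEL_PSW;
	 *	iiasq = 0;			// kernel space
	 *	iiaoq = { &&4f, &&4f + 4 };	// head/tail of insn queue
	 *	rfir();		// "returns" to 4: with translation on
	 */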

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */
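
	/* Rough C equivalent of the stack selection (an illustrative
	 * sketch added for clarity; field names are approximate):
	 *
	 *	if (mfsp(sr7) == 0) {		// already on a kernel stack
	 *		regs = (struct pt_regs *)sp;	// get_stack_use_r30
	 *		sp  += PT_SZ_ALGN;
	 *	} else {			// interrupted user space
	 *		regs = &cr30->task->regs;	// get_stack_use_cr30
	 *		sp   = cr30 + THREAD_SZ_ALGN;
	 *	}
	 */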

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl   %cr30, %r1
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r30, PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	STREG   %r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space register contains part of the upper 32 bits
	 * of the faulting virtual address.  We have to extract this and
	 * place it in the va, zeroing the corresponding bits in the
	 * space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
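
	/* Rough C equivalent (an illustrative sketch added in editing):
	 *
	 *	reg = PA(swapper_pg_dir);	// assume a kernel fault...
	 *	if (spc != 0)			// ...unless it's user space,
	 *		reg = mfctl(cr25);	// then use the user pgd
	 */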

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if the fault space is 0 */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
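
	/* Rough C equivalent (an illustrative sketch added in editing):
	 *
	 *	tmp = mfsp(sr7);
	 *	if (spc == 0)			// gateway page faults pass
	 *		tmp = spc;
	 *	if (tmp != 0 && tmp != spc)	// kernel (sr7 == 0) may fault
	 *		goto fault;		// on any space; others may not
	 */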

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  #else
  # if PAGE_SIZE > 4096
	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
  # else
	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  # endif
# endif
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm
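
	/* Rough C equivalent of the walk (an illustrative sketch added in
	 * editing; macro/field names are approximate):
	 *
	 *	pmd = pmd_base[pmd_index(va)];
	 *	if (!(pmd & PxD_PRESENT))
	 *		goto fault;
	 *	pmd &= ~PxD_FLAG_MASK;			// drop flag bits
	 *	pte_base = pmd << PxD_VALUE_SHIFT;	// physical table addr
	 *	pte = pte_base[pte_index(va)];
	 *	if (!(pte & _PAGE_PRESENT))
	 *		goto fault;
	 */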

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro		L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_ptep	ptep,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptep)
	.endm
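
	/* Rough C equivalent (an illustrative sketch added in editing):
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptep = pte | _PAGE_ACCESSED;
	 */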

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptep,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptep)
	.endm
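
	/* Roughly: *ptep = pte |= _PAGE_ACCESSED | _PAGE_DIRTY;
	 * (illustrative sketch added in editing) */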

	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	.endm
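
	/* Roughly (an illustrative sketch added in editing): keep only the
	 * PFN field of the software pte, rescale it from PAGE_SIZE units
	 * to the 4k units the TLB expects, and deposit the default
	 * page-size encoding in the low bits:
	 *
	 *	pfn = (pte >> ASM_PFN_PTE_SHIFT) << PAGE_ADD_SHIFT;
	 *	pte = pfn | _PAGE_SIZE_ENCODING_DEFAULT;
	 */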

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte
	.endm

	/* Identical to the make_insert_tlb macro above, except that it
	 * formats the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
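
	/* Rough C equivalent (an illustrative sketch added in editing):
	 * if the signed 4-bit field ending at bit 42 of the pte is all
	 * ones (the page lives in the 0xfXXXXXXX I/O range), sign-extend
	 * the pte so the f's propagate into the upper word:
	 *
	 *	if (sign_extract(pte, 42, 4) == -1)
	 *		pte = sign_extend_low(pte, 25);
	 */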

	/* The alias region is an 8MB-aligned 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" tlb entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	.text

	.align 4096

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE
ENTRY(end_fault_vector)

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
ENDPROC(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm     PSW_SM_I, %r0

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c).
	 *        2) Once we start executing code above 4 Gb, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	cmpib,COND(=),n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */
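
	/* Illustrative sketch of the common miss-handler flow (added in
	 * editing; names follow the macros defined earlier in this file):
	 *
	 *	space_adjust(spc, va);		// LP64 only
	 *	pgd = get_pgd(spc);
	 *	space_check(spc);		// else take the slow fault
	 *	pte = walk(pgd, va);		// L2_ptep / L3_ptep
	 *	update_ptep(pte);		// set _PAGE_ACCESSED
	 *	make_insert_tlb(spc, pte, prot);
	 *	idtlbt(pte, prot);		// or iitlbt / idtlba+idtlbp
	 *	rfir();				// restart the faulting insn
	 */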

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw] */
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b		nadtlb_nullify         /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot

	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot

	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1
	cmpib,COND(=)         0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot
#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_20w:
#endif

	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_11:
#endif

	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1

	idtlbt          pte,prot

#ifdef CONFIG_SMP
	cmpib,COND(=),n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_20:
#endif

	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */
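
	/* For reference, a userspace invocation looks roughly like this
	 * (an illustrative sketch added in editing, not part of this file;
	 * the gateway page is entered at offset 0x100 in space %sr2 and
	 * the syscall number travels in %r20):
	 *
	 *	ldi	1, %r26			; arg0 (args in %r26..%r23)
	 *	ble	0x100(%sr2, %r0)	; enter the gateway page
	 *	ldi	__NR_exit, %r20		; syscall number, delay slot
	 */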

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
ENDPROC(child_return)

ENTRY(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	ldw	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	  -PER_HPUX(%r19), %r19
	cmpib,COND(<>),n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG     %r22,TASK_PT_GR22(%r1)
	STREG     %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_resched	/* if resched, we start over again */
	nop
ENDPROC(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER
	.import ftrace_function_trampoline,code
ENTRY(_mcount)
	copy	%r3, %arg2
	b	ftrace_function_trampoline
	nop
ENDPROC(_mcount)

ENTRY(return_to_handler)
	load32	return_trampoline, %rp
	copy	%ret0, %arg0
	copy	%ret1, %arg1
	b	ftrace_return_to_handler
	nop
return_trampoline:
	copy	%ret0, %rp
	copy	%r23, %ret0
	copy	%r24, %ret1

.globl ftrace_stub
ftrace_stub:
	bv	%r0(%rp)
	nop
ENDPROC(return_to_handler)
#endif	/* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY(call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */
# ifdef CONFIG_64BIT
	/* Switch to new stack.  We allocate two 128 byte frames.  */
	ldo	256(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -144(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -136(%sp)
	LDREG	-144(%sp), %rp
	bve	(%rp)
	LDREG	-136(%sp), %sp
# else
	/* Switch to new stack.  We allocate two 64 byte frames.  */
	ldo	128(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -68(%sp)
	STREG	%rp, -84(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-84(%sp), %rp
	bv	(%rp)
	LDREG	-68(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

get_register:
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
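	/* Rough C equivalent (an illustrative sketch added in editing):
	 * the blr below indexes the table of two-instruction stubs that
	 * follows by the register number in r8, each stub returning
	 * through %r25, i.e. roughly:
	 *
	 *	r1 = is_shadowed(r8) ? -1 : gr[r8];
	 */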
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1


set_register:
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
