xref: /linux/arch/parisc/kernel/entry.S (revision 9a379e77033f02c4a071891afdf0f0a01eff8ccb)
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/thread_info.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_tlb_lock,data
	.macro  load_pa_tlb_lock reg
#if __PA_LDCW_ALIGNMENT > 4
	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
#else
	load32	PA(pa_tlb_lock), \reg
#endif
	.endm
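
	/* A hedged C sketch of the address math above: round the lock's
	 * physical address up to __PA_LDCW_ALIGNMENT, as ldcw requires
	 * (illustrative only; names as used in the macro):
	 *
	 *   unsigned long addr = PA(&pa_tlb_lock) + __PA_LDCW_ALIGNMENT - 1;
	 *   addr &= ~((1UL << __PA_LDCW_ALIGN_ORDER) - 1);  // depi clears low bits
	 */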

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
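
	/* Roughly, in C (a sketch, not authoritative): with SPACEID_SHIFT
	 * of 0 the depd,z amounts to prot = (spc & 0x7fffffffUL) << 1;
	 * otherwise the extrd,u is roughly
	 * prot = (spc >> (SPACEID_SHIFT - 1)) & 0xffffffffUL.
	 */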

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
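
	/* What virt_map does, as a hedged pseudo-C sketch: it fabricates an
	 * interruption state whose "return" lands in virtual mode at 4: above:
	 *
	 *   disable_interrupts();          // rsm PSW_SM_I
	 *   sr4 = sr5 = sr6 = 0;           // kernel space registers
	 *   ipsw  = KERNEL_PSW;            // PSW to load on rfir
	 *   iiasq = 0;                     // space queue: head and tail
	 *   iiaoq = { &&4f, &&4f + 4 };    // offset queue -> label 4
	 *   rfir();                        // "return from interruption"
	 */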

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer to
	 * the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm
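
	/* Hedged C sketch of the above (illustrative; names as in the code):
	 *
	 *   struct thread_info *ti = (void *)mfctl(30);   // cr30
	 *   sp = (unsigned long)ti + THREAD_SZ_ALGN;      // fresh kernel sp
	 *   regs = TASK_REGS(tophys(ti->task));           // pt_regs in the task
	 *   regs->gr[30] = old_sp;                        // then r29, r26, sr7
	 *   r29 = regs;                                   // save-area pointer
	 */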

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault address.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
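
	/* space_check in rough C (a sketch; the nullification games above
	 * implement exactly this):
	 *
	 *   if (spc != 0) {                  // spc==0: kernel fault, allow
	 *       tmp = mfsp(7);               // currently active space
	 *       if (tmp != 0 && tmp != spc)  // sr7==0: running as kernel, allow
	 *           goto fault;
	 *   }
	 */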

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# else
#  if PAGE_SIZE > 4096
	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
#  else
	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#  endif
# endif
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy		\pmd,%r9
	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	LDREG		%r0(\pmd),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
	.endm
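
	/* A hedged C model of the walk above (a sketch; PGD_IDX_MASK and
	 * PTE_IDX_MASK are stand-ins for the extru field widths):
	 *
	 *   u32 *pmdp = pmd_base + ((va >> ASM_PGDIR_SHIFT) & PGD_IDX_MASK);
	 *   if (!(*pmdp & PxD_PRESENT)) goto fault;
	 *   pte_t *ptep = (void *)((*pmdp & ~PxD_FLAGS) << PxD_VALUE_SHIFT);
	 *   pte = ptep[(va >> PAGE_SHIFT) & PTE_IDX_MASK];
	 *   if (!(pte & _PAGE_PRESENT)) goto fault;
	 */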

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kB page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy		%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire pa_tlb_lock lock and recheck page is still present. */
	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,\spc,2f
	load_pa_tlb_lock \tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
	b		\fault
	stw		 \spc,0(\tmp)
2:
#endif
	.endm
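
	/* Roughly, in C (a sketch): the classic PA-RISC ldcw spinlock, plus
	 * a recheck of the PTE under the lock.  Note that the stw in the
	 * branch delay slot releases the lock on the way out to \fault:
	 *
	 *   if (spc != 0) {                       // kernel faults skip the lock
	 *       while (__ldcw(&pa_tlb_lock) == 0) // 0 means somebody holds it
	 *           ;                             // spin
	 *       pte = *ptp;
	 *       if (!(pte & _PAGE_PRESENT)) {
	 *           pa_tlb_lock = spc;            // unlock (store nonzero)
	 *           goto fault;
	 *       }
	 *   }
	 */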

	/* Release pa_tlb_lock lock without reloading lock address. */
	.macro		tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	stw             \spc,0(\tmp)
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro		tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
	load_pa_tlb_lock \tmp
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptp)
	.endm
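
	/* In rough C (a sketch): the and,COND(<>) nullifies the store when
	 * the bit is already set, so the cache line is written at most once:
	 *
	 *   if (!(pte & _PAGE_ACCESSED))
	 *       *ptp = pte | _PAGE_ACCESSED;
	 */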

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm
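
	/* Conceptually (a hedged sketch, eliding the exact bit positions):
	 * strip the software prot bits, rescale the PFN from the kernel
	 * PAGE_SIZE to the 4k PFN the TLB expects, and deposit the
	 * page-size encoding in the low bits:
	 *
	 *   pfn = pte >> ASM_PFN_PTE_SHIFT;
	 *   pte = (pfn << PAGE_ADD_SHIFT) | _PAGE_SIZE_ENCODING_DEFAULT;
	 *   // huge pages use PAGE_ADD_HUGE_SHIFT and the huge encoding
	 */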

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page) */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
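
	/* Roughly (a sketch; sign_extend_low_25_bits is a hypothetical
	 * helper): test whether the four bits picked out by the extrd,s
	 * are all ones (an 0xfXXXXXXX I/O address) and, if so, sign-extend
	 * so the f's propagate into the upper word:
	 *
	 *   if (((pte >> 21) & 0xf) == 0xf)
	 *       pte = sign_extend_low_25_bits(pte);
	 */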

	/* The alias region is an 8MB-aligned, 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 6
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE
ENTRY(end_fault_vector)

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY_CFI(ret_from_kernel_thread)

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
ENDPROC_CFI(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)
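
	/* _switch_to in hedged C (a sketch; prev arrives in %r26, next
	 * in %r25):
	 *
	 *   prev->thread.kpc = &&_switch_to_ret;  // where prev resumes
	 *   prev->thread.ksp = sp;                // park prev's stack
	 *   sp = next->thread.ksp;                // adopt next's stack
	 *   mtctl(task_thread_info(next), 30);    // cr30 = new thread_info
	 *   goto *next->thread.kpc;               // usually _switch_to_ret,
	 *                                         // which returns prev in %r28
	 */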

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)
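
	/* The fixup above, roughly, in C (a sketch):
	 *
	 *   regs->iaoq[0] |= 3;  regs->iaoq[1] |= 3;   // force user privilege
	 *   psw = (psw & USER_PSW_MASK) | USER_PSW;    // stored back at PT_PSW
	 *   regs->sr[2] = 0;                           // LWS gateway space
	 *   regs->sr[0] = regs->sr[1] = regs->sr[3..7] = mfsp(3);
	 */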

intr_return:
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm     PSW_SM_I, %r0

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0.  Otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *           traps.c).
	 *        2) Once we start executing code above 4 GB, we need
	 *           to adjust iasq/iaoq here in the same way we
	 *           adjust isr/ior below.
	 */

	cmpib,COND(=),n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *        between space bits and offset bits. This will change
	 *        when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)
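
	/* The 64-bit isr/ior adjustment above, roughly (a sketch, with
	 * SPC_MASK standing in for (1UL << SPACEID_SHIFT) - 1):
	 *
	 *   ior = (ior & ~(SPC_MASK << 32)) | ((isr & SPC_MASK) << 32);
	 *   isr &= ~SPC_MASK;
	 */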


skip_save_ior:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */
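
	/* The decode below, roughly, in C (a sketch; get_register returns
	 * -1 for shadowed registers, forcing the slow path):
	 *
	 *   u32 insn = mfctl(19);                         // iir
	 *   if ((insn & 0x280) != 0x280) goto nadtlb_probe_check;
	 *   if (!((insn >> 5) & 1)) goto nadtlb_nullify;   // m bit clear
	 *   long x = get_register((insn >> 16) & 31);      // index reg
	 *   long b = get_register((insn >> 21) & 31);      // base reg
	 *   if (x == -1 || b == -1) goto nadtlb_fault;     // slow path
	 *   set_register((insn >> 21) & 31, b + x);        // base update
	 *   goto nadtlb_nullify;
	 */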

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm
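
	/* fork_like in rough C (a sketch): save the callee-saves and cr27
	 * into pt_regs so the child can restore them, then tail-call the
	 * real syscall via an external branch:
	 *
	 *   struct pt_regs *regs = task_regs(current);
	 *   save_r3_to_r18(regs);          // reg_save
	 *   regs->cr27 = mfctl(27);
	 *   return sys_<name>(args...);    // be: no return to the wrapper
	 */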

fork_like clone
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY_CFI(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
ENDPROC_CFI(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY_CFI(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n     syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
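
	/* The wide-mode handling above, roughly (a sketch):
	 *
	 *   if (!(sp & 1))         // W bit was stashed in the low sp bit
	 *       psw &= ~PSW_W;     // rsm PSW_SM_W
	 *   sp &= ~1UL;            // xor clears the flag bit if it was set
	 */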
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
ENDPROC_CFI(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
	.globl mcount
	.type  mcount, @function
ENTRY(mcount)
_mcount:
	.export _mcount,data
	.proc
	.callinfo caller,frame=0
	.entry
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
	.exit
	.procend
ENDPROC(mcount)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
	.globl return_to_handler
	.type  return_to_handler, @function
ENTRY_CFI(return_to_handler)
	.proc
	.callinfo caller,frame=FRAME_SIZE
	.entry
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
	.exit
	.procend
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif	/* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */
# ifdef CONFIG_64BIT
	/* Switch to new stack.  We allocate two 128 byte frames.  */
	ldo	256(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -144(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -136(%sp)
	LDREG	-144(%sp), %rp
	bve	(%rp)
	LDREG	-136(%sp), %sp
# else
	/* Switch to new stack.  We allocate two 64 byte frames.  */
	ldo	128(%arg2), %sp
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -68(%sp)
	STREG	%rp, -84(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-84(%sp), %rp
	bv	(%rp)
	LDREG	-68(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
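
	/* Hedged usage sketch (the prototype is the comment above;
	 * my_handler and irq_stack_top are hypothetical names):
	 *
	 *   void call_on_stack(unsigned long param1, void *func,
	 *                      unsigned long new_stack);
	 *   // run my_handler(arg) on a dedicated stack, then return
	 *   // on the original one:
	 *   call_on_stack(arg, my_handler, irq_stack_top);
	 */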
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non-access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
ENDPROC_CFI(get_register)
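
	/* Contract sketch (hedged): r8 holds the register number in, r1 the
	 * value out; shadowed registers (r1, r8, r9, r16, r17, r24, r25)
	 * report -1, roughly:
	 *
	 *   long get_register(int n) { return is_shadowed(n) ? -1 : gr[n]; }
	 */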


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non-access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
ENDPROC_CFI(set_register)