xref: /linux/arch/parisc/kernel/entry.S (revision c6ed444fd6fffaaf2e3857d926ed18bf3df81e8e)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 *  Copyright (C) 1999,2000 Philipp Rumpf
6 *  Copyright (C) 1999 SuSE GmbH Nuernberg
7 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 *    This program is free software; you can redistribute it and/or modify
11 *    it under the terms of the GNU General Public License as published by
12 *    the Free Software Foundation; either version 2, or (at your option)
13 *    any later version.
14 *
15 *    This program is distributed in the hope that it will be useful,
16 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
17 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 *    GNU General Public License for more details.
19 *
20 *    You should have received a copy of the GNU General Public License
21 *    along with this program; if not, write to the Free Software
22 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 *  - handle in assembly and use shadowed registers only
29 *  - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
34#include <asm/assembly.h>	/* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/ldcw.h>
39#include <asm/thread_info.h>
40
41#include <linux/linkage.h>
42
43#ifdef CONFIG_64BIT
44	.level 2.0w
45#else
46	.level 2.0
47#endif
48
49	.import		pa_tlb_lock,data
	/* Load the physical (and, if needed, ldcw-aligned) address of
	 * pa_tlb_lock into \reg.  ldcw requires 16-byte alignment on
	 * some CPUs, hence the round-up-and-mask variant. */
50	.macro  load_pa_tlb_lock reg
51#if __PA_LDCW_ALIGNMENT > 4
52	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
53	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
54#else
55	load32	PA(pa_tlb_lock), \reg
56#endif
57	.endm
58
59	/* space_to_prot macro creates a prot id from a space id */
60
61#if (SPACEID_SHIFT) == 0
62	.macro  space_to_prot spc prot
	/* no space bits in the VA: shift-left-and-deposit the space id */
63	depd,z  \spc,62,31,\prot
64	.endm
65#else
66	.macro  space_to_prot spc prot
	/* space id partly encoded in the VA: extract the upper bits */
67	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
68	.endm
69#endif
70
71	/* Switch to virtual mapping, trashing only %r1 */
72	.macro  virt_map
73	/* pcxt_ssm_bug */
74	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	/* zero %sr4-%sr6: kernel space */
75	mtsp	%r0, %sr4
76	mtsp	%r0, %sr5
77	mtsp	%r0, %sr6
78	tovirt_r1 %r29
79	load32	KERNEL_PSW, %r1
80
81	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
82	mtctl	%r0, %cr17	/* Clear IIASQ tail */
83	mtctl	%r0, %cr17	/* Clear IIASQ head */
84	mtctl	%r1, %ipsw
85	load32	4f, %r1
86	mtctl	%r1, %cr18	/* Set IIAOQ tail */
87	ldo	4(%r1), %r1
88	mtctl	%r1, %cr18	/* Set IIAOQ head */
	/* rfir resumes at local label 4 below, now with translation on */
89	rfir
90	nop
914:
92	.endm
93
94	/*
95	 * The "get_stack" macros are responsible for determining the
96	 * kernel stack value.
97	 *
98	 *      If sr7 == 0
99	 *          Already using a kernel stack, so call the
100	 *          get_stack_use_r30 macro to push a pt_regs structure
101	 *          on the stack, and store registers there.
102	 *      else
103	 *          Need to set up a kernel stack, so call the
104	 *          get_stack_use_cr30 macro to set up a pointer
105	 *          to the pt_regs structure contained within the
106	 *          task pointer pointed to by cr30. Set the stack
107	 *          pointer to point to the end of the task structure.
108	 *
109	 * Note that we use shadowed registers for temps until
110	 * we can save %r26 and %r29. %r26 is used to preserve
111	 * %r8 (a shadowed register) which temporarily contained
112	 * either the fault type ("code") or the eirr. We need
113	 * to use a non-shadowed register to carry the value over
114	 * the rfir in virt_map. We use %r26 since this value winds
115	 * up being passed as the argument to either do_cpu_irq_mask
116	 * or handle_interruption. %r29 is used to hold a pointer
117	 * to the register save area, and once again, it needs to
118	 * be a non-shadowed register so that it survives the rfir.
119	 *
120	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
121	 */
122
123	.macro  get_stack_use_cr30
124
125	/* we save the registers in the task struct */
126
	/* %r17 = interrupted (user) stack pointer */
127	copy	%r30, %r17
128	mfctl   %cr30, %r1
	/* kernel stack starts past the thread area pointed to by %cr30 */
129	ldo	THREAD_SZ_ALGN(%r1), %r30
130	mtsp	%r0,%sr7
131	mtsp	%r16,%sr3
132	tophys  %r1,%r9
133	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
134	tophys  %r1,%r9
135	ldo     TASK_REGS(%r9),%r9
136	STREG   %r17,PT_GR30(%r9)
137	STREG   %r29,PT_GR29(%r9)
138	STREG   %r26,PT_GR26(%r9)
139	STREG	%r16,PT_SR7(%r9)
	/* %r29 = physical address of the pt_regs save area */
140	copy    %r9,%r29
141	.endm
142
143	.macro  get_stack_use_r30

144
145	/* we put a struct pt_regs on the stack and save the registers there */
146
	/* %r9 = physical address of the new pt_regs, %r30 advanced past it */
147	tophys  %r30,%r9
148	copy	%r30,%r1
149	ldo	PT_SZ_ALGN(%r30),%r30
150	STREG   %r1,PT_GR30(%r9)
151	STREG   %r29,PT_GR29(%r9)
152	STREG   %r26,PT_GR26(%r9)
153	STREG	%r16,PT_SR7(%r9)
	/* %r29 = physical address of the pt_regs save area */
154	copy    %r9,%r29
155	.endm
156
	/* Restore %r1 and %r30 from the pt_regs pointed to by %r29,
	 * restoring %r29 itself last (since we are still using it). */
157	.macro  rest_stack
158	LDREG   PT_GR1(%r29), %r1
159	LDREG   PT_GR30(%r29),%r30
160	LDREG   PT_GR29(%r29),%r29
161	.endm
162
163	/* default interruption handler
164	 * (calls traps.c:handle_interruption) */
	/* The trap number is loaded into shadowed %r8 in the branch delay slot */
165	.macro	def code
166	b	intr_save
167	ldi     \code, %r8
168	.align	32
169	.endm
170
171	/* Interrupt interruption handler
172	 * (calls irq.c:do_cpu_irq_mask) */
	/* %sr7 is sampled in the delay slot so intr_extint can tell
	 * whether we interrupted user (sr7 != 0) or kernel context */
173	.macro	extint code
174	b	intr_extint
175	mfsp    %sr7,%r16
176	.align	32
177	.endm
178
179	.import	os_hpmc, code
180
181	/* HPMC handler */
	/* High Priority Machine Check: branch to os_hpmc via its
	 * physical address; the trailing words form the IVA-table
	 * checksum/address/length record (patched at boot). */
182	.macro	hpmc code
183	nop			/* must be a NOP, will be patched later */
184	load32	PA(os_hpmc), %r3
185	bv,n	0(%r3)
186	nop
187	.word	0		/* checksum (will be patched) */
188	.word	PA(os_hpmc)	/* address of handler */
189	.word	0		/* length of handler */
190	.endm
191
192	/*
193	 * Performance Note: Instructions will be moved up into
194	 * this part of the code later on, once we are sure
195	 * that the tlb miss handlers are close to final form.
196	 */
197
198	/* Register definitions for tlb miss handler macros */
199
200	va  = r8	/* virtual address for which the trap occurred */
201	spc = r24	/* space for which the trap occurred */
202
203#ifndef CONFIG_64BIT
204
205	/*
206	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
207	 */
208
	/* faulting space/offset come from the pcsq/pcoq front queues;
	 * the offset is loaded in the branch delay slot */
209	.macro	itlb_11 code
210
211	mfctl	%pcsq, spc
212	b	itlb_miss_11
213	mfctl	%pcoq, va
214
215	.align		32
216	.endm
217#endif
218
219	/*
220	 * itlb miss interruption handler (parisc 2.0)
221	 */
222
	/* faulting space/offset come from pcsq/pcoq; the 64-bit kernel
	 * uses the wide (20w) miss handler */
223	.macro	itlb_20 code
224	mfctl	%pcsq, spc
225#ifdef CONFIG_64BIT
226	b       itlb_miss_20w
227#else
228	b	itlb_miss_20
229#endif
230	mfctl	%pcoq, va
231
232	.align		32
233	.endm
234
235#ifndef CONFIG_64BIT
236	/*
237	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
238	 */
239
	/* non-access itlb miss: faulting space/offset are in isr/ior */
240	.macro	naitlb_11 code
241
242	mfctl	%isr,spc
243	b	naitlb_miss_11
244	mfctl 	%ior,va
245
246	.align		32
247	.endm
248#endif
249
250	/*
251	 * naitlb miss interruption handler (parisc 2.0)
252	 */
253
	/* non-access itlb miss: faulting space/offset are in isr/ior */
254	.macro	naitlb_20 code
255
256	mfctl	%isr,spc
257#ifdef CONFIG_64BIT
258	b       naitlb_miss_20w
259#else
260	b	naitlb_miss_20
261#endif
262	mfctl 	%ior,va
263
264	.align		32
265	.endm
266
267#ifndef CONFIG_64BIT
268	/*
269	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
270	 */
271
	/* data TLB miss: faulting space/offset are in isr/ior */
272	.macro	dtlb_11 code
273
274	mfctl	%isr, spc
275	b	dtlb_miss_11
276	mfctl	%ior, va
277
278	.align		32
279	.endm
280#endif
281
282	/*
283	 * dtlb miss interruption handler (parisc 2.0)
284	 */
285
	/* data TLB miss: faulting space/offset are in isr/ior */
286	.macro	dtlb_20 code
287
288	mfctl	%isr, spc
289#ifdef CONFIG_64BIT
290	b       dtlb_miss_20w
291#else
292	b	dtlb_miss_20
293#endif
294	mfctl	%ior, va
295
296	.align		32
297	.endm
298
299#ifndef CONFIG_64BIT
300	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
301
	/* non-access data TLB miss: faulting space/offset are in isr/ior */
302	.macro	nadtlb_11 code
303
304	mfctl	%isr,spc
305	b       nadtlb_miss_11
306	mfctl	%ior,va
307
308	.align		32
309	.endm
310#endif
311
312	/* nadtlb miss interruption handler (parisc 2.0) */
313
	/* non-access data TLB miss: faulting space/offset are in isr/ior */
314	.macro	nadtlb_20 code
315
316	mfctl	%isr,spc
317#ifdef CONFIG_64BIT
318	b       nadtlb_miss_20w
319#else
320	b       nadtlb_miss_20
321#endif
322	mfctl	%ior,va
323
324	.align		32
325	.endm
326
327#ifndef CONFIG_64BIT
328	/*
329	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
330	 */
331
	/* TLB dirty-bit trap: faulting space/offset are in isr/ior */
332	.macro	dbit_11 code
333
334	mfctl	%isr,spc
335	b	dbit_trap_11
336	mfctl	%ior,va
337
338	.align		32
339	.endm
340#endif
341
342	/*
343	 * dirty bit trap interruption handler (parisc 2.0)
344	 */
345
	/* TLB dirty-bit trap: faulting space/offset are in isr/ior */
346	.macro	dbit_20 code
347
348	mfctl	%isr,spc
349#ifdef CONFIG_64BIT
350	b       dbit_trap_20w
351#else
352	b	dbit_trap_20
353#endif
354	mfctl	%ior,va
355
356	.align		32
357	.endm
358
359	/* In LP64, the space contains part of the upper 32 bits of the
360	 * fault.  We have to extract this and place it in the va,
361	 * zeroing the corresponding bits in the space register */
362	.macro		space_adjust	spc,va,tmp
363#ifdef CONFIG_64BIT
	/* move the low SPACEID_SHIFT bits of \spc into bits below 32 of \va */
364	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
365	depd		%r0,63,SPACEID_SHIFT,\spc
366	depd		\tmp,31,SPACEID_SHIFT,\va
367#endif
368	.endm
369
370	.import		swapper_pg_dir,code
371
372	/* Get the pgd.  For faults on space zero (kernel space), this
373	 * is simply swapper_pg_dir.  For user space faults, the
374	 * pgd is stored in %cr25 */
375	.macro		get_pgd		spc,reg
376	ldil		L%PA(swapper_pg_dir),\reg
377	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	/* nullify the mfctl when \spc == 0, keeping swapper_pg_dir */
378	or,COND(=)	%r0,\spc,%r0
379	mfctl		%cr25,\reg
380	.endm
381
382	/*
383		space_check(spc,tmp,fault)
384
385		spc - The space we saw the fault with.
386		tmp - The place to store the current space.
387		fault - Function to call on failure.
388
389		Only allow faults on different spaces from the
390		currently active one if we're the kernel
391
392	*/
393	.macro		space_check	spc,tmp,fault
394	mfsp		%sr7,\tmp
395	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
396					 * as kernel, so defeat the space
397					 * check if it is */
	/* if \spc == 0 (kernel fault), force \tmp == \spc so the compare passes */
398	copy		\spc,\tmp
399	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
400	cmpb,COND(<>),n	\tmp,\spc,\fault
401	.endm
402
403	/* Look up a PTE in a 2-Level scheme (faulting at each
404	 * level if the entry isn't present)
405	 *
406	 * NOTE: we use ldw even for LP64, since the short pointers
407	 * can address up to 1TB
408	 */
409	.macro		L2_ptep	pmd,pte,index,va,fault
	/* compute the pgd/pmd index from the appropriate VA bits */
410#if CONFIG_PGTABLE_LEVELS == 3
411	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
412#else
413# if defined(CONFIG_64BIT)
414	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
415  #else
416  # if PAGE_SIZE > 4096
417	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
418  # else
419	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
420  # endif
421# endif
422#endif
423	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
424	copy		%r0,\pte
425	ldw,s		\index(\pmd),\pmd
	/* fault out (branch nullified) if the pmd entry is not present */
426	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
427	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
428	copy		\pmd,%r9
429	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
430	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
431	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
432	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
433	LDREG		%r0(\pmd),\pte
434	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
435	.endm
436
437	/* Look up PTE in a 3-Level scheme.
438	 *
439	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
440	 * first pmd adjacent to the pgd.  This means that we can
441	 * subtract a constant offset to get to it.  The pmd and pgd
442	 * sizes are arranged so that a single pmd covers 4GB (giving
443	 * a full LP64 process access to 8TB) so our lookups are
444	 * effectively L2 for the first 4GB of the kernel (i.e. for
445	 * all ILP32 processes and all the kernel for machines with
446	 * under 4GB of memory) */
	/* Each real instruction below is guarded by a nullifying
	 * extrd,u test of the high VA bits, so the pgd level is only
	 * walked for addresses above the first-4GB fast path. */
447	.macro		L3_ptep pgd,pte,index,va,fault
448#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
449	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
450	copy		%r0,\pte
451	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
452	ldw,s		\index(\pgd),\pgd
453	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
454	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
455	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
456	shld		\pgd,PxD_VALUE_SHIFT,\index
457	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
458	copy		\index,\pgd
459	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
460	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
461#endif
462	L2_ptep		\pgd,\pte,\index,\va,\fault
463	.endm
464
465	/* Acquire pa_tlb_lock lock and recheck page is still present. */
	/* SMP only; kernel-space faults (\spc == 0) skip the lock.
	 * Spins on ldcw at 1:, then reloads the pte; if the present bit
	 * was cleared while we waited, drop the lock and take \fault
	 * (the unlock store sits in the branch delay slot). */
466	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
467#ifdef CONFIG_SMP
468	cmpib,COND(=),n	0,\spc,2f
469	load_pa_tlb_lock \tmp
4701:	LDCW		0(\tmp),\tmp1
471	cmpib,COND(=)	0,\tmp1,1b
472	nop
473	LDREG		0(\ptp),\pte
474	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
475	b		\fault
476	stw		 \spc,0(\tmp)
4772:
478#endif
479	.endm
480
481	/* Release pa_tlb_lock lock without reloading lock address. */
	/* The or,COND(=) pairs nullify the sync and the store when
	 * \spc == 0, i.e. when tlb_lock never took the lock. */
482	.macro		tlb_unlock0	spc,tmp
483#ifdef CONFIG_SMP
484	or,COND(=)	%r0,\spc,%r0
485	sync
486	or,COND(=)	%r0,\spc,%r0
487	stw             \spc,0(\tmp)
488#endif
489	.endm
490
491	/* Release pa_tlb_lock lock. */
	/* Reloads the lock address into \tmp, then defers to tlb_unlock0 */
492	.macro		tlb_unlock1	spc,tmp
493#ifdef CONFIG_SMP
494	load_pa_tlb_lock \tmp
495	tlb_unlock0	\spc,\tmp
496#endif
497	.endm
498
499	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
500	 * don't needlessly dirty the cache line if it was already set */
501	.macro		update_accessed	ptp,pte,tmp,tmp1
502	ldi		_PAGE_ACCESSED,\tmp1
503	or		\tmp1,\pte,\tmp
	/* nullify the store unless the accessed bit was clear */
504	and,COND(<>)	\tmp1,\pte,%r0
505	STREG		\tmp,0(\ptp)
506	.endm
507
508	/* Set the dirty bit (and accessed bit).  No need to be
509	 * clever, this is only used from the dirty fault */
	/* unconditionally ORs the bits into \pte and writes it back */
510	.macro		update_dirty	ptp,pte,tmp
511	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
512	or		\tmp,\pte,\pte
513	STREG		\pte,0(\ptp)
514	.endm
515
516	/* We have (depending on the page size):
517	 * - 38 to 52-bit Physical Page Number
518	 * - 12 to 26-bit page offset
519	 */
520	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
521	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
522	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
523	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
524
525	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
526	.macro		convert_for_tlb_insert20 pte,tmp
527#ifdef CONFIG_HUGETLB_PAGE
	/* keep the original pte in \tmp so the huge-page bit can be tested
	 * after \pte has been rewritten */
528	copy		\pte,\tmp
529	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
530				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
531
532	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
533				(63-58)+PAGE_ADD_SHIFT,\pte
	/* if _PAGE_HPAGE_BIT is set, overwrite with the huge-page encoding */
534	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
535	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
536				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
537#else /* Huge pages disabled */
538	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
539				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
540	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
541				(63-58)+PAGE_ADD_SHIFT,\pte
542#endif
543	.endm
544
545	/* Convert the pte and prot to tlb insertion values.  How
546	 * this happens is quite subtle, read below */
547	.macro		make_insert_tlb	spc,pte,prot,tmp
548	space_to_prot   \spc \prot        /* create prot id from space */
549	/* The following is the real subtlety.  This is depositing
550	 * T <-> _PAGE_REFTRAP
551	 * D <-> _PAGE_DIRTY
552	 * B <-> _PAGE_DMB (memory break)
553	 *
554	 * Then incredible subtlety: The access rights are
555	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
556	 * See 3-14 of the parisc 2.0 manual
557	 *
558	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
559	 * trigger an access rights trap in user space if the user
560	 * tries to read an unreadable page */
561	depd            \pte,8,7,\prot
562
563	/* PAGE_USER indicates the page can be read with user privileges,
564	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
565	 * contains _PAGE_READ) */
566	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
567	depdi		7,11,3,\prot
568	/* If we're a gateway page, drop PL2 back to zero for promotion
569	 * to kernel privilege (so we can execute the page as kernel).
570	 * Any privilege promotion page always denies read and write */
571	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
572	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
573
574	/* Enforce uncacheable pages.
575	 * This should ONLY be used for MMIO on PA 2.0 machines.
576	 * Memory/DMA is cache coherent on all PA2.0 machines we support
577	 * (that means T-class is NOT supported) and the memory controllers
578	 * on most of those machines only handle cache transactions.
579	 */
580	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
581	depdi		1,12,1,\prot
582
583	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
584	convert_for_tlb_insert20 \pte \tmp
585	.endm
586
587	/* Identical macro to make_insert_tlb above, except it
588	 * makes the tlb entry for the differently formatted pa11
589	 * insertion instructions */
590	.macro		make_insert_tlb_11	spc,pte,prot
	/* prot id from the space, then T/D/B and access-rights bits */
591	zdep		\spc,30,15,\prot
592	dep		\pte,8,7,\prot
593	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
594	depi		1,12,1,\prot
595	extru,=         \pte,_PAGE_USER_BIT,1,%r0
596	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
597	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
598	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
599
600	/* Get rid of prot bits and convert to page addr for iitlba */
601
602	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
603	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
604	.endm
605
606	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
607	 * to extend into I/O space if the address is 0xfXXXXXXX
608	 * so we extend the f's into the top word of the pte in
609	 * this case */
	/* sign-extend only when the top nibble is all ones (\tmp == -1);
	 * the addi,<> nullifies the extend otherwise */
610	.macro		f_extend	pte,tmp
611	extrd,s		\pte,42,4,\tmp
612	addi,<>		1,\tmp,%r0
613	extrd,s		\pte,63,25,\pte
614	.endm
615
616	/* The alias region is an 8MB aligned 16MB to do clear and
617	 * copy user pages at addresses congruent with the user
618	 * virtual address.
619	 *
620	 * To use the alias page, you set %r26 up with the to TLB
621	 * entry (identifying the physical page) and %r23 up with
622	 * the from tlb entry (or nothing if only a to entry---for
623	 * clear_user_page_asm) */
	/* kernel-space only: user faults (\spc != 0) go straight to \fault */
624	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
625	cmpib,COND(<>),n 0,\spc,\fault
626	ldil		L%(TMPALIAS_MAP_START),\tmp
627#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
628	/* on LP64, ldi will sign extend into the upper 32 bits,
629	 * which is behaviour we don't want */
630	depdi		0,31,32,\tmp
631#endif
	/* mask the VA down to its 8MB region and check it is the alias region */
632	copy		\va,\tmp1
633	depi		0,31,23,\tmp1
634	cmpb,COND(<>),n	\tmp,\tmp1,\fault
635	mfctl		%cr19,\tmp	/* iir */
636	/* get the opcode (first six bits) into \tmp */
637	extrw,u		\tmp,5,6,\tmp
638	/*
639	 * Only setting the T bit prevents data cache movein
640	 * Setting access rights to zero prevents instruction cache movein
641	 *
642	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
643	 * to type field and _PAGE_READ goes to top bit of PL1
644	 */
645	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
646	/*
647	 * so if the opcode is one (i.e. this is a memory management
648	 * instruction) nullify the next load so \prot is only T.
649	 * Otherwise this is a normal data operation
650	 */
651	cmpiclr,=	0x01,\tmp,%r0
652	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
	/* assemble-time dispatch on the PA type argument (20 or 11) */
653.ifc \patype,20
654	depd,z		\prot,8,7,\prot
655.else
656.ifc \patype,11
657	depw,z		\prot,8,7,\prot
658.else
659	.error "undefined PA type to do_alias"
660.endif
661.endif
662	/*
663	 * OK, it is in the temp alias region, check whether "from" or "to".
664	 * Check "subtle" note in pacache.S re: r23/r26.
665	 */
666#ifdef CONFIG_64BIT
667	extrd,u,*=	\va,41,1,%r0
668#else
669	extrw,u,=	\va,9,1,%r0
670#endif
	/* pick \pte from %r23 ("from") or %r26 ("to") via nullification */
671	or,COND(tr)	%r23,%r0,\pte
672	or		%r26,%r0,\pte
673	.endm
674
675
676	/*
677	 * Fault_vectors are architecturally required to be aligned on a 2K
678	 * boundary
679	 */
680
681	.section .text.hot
682	.align 2048
683
684ENTRY(fault_vector_20)
685	/* First vector is invalid (0) */
686	.ascii	"cows can fly"
687	.byte 0
688	.align 32
689
	/* PA 2.0 interruption vectors 1-31; traps without a dedicated
	 * handler funnel through "def" to handle_interruption */
690	hpmc		 1
691	def		 2
692	def		 3
693	extint		 4
694	def		 5
695	itlb_20		 6
696	def		 7
697	def		 8
698	def              9
699	def		10
700	def		11
701	def		12
702	def		13
703	def		14
704	dtlb_20		15
705	naitlb_20	16
706	nadtlb_20	17
707	def		18
708	def		19
709	dbit_20		20
710	def		21
711	def		22
712	def		23
713	def		24
714	def		25
715	def		26
716	def		27
717	def		28
718	def		29
719	def		30
720	def		31
721END(fault_vector_20)
722
723#ifndef CONFIG_64BIT
724
725	.align 2048
726
727ENTRY(fault_vector_11)
728	/* First vector is invalid (0) */
729	.ascii	"cows can fly"
730	.byte 0
731	.align 32
732
	/* PA 1.1 interruption vectors 1-31 (32-bit kernels only) */
733	hpmc		 1
734	def		 2
735	def		 3
736	extint		 4
737	def		 5
738	itlb_11		 6
739	def		 7
740	def		 8
741	def              9
742	def		10
743	def		11
744	def		12
745	def		13
746	def		14
747	dtlb_11		15
748	naitlb_11	16
749	nadtlb_11	17
750	def		18
751	def		19
752	dbit_11		20
753	def		21
754	def		22
755	def		23
756	def		24
757	def		25
758	def		26
759	def		27
760	def		28
761	def		29
762	def		30
763	def		31
764END(fault_vector_11)
765
766#endif
767	/* Fault vector is separately protected and *must* be on its own page */
768	.align		PAGE_SIZE
769
770	.import		handle_interruption,code
771	.import		do_cpu_irq_mask,code
772
773	/*
774	 * Child Returns here
775	 *
776	 * copy_thread moved args into task save area.
777	 */
778
779ENTRY_CFI(ret_from_kernel_thread)
780	/* Call schedule_tail first though */
781	BL	schedule_tail, %r2
782	nop
783
	/* fetch the thread function (PT_GR26) and its argument (PT_GR25)
	 * back out of the task save area */
784	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
785	LDREG	TASK_PT_GR25(%r1), %r26
786#ifdef CONFIG_64BIT
787	LDREG	TASK_PT_GR27(%r1), %r27
788#endif
789	LDREG	TASK_PT_GR26(%r1), %r1
	/* call the thread function; %r2 gets the return point in the delay slot */
790	ble	0(%sr7, %r1)
791	copy	%r31, %r2
792	b	finish_child_return
793	nop
794ENDPROC_CFI(ret_from_kernel_thread)
795
796
797	/*
798	 * struct task_struct *_switch_to(struct task_struct *prev,
799	 *	struct task_struct *next)
800	 *
801	 * switch kernel stacks and return prev */
802ENTRY_CFI(_switch_to)
803	STREG	 %r2, -RP_OFFSET(%r30)
804
805	callee_save_float
806	callee_save
807
	/* record where prev resumes when it is switched back in */
808	load32	_switch_to_ret, %r2
809
810	STREG	%r2, TASK_PT_KPC(%r26)
811	LDREG	TASK_PT_KPC(%r25), %r2
812
	/* save prev's kernel stack, load next's, and continue at next's KPC */
813	STREG	%r30, TASK_PT_KSP(%r26)
814	LDREG	TASK_PT_KSP(%r25), %r30
815	LDREG	TASK_THREAD_INFO(%r25), %r25
816	bv	%r0(%r2)
817	mtctl   %r25,%cr30	/* delay slot: %cr30 = next's thread_info */
818ENDPROC_CFI(_switch_to)
819
820ENTRY_CFI(_switch_to_ret)
821	mtctl	%r0, %cr0		/* Needed for single stepping */
822	callee_rest
823	callee_rest_float
824
825	LDREG	-RP_OFFSET(%r30), %r2
826	bv	%r0(%r2)
827	copy	%r26, %r28	/* delay slot: return prev (%r26) in %r28 */
828ENDPROC_CFI(_switch_to_ret)
829
830	/*
831	 * Common rfi return path for interruptions, kernel execve, and
832	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
833	 * return via this path if the signal was received when the process
834	 * was running; if the process was blocked on a syscall then the
835	 * normal syscall_exit path is used.  All syscalls for traced
836	 * processes exit via intr_restore.
837	 *
838	 * XXX If any syscalls that change a processes space id ever exit
839	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
840	 * adjust IASQ[0..1].
841	 *
842	 */
843
844	.align	PAGE_SIZE
845
846ENTRY_CFI(syscall_exit_rfi)
847	mfctl   %cr30,%r16
848	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
849	ldo	TASK_REGS(%r16),%r16
850	/* Force iaoq to userspace, as the user has had access to our current
851	 * context via sigcontext. Also Filter the PSW for the same reason.
852	 */
	/* depi 3,31,2 forces privilege level 3 (user) into both IAOQ words */
853	LDREG	PT_IAOQ0(%r16),%r19
854	depi	3,31,2,%r19
855	STREG	%r19,PT_IAOQ0(%r16)
856	LDREG	PT_IAOQ1(%r16),%r19
857	depi	3,31,2,%r19
858	STREG	%r19,PT_IAOQ1(%r16)
859	LDREG   PT_PSW(%r16),%r19
860	load32	USER_PSW_MASK,%r1
861#ifdef CONFIG_64BIT
862	load32	USER_PSW_HI_MASK,%r20
863	depd    %r20,31,32,%r1
864#endif
865	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
866	load32	USER_PSW,%r1
867	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
868	STREG   %r19,PT_PSW(%r16)
869
870	/*
871	 * If we aren't being traced, we never saved space registers
872	 * (we don't store them in the sigcontext), so set them
873	 * to "proper" values now (otherwise we'll wind up restoring
874	 * whatever was last stored in the task structure, which might
875	 * be inconsistent if an interrupt occurred while on the gateway
876	 * page). Note that we may be "trashing" values the user put in
877	 * them, but we don't support the user changing them.
878	 */
879
880	STREG   %r0,PT_SR2(%r16)
881	mfsp    %sr3,%r19
882	STREG   %r19,PT_SR0(%r16)
883	STREG   %r19,PT_SR1(%r16)
884	STREG   %r19,PT_SR3(%r16)
885	STREG   %r19,PT_SR4(%r16)
886	STREG   %r19,PT_SR5(%r16)
887	STREG   %r19,PT_SR6(%r16)
888	STREG   %r19,PT_SR7(%r16)
889ENDPROC_CFI(syscall_exit_rfi)
890
891ENTRY_CFI(intr_return)
892	/* check for reschedule */
893	mfctl   %cr30,%r1
894	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
895	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
	/* otherwise fall through into intr_check_sig below */
896ENDPROC_CFI(intr_return)
897
898	.import do_notify_resume,code
899intr_check_sig:
900	/* As above */
901	mfctl   %cr30,%r1
902	LDREG	TI_FLAGS(%r1),%r19
903	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
904	and,COND(<>)	%r19, %r20, %r0
905	b,n	intr_restore	/* skip past if we've nothing to do */
906
907	/* This check is critical to having LWS
908	 * working. The IASQ is zero on the gateway
909	 * page and we cannot deliver any signals until
910	 * we get off the gateway page.
911	 *
912	 * Only do signals if we are returning to user space
913	 */
914	LDREG	PT_IASQ0(%r16), %r20
915	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
916	LDREG	PT_IASQ1(%r16), %r20
917	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
918
919	/* NOTE: We need to enable interrupts if we have to deliver
920	 * signals. We used to do this earlier but it caused kernel
921	 * stack overflows. */
922	ssm     PSW_SM_I, %r0
923
924	copy	%r0, %r25			/* long in_syscall = 0 */
925#ifdef CONFIG_64BIT
926	ldo	-16(%r30),%r29			/* Reference param save area */
927#endif
928
929	BL	do_notify_resume,%r2
930	copy	%r16, %r26			/* struct pt_regs *regs */
931
	/* re-check flags; more work may have become pending */
932	b,n	intr_check_sig
933
934intr_restore:
	/* %r16 points at the saved pt_regs; restore FP then general regs */
935	copy            %r16,%r29
936	ldo             PT_FR31(%r29),%r1
937	rest_fp         %r1
938	rest_general    %r29
939
940	/* inverse of virt_map */
941	pcxt_ssm_bug
942	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
943	tophys_r1       %r29
944
945	/* Restore space id's and special cr's from PT_REGS
946	 * structure pointed to by r29
947	 */
948	rest_specials	%r29
949
950	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
951	 * It also restores r1 and r30.
952	 */
953	rest_stack
954
955	rfi
956	nop
957
958#ifndef CONFIG_PREEMPT
959# define intr_do_preempt	intr_restore
960#endif /* !CONFIG_PREEMPT */
961
962	.import schedule,code
963intr_do_resched:
964	/* Only call schedule on return to userspace. If we're returning
965	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
966	 * we jump back to intr_restore.
967	 */
	/* IASQ == 0 in both words means we interrupted kernel space */
968	LDREG	PT_IASQ0(%r16), %r20
969	cmpib,COND(=)	0, %r20, intr_do_preempt
970	nop
971	LDREG	PT_IASQ1(%r16), %r20
972	cmpib,COND(=)	0, %r20, intr_do_preempt
973	nop
974
975	/* NOTE: We need to enable interrupts if we schedule.  We used
976	 * to do this earlier but it caused kernel stack overflows. */
977	ssm     PSW_SM_I, %r0
978
979#ifdef CONFIG_64BIT
980	ldo	-16(%r30),%r29		/* Reference param save area */
981#endif
982
	/* call schedule with %r2 (return point) = intr_check_sig */
983	ldil	L%intr_check_sig, %r2
984#ifndef CONFIG_64BIT
985	b	schedule
986#else
987	load32	schedule, %r20
988	bv	%r0(%r20)
989#endif
990	ldo	R%intr_check_sig(%r2), %r2
991
992	/* preempt the current task on returning to kernel
993	 * mode from an interrupt, iff need_resched is set,
994	 * and preempt_count is 0. otherwise, we continue on
995	 * our merry way back to the current running task.
996	 */
997#ifdef CONFIG_PREEMPT
998	.import preempt_schedule_irq,code
999intr_do_preempt:
1000	rsm	PSW_SM_I, %r0		/* disable interrupts */
1001
1002	/* current_thread_info()->preempt_count */
1003	mfctl	%cr30, %r1
1004	LDREG	TI_PRE_COUNT(%r1), %r19
1005	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
1006	nop				/* prev insn branched backwards */
1007
1008	/* check if we interrupted a critical path */
	/* PSW_SM_I clear in the saved PSW means interrupts were off */
1009	LDREG	PT_PSW(%r16), %r20
1010	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
1011	nop
1012
1013	BL	preempt_schedule_irq, %r2
1014	nop
1015
1016	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
1017#endif /* CONFIG_PREEMPT */
1018
1019	/*
1020	 * External interrupts.
1021	 */
1022
	/* %r16 holds the interrupted %sr7 (set by the extint macro):
	 * zero means we were already on a kernel stack */
1023intr_extint:
1024	cmpib,COND(=),n 0,%r16,1f
1025
1026	get_stack_use_cr30
1027	b,n 2f
1028
10291:
1030	get_stack_use_r30
10312:
1032	save_specials	%r29
1033	virt_map
1034	save_general	%r29
1035
1036	ldo	PT_FR0(%r29), %r24
1037	save_fp	%r24
1038
1039	loadgp
1040
1041	copy	%r29, %r26	/* arg0 is pt_regs */
1042	copy	%r29, %r16	/* save pt_regs */
1043
1044	ldil	L%intr_return, %r2
1045
1046#ifdef CONFIG_64BIT
1047	ldo	-16(%r30),%r29	/* Reference param save area */
1048#endif
1049
1050	b	do_cpu_irq_mask
1051	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
1052
1053
1054	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1055
	/* On entry shadowed %r8 holds the trap code (set by "def");
	 * it is preserved in %r26 across the rfir in virt_map */
1056ENTRY_CFI(intr_save)		/* for os_hpmc */
1057	mfsp    %sr7,%r16
1058	cmpib,COND(=),n 0,%r16,1f
1059	get_stack_use_cr30
1060	b	2f
1061	copy    %r8,%r26
1062
10631:
1064	get_stack_use_r30
1065	copy    %r8,%r26
1066
10672:
1068	save_specials	%r29
1069
1070	/* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1071
1072	/*
1073	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1074	 *           traps.c.
1075	 *        2) Once we start executing code above 4 Gb, we need
1076	 *           to adjust iasq/iaoq here in the same way we
1077	 *           adjust isr/ior below.
1078	 */
1079
1080	cmpib,COND(=),n        6,%r26,skip_save_ior
1081
1082
1083	mfctl           %cr20, %r16 /* isr */
1084	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1085	mfctl           %cr21, %r17 /* ior */
1086
1087
1088#ifdef CONFIG_64BIT
1089	/*
1090	 * If the interrupted code was running with W bit off (32 bit),
1091	 * clear the b bits (bits 0 & 1) in the ior.
1092	 * save_specials left ipsw value in r8 for us to test.
1093	 */
1094	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1095	depdi           0,1,2,%r17
1096
1097	/*
1098	 * FIXME: This code has hardwired assumptions about the split
1099	 *        between space bits and offset bits. This will change
1100	 *        when we allow alternate page sizes.
1101	 */
1102
1103	/* adjust isr/ior. */
1104	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
1105	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
1106	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
1107#endif
1108	STREG           %r16, PT_ISR(%r29)
1109	STREG           %r17, PT_IOR(%r29)
1110
1111
1112skip_save_ior:
1113	virt_map
1114	save_general	%r29
1115
1116	ldo		PT_FR0(%r29), %r25
1117	save_fp		%r25
1118
1119	loadgp
1120
1121	copy		%r29, %r25	/* arg1 is pt_regs */
1122#ifdef CONFIG_64BIT
1123	ldo		-16(%r30),%r29	/* Reference param save area */
1124#endif
1125
	/* call handle_interruption(code=%r26, regs=%r25), returning to
	 * intr_check_sig (loaded into %r2, completed in the delay slot) */
1126	ldil		L%intr_check_sig, %r2
1127	copy		%r25, %r16	/* save pt_regs */
1128
1129	b		handle_interruption
1130	ldo		R%intr_check_sig(%r2), %r2
1131ENDPROC_CFI(intr_save)
1132
1133
1134	/*
1135	 * Note for all tlb miss handlers:
1136	 *
1137	 * cr24 contains a pointer to the kernel address space
1138	 * page directory.
1139	 *
1140	 * cr25 contains a pointer to the current user address
1141	 * space page directory.
1142	 *
1143	 * sr3 will contain the space id of the user address space
1144	 * of the current running thread while that thread is
1145	 * running in the kernel.
1146	 */
1147
1148	/*
1149	 * register number allocations.  Note that these are all
1150	 * in the shadowed registers
1151	 */
1152
	/* assembler-level aliases used by all of the miss handlers below */
1153	t0 = r1		/* temporary register 0 */
1154	va = r8		/* virtual address for which the trap occurred */
1155	t1 = r9		/* temporary register 1 */
1156	pte  = r16	/* pte/phys page # */
1157	prot = r17	/* prot bits */
1158	spc  = r24	/* space for which the trap occurred */
1159	ptp = r25	/* page directory/page table pointer */
1160
1161#ifdef CONFIG_64BIT
1162
/* Data TLB miss handler, PA 2.0 wide (64-bit) kernel. */
dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	/* 3-level page table walk; branches to the alias check below
	 * if no valid pte exists for va. */
	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	/* Mark the pte accessed under the TLB lock, build the hardware
	 * TLB entry and insert it, then return from the interruption. */
	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	/* No pte found: let do_alias set up pte/prot for an aliased
	 * mapping, or branch to dtlb_fault for the slow path. */
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop
1188
/* Non-access data TLB miss handler (fdc/fic/pdc/probe etc.),
 * PA 2.0 wide (64-bit) kernel. */
nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	/* No pte: non-access faults with no translation fall through
	 * do_alias into nadtlb_emulate instead of a real fault. */
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop
1214
1215#else
1216
/* Data TLB miss handler, PA 1.1 (separate idtlba/idtlbp inserts). */
dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	/* PA 1.1 TLB inserts are addressed through a space register,
	 * so temporarily point %sr1 at the faulting space. */
	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	/* No pte: try the alias path, else fault through dtlb_fault. */
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop
1249
/* Non-access data TLB miss handler, PA 1.1. */
nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	/* Insert through %sr1 pointing at the faulting space. */
	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	/* No pte: fall into the non-access emulation path. */
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop
1282
/* Data TLB miss handler, PA 2.0 narrow (32-bit kernel on PA 2.0 CPU). */
dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	/* Widen the physical address for the 64-bit idtlbt insert. */
	f_extend	pte,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	/* No pte: alias path, else slow path via dtlb_fault. */
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop
1310
/* Non-access data TLB miss handler, PA 2.0 narrow. */
nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	/* Widen the physical address for the 64-bit idtlbt insert. */
	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	/* No pte: fall into the non-access emulation path. */
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop
1338
1339#endif
1340
nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	/* Emulate the base-register modification (base += index).
	 * get_register returns the requested register's value in %r1,
	 * or -1 for shadowed registers we cannot read here; the
	 * register number is loaded into %r8 in the BL delay slot. */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	/* Set PSW_N in the interrupted context's ipsw so the faulting
	 * instruction is nullified (skipped) when we rfir. */
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop
1386
	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	/* Verify this really is a probe,[rw] insn, then write zero to
	 * its target register (via set_register) and nullify it. */
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop
1411
1412
1413#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 * (PA 2.0 wide instruction TLB miss handler.)
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	/* Mark accessed under the TLB lock, insert, and return. */
	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop
1437
naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 * (PA 2.0 wide non-access instruction TLB miss handler.)
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	/* No pte: alias path, else slow path via naitlb_fault. */
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop
1469
1470#else
1471
/* Instruction TLB miss handler, PA 1.1. */
itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	/* PA 1.1 TLB inserts go through a space register. */
	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop
1495
/* Non-access instruction TLB miss handler, PA 1.1. */
naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	/* NOTE(review): the fall-back target here is itlb_fault, while
	 * the PA 2.0 variants use naitlb_fault -- confirm this
	 * asymmetry is intentional before changing it. */
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop
1528
1529
/* Instruction TLB miss handler, PA 2.0 narrow. */
itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	/* Widen the physical address for the 64-bit iitlbt insert. */
	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop
1549
/* Non-access instruction TLB miss handler, PA 2.0 narrow. */
naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	/* No pte: alias path, else slow path via naitlb_fault. */
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop
1577
1578#endif
1579
1580#ifdef CONFIG_64BIT
1581
/* Dirty-bit trap handler (store to a clean page), PA 2.0 wide:
 * mark the pte dirty and re-insert the translation. */
dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
1599#else
1600
/* Dirty-bit trap handler, PA 1.1. */
dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop
1625
/* Dirty-bit trap handler, PA 2.0 narrow. */
dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
1645#endif
1646
	.import handle_interruption,code

	/*
	 * Slow-path fault stubs.  Each one loads its interruption code
	 * into %r8 (in the branch delay slot) and enters the common
	 * intr_save path, which eventually calls handle_interruption.
	 */
kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8
1672
1673	/* Register saving semantics for system calls:
1674
1675	   %r1		   clobbered by system call macro in userspace
1676	   %r2		   saved in PT_REGS by gateway page
1677	   %r3  - %r18	   preserved by C code (saved by signal code)
1678	   %r19 - %r20	   saved in PT_REGS by gateway page
1679	   %r21 - %r22	   non-standard syscall args
1680			   stored in kernel stack by gateway page
1681	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
1682	   %r27 - %r30	   saved in PT_REGS by gateway page
1683	   %r31		   syscall return pointer
1684	 */
1685
1686	/* Floating point registers (FIXME: what do we do with these?)
1687
1688	   %fr0  - %fr3	   status/exception, not preserved
1689	   %fr4  - %fr7	   arguments
1690	   %fr8	 - %fr11   not preserved by C code
1691	   %fr12 - %fr21   preserved by C code
1692	   %fr22 - %fr31   not preserved by C code
1693	 */
1694
	/* Save the callee-saved registers %r3-%r18 into the pt_regs
	 * block pointed to by \regs. */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm
1713
	/* Restore the callee-saved registers %r3-%r18 from the pt_regs
	 * block pointed to by \regs (inverse of reg_save). */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1732
	/* Emit a sys_<name>_wrapper that saves the callee-saved
	 * registers and %cr27 into the task's pt_regs before entering
	 * sys_\name (register state must be complete for fork-like
	 * calls).  %cr27 is stored in the external-branch delay slot. */
	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like fork
fork_like vfork
1748
	/* Set the return value for the child */
ENTRY_CFI(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	/* Restore %cr27 and callee-saves, then exit through the
	 * normal syscall path with a return value of 0 (the child's
	 * view of fork/clone/vfork). */
	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28	/* child returns 0 */
ENDPROC_CFI(child_return)
1763
/* Wrapper for sys_rt_sigreturn: calls the C handler with a pointer
 * to the task's pt_regs, then restores the register state the
 * handler rebuilt from the signal frame. */
ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1793
/*
 * Common syscall exit path: stores the return value, handles pending
 * reschedule and signals, then either returns straight to user space
 * or (for traced/single-stepped tasks) exits via an RFI through
 * intr_restore so PSW T/R bits can be set.
 */
ENTRY_CFI(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl     %cr30, %r1
	LDREG     TI_TASK(%r1),%r1
	STREG     %r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	/* Re-check: more signals may have arrived meanwhile. */
	b,n     syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi	/* traced: must exit via RFI */

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG   TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
	rsm     PSW_SM_I, %r0
	copy    %r1,%r30                           /* Restore user sp */
	mfsp    %sr3,%r1                           /* Get user space id */
	mtsp    %r1,%sr7                           /* Restore sr7 */
	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			   /*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
	ldi	0x0b,%r20			   /* Create new PSW */
	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			   /* R bit */

	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			   /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG   %r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	/* D bit was set: pt_regs already complete, just fix up the
	 * instruction address queue and exit via intr_restore. */
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		   /* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
ENDPROC_CFI(syscall_exit)
1994
1995
1996#ifdef CONFIG_FUNCTION_TRACER
1997
	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
	.globl mcount
	.type  mcount, @function
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	/* ftrace_stub: simply return to the caller. */
	.globl ftrace_stub
        .type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)
2027
2028#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
	.globl return_to_handler
	.type  return_to_handler, @function
/*
 * Function-graph tracer return trampoline: preserves the traced
 * function's return values (%ret0/%ret1), calls
 * ftrace_return_to_handler(0) to obtain the original return address,
 * then restores the return values and returns there.
 */
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp	/* handler returned the original return address */

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)
2072
2073#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2074
2075#endif	/* CONFIG_FUNCTION_TRACER */
2076
2077#ifdef CONFIG_IRQSTACKS
2078/* void call_on_stack(unsigned long param1, void *func,
2079		      unsigned long new_stack) */
/*
 * call_on_stack(param1 = %arg0, func = %arg1, new_stack = %arg2):
 * switch to new_stack, call func(param1), then restore the original
 * stack pointer and return.
 */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
2116#endif /* CONFIG_IRQSTACKS */
2117
ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 *
	 * In:  %r8  = register number (0-31), %r25 = return address.
	 * Out: %r1  = register value, or -1 if the register is shadowed.
	 */
	/* blr indexes into the jump table below: one two-instruction
	 * (8-byte) slot per register number. */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
ENDPROC_CFI(get_register)
2196
2197
ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 *
	 * In: %r1 = value to store, %r8 = destination register number
	 *     (0-31), %r25 = return address.
	 */
	/* blr indexes into the jump table below: one two-instruction
	 * (8-byte) slot per register number. */
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
ENDPROC_CFI(set_register)
2271
2272