/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 *  Copyright (C) 1999,2000 Philipp Rumpf
 *  Copyright (C) 1999 SuSE GmbH Nuernberg
 *  Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *  Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	/* Get aligned page_table_lock address for this mm from cr28/tr4 */
	.macro  get_ptl reg
	mfctl	%cr28,\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
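
	/* Illustrative example (not assembled anywhere): with
	 * SPACEID_SHIFT == 0, a space id of 0x1234 in \spc yields
	 * \prot == 0x2468 -- the depd,z deposits the 31-bit space id one
	 * bit to the left of the low bit, i.e. the prot id is simply the
	 * space id shifted left by one.
	 */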
	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Load the stack
	 *          pointer from the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 */

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	tophys  %r1,%r9		/* task_struct */
	LDREG	TASK_STACK(%r9),%r30
	ldo	PT_SZ_ALGN(%r30),%r30
	mtsp	%r0,%sr7	/* clear sr7 after kernel stack was set! */
	mtsp	%r16,%sr3
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm
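
	/* For orientation, a sketch of how these macros get used (the
	 * real instances are in intr_extint/intr_save further down):
	 *
	 *	mfsp	%sr7,%r16
	 *	cmpib,COND(=),n 0,%r16,1f
	 *	get_stack_use_cr30	; came from user space
	 *	b,n	2f
	 * 1:	get_stack_use_r30	; already on a kernel stack
	 * 2:	...
	 */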

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl 	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl 	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b       dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space register contains part of the upper 32 bits
	 * of the faulting address.  We have to extract this and place it
	 * in the va, zeroing the corresponding bits in the space register */
	.macro		space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
	depd		%r0,63,SPACEID_SHIFT,\spc
	depd		\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
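
	/* Worked outline (64-bit case): the low SPACEID_SHIFT bits of
	 * \spc carry address bits, so the extrd,u copies them into \tmp,
	 * the first depd zeroes them in \spc, and the second depd merges
	 * them into the upper 32 bits of \va, reassembling the full
	 * virtual address before the page table walk.
	 */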

	.import		swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro		get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm
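
	/* Note on the branch-free selection above: "or,COND(=) %r0,\spc,%r0"
	 * computes nothing but nullifies the following mfctl when \spc is
	 * zero, so kernel-space faults keep the swapper_pg_dir address in
	 * \reg while user-space faults overwrite it with the pgd from %cr25.
	 */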

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro		space_check	spc,tmp,fault
	mfsp		%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy		\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
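
	/* Rough C equivalent of the checks above (a sketch, not
	 * authoritative):
	 *
	 *	tmp = mfsp(%sr7);
	 *	if (spc == 0)		// fault in gateway/kernel space:
	 *		tmp = spc;	//  force the compare to pass
	 *	if (tmp != 0 && tmp != spc)	// user running on a foreign
	 *		goto fault;		//  space is not allowed
	 */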

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro		L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  #else
  # if PAGE_SIZE > 4096
	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
  # else
	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  # endif
# endif
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
	copy		%r0,\pte
#endif
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm
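
	/* Shape of the walk above in pseudo-C (field widths vary with
	 * PAGE_SIZE and CONFIG_PGTABLE_LEVELS; names here are illustrative):
	 *
	 *	entry = table[mid_index(va)];		// ldw,s scaled load
	 *	if (!(entry & _PxD_PRESENT))		// bb,>=,n
	 *		goto fault;
	 *	pte_base = (entry & ~flags) << PxD_VALUE_SHIFT;
	 *	pte = &pte_base[pte_index(va)];		// shladd
	 */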

	/* Look up PTE in a 3-Level scheme. */
	.macro		L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	copy		%r0,\pte
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	ldw,s		\index(\pgd),\pgd
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	shld		\pgd,PxD_VALUE_SHIFT,\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire page_table_lock and check page is present. */
	.macro		ptl_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_TLB_PTLOCK
98:	cmpib,COND(=),n	0,\spc,2f
	get_ptl		\tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm
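
	/* The lock word follows the usual parisc ldcw convention: LDCW
	 * atomically reads the word and zeroes it, so zero means someone
	 * else holds the lock (spin back to 1b) and non-zero means we got
	 * it.  On the not-present path the "stw \spc,0(\tmp)" in the delay
	 * slot of "b \fault" releases the lock again; \spc is known to be
	 * non-zero there since the spc == 0 case branched straight to 2f.
	 */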

	/* Release page_table_lock without reloading lock address.
	   Note that the values in the register spc are limited to
	   NR_SPACE_IDS (262144). Thus, the stw instruction always
	   stores a nonzero value even when register spc is 64 bits.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. */
	.macro		ptl_unlock0	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	or,COND(=)	%r0,\spc,%r0
	stw,ma		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release page_table_lock. */
	.macro		ptl_unlock1	spc,tmp
#ifdef CONFIG_TLB_PTLOCK
98:	get_ptl		\tmp
	ptl_unlock0	\spc,\tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro		update_accessed	ptp,pte,tmp,tmp1
	ldi		_PAGE_ACCESSED,\tmp1
	or		\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG		\tmp,0(\ptp)
	.endm
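
	/* Equivalent C, to make the nullification explicit (sketch only):
	 *
	 *	tmp = pte | _PAGE_ACCESSED;
	 *	if (!(pte & _PAGE_ACCESSED))	// and,COND(<>) nullifies the
	 *		*ptp = tmp;		//  store when the bit is set
	 */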

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro		update_dirty	ptp,pte,tmp
	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or		\tmp,\pte,\pte
	STREG		\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm
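
	/* Net effect, ignoring the huge-page leg (rough sketch): the PFN
	 * is pulled out of the PTE and rescaled from the kernel's
	 * PAGE_SIZE granularity to the 4k granularity the TLB expects,
	 * roughly pte = (pte >> ASM_PFN_PTE_SHIFT) << PAGE_ADD_SHIFT,
	 * with _PAGE_SIZE_ENCODING_DEFAULT deposited into the low bits so
	 * that iitlbt/idtlbt see the page size encoding they require.
	 */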

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro		make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm
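
	/* Summary of the resulting PA 2.0 protection word (per the deposits
	 * above; consult the PA 2.0 manual for the authoritative layout):
	 * the prot id from space_to_prot, then T/D/B from the PTE, the
	 * access-rights type built from _PAGE_GATEWAY/_PAGE_EXEC/_PAGE_WRITE,
	 * _PAGE_READ in the top bit of PL1, PL1|PL2 opened up for PAGE_USER
	 * pages, PL2 forced back to 0 for gateway pages, and the uncacheable
	 * bit set for _PAGE_NO_CACHE mappings.
	 */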

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro		make_insert_tlb_11	spc,pte,prot
	zdep		\spc,30,15,\prot
	dep		\pte,8,7,\prot
	extru,=		\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi		1,12,1,\prot
	extru,=         \pte,_PAGE_USER_BIT,1,%r0
	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro		f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
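
	/* Worked example: for an I/O page at 0xfXXXXXXX the four bits
	 * tested by the extrd,s read as all ones, so \tmp is -1, the
	 * "addi,<>" does not nullify, and the final extrd,s sign-extends
	 * the f's into the upper word of \pte.  For ordinary RAM pages
	 * \tmp != -1 and the sign extension is skipped.
	 */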

	/* The alias region is an 8MB aligned, 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil		L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi		0,31,32,\tmp
#endif
	copy		\va,\tmp1
	depi		0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl		%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u		\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z		\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z		\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	mfctl	%cr30,%r1	/* task_struct */
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	bv	%r0(%r2)
	mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16		/* task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	PRIV_USER,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TASK_TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TASK_TI_FLAGS(%r1),%r19
	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	ldw	TI_PRE_COUNT(%r1), %r19
	cmpib,<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	/* ssm PSW_SM_I done later in intr_restore */
#ifdef CONFIG_MLONGCALLS
	ldil	L%intr_restore, %r2
	load32	preempt_schedule_irq, %r1
	bv	%r0(%r1)
	ldo	R%intr_restore(%r2), %r2
#else
	ldil	L%intr_restore, %r1
	BL	preempt_schedule_irq, %r2
	ldo	R%intr_restore(%r1), %r2
#endif
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl           %isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n		intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG           %r16, PT_IASQ0(%r29)
	STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo		PT_FR0(%r29), %r25
	save_fp		%r25

	loadgp

	copy		%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo		-16(%r30),%r29	/* Reference param save area */
#endif

	ldil		L%intr_check_sig, %r2
	copy		%r25, %r16	/* save pt_regs */

	b		handle_interruption
	ldo		R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * An instruction miss is a little different, since we allow users
	 * to fault on the gateway page, which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * An instruction miss is a little different, since we allow users
	 * to fault on the gateway page, which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	ptl_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	ptl_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	ptl_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	ptl_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	ptl_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	ptl_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */
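
	/* For reference, a minimal userspace invocation consistent with
	 * the table above might look like this (illustrative only; the
	 * gateway address and the use of %r20 for the syscall number
	 * follow the usual parisc Linux ABI and are not defined here):
	 *
	 *	ble	0x100(%sr2,%r0)		; enter the gateway page
	 *	ldi	__NR_getpid,%r20	; syscall number, in the delay slot
	 *	...				; result returns in %r28
	 */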
1640
1641	/* Floating point registers (FIXME: what do we do with these?)
1642
1643	   %fr0  - %fr3	   status/exception, not preserved
1644	   %fr4  - %fr7	   arguments
1645	   %fr8	 - %fr11   not preserved by C code
1646	   %fr12 - %fr21   preserved by C code
1647	   %fr22 - %fr31   not preserved by C code
1648	 */
1649
1650	.macro	reg_save regs
1651	STREG	%r3, PT_GR3(\regs)
1652	STREG	%r4, PT_GR4(\regs)
1653	STREG	%r5, PT_GR5(\regs)
1654	STREG	%r6, PT_GR6(\regs)
1655	STREG	%r7, PT_GR7(\regs)
1656	STREG	%r8, PT_GR8(\regs)
1657	STREG	%r9, PT_GR9(\regs)
1658	STREG   %r10,PT_GR10(\regs)
1659	STREG   %r11,PT_GR11(\regs)
1660	STREG   %r12,PT_GR12(\regs)
1661	STREG   %r13,PT_GR13(\regs)
1662	STREG   %r14,PT_GR14(\regs)
1663	STREG   %r15,PT_GR15(\regs)
1664	STREG   %r16,PT_GR16(\regs)
1665	STREG   %r17,PT_GR17(\regs)
1666	STREG   %r18,PT_GR18(\regs)
1667	.endm
1668
1669	.macro	reg_restore regs
1670	LDREG	PT_GR3(\regs), %r3
1671	LDREG	PT_GR4(\regs), %r4
1672	LDREG	PT_GR5(\regs), %r5
1673	LDREG	PT_GR6(\regs), %r6
1674	LDREG	PT_GR7(\regs), %r7
1675	LDREG	PT_GR8(\regs), %r8
1676	LDREG	PT_GR9(\regs), %r9
1677	LDREG   PT_GR10(\regs),%r10
1678	LDREG   PT_GR11(\regs),%r11
1679	LDREG   PT_GR12(\regs),%r12
1680	LDREG   PT_GR13(\regs),%r13
1681	LDREG   PT_GR14(\regs),%r14
1682	LDREG   PT_GR15(\regs),%r15
1683	LDREG   PT_GR16(\regs),%r16
1684	LDREG   PT_GR17(\regs),%r17
1685	LDREG   PT_GR18(\regs),%r18
1686	.endm
1687
1688	.macro	fork_like name
1689ENTRY_CFI(sys_\name\()_wrapper)
1690	mfctl	%cr30,%r1
1691	ldo	TASK_REGS(%r1),%r1
1692	reg_save %r1
1693	mfctl	%cr27, %r28
1694	ldil	L%sys_\name, %r31
1695	be	R%sys_\name(%sr4,%r31)
1696	STREG	%r28, PT_CR27(%r1)
1697ENDPROC_CFI(sys_\name\()_wrapper)
1698	.endm
1699
1700fork_like clone
1701fork_like clone3
1702fork_like fork
1703fork_like vfork
1704
1705	/* Set the return value for the child */
1706ENTRY(child_return)
1707	BL	schedule_tail, %r2
1708	nop
1709finish_child_return:
1710	mfctl	%cr30,%r1
1711	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */
1712
1713	LDREG	PT_CR27(%r1), %r3
1714	mtctl	%r3, %cr27
1715	reg_restore %r1
1716	b	syscall_exit
1717	copy	%r0,%r28
1718END(child_return)
1719
1720ENTRY_CFI(sys_rt_sigreturn_wrapper)
1721	mfctl	%cr30,%r26
1722	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
1723	/* Don't save regs, we are going to restore them from sigcontext. */
1724	STREG	%r2, -RP_OFFSET(%r30)
1725#ifdef CONFIG_64BIT
1726	ldo	FRAME_SIZE(%r30), %r30
1727	BL	sys_rt_sigreturn,%r2
1728	ldo	-16(%r30),%r29		/* Reference param save area */
1729#else
1730	BL	sys_rt_sigreturn,%r2
1731	ldo	FRAME_SIZE(%r30), %r30
1732#endif
1733
1734	ldo	-FRAME_SIZE(%r30), %r30
1735	LDREG	-RP_OFFSET(%r30), %r2
1736
1737	/* FIXME: I think we need to restore a few more things here. */
1738	mfctl	%cr30,%r1
1739	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
1740	reg_restore %r1
1741
1742	/* If the signal was received while the process was blocked on a
1743	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1744	 * take us to syscall_exit_rfi and on to intr_return.
1745	 */
1746	bv	%r0(%r2)
1747	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
1748ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1749
1750ENTRY(syscall_exit)
1751	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
1752	 * via syscall_exit_rfi if the signal was received while the process
1753	 * was running.
1754	 */
1755
1756	/* save return value now */
1757	mfctl     %cr30, %r1
1758	STREG     %r28,TASK_PT_GR28(%r1)
1759
1760	/* Seems to me that dp could be wrong here, if the syscall involved
1761	 * calling a module, and nothing got round to restoring dp on return.
1762	 */
1763	loadgp
1764
1765syscall_check_resched:
1766
1767	/* check for reschedule */
1768	mfctl	%cr30,%r19
1769	LDREG	TASK_TI_FLAGS(%r19),%r19	/* long */
1770	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1771
1772	.import do_signal,code
1773syscall_check_sig:
1774	mfctl	%cr30,%r19
1775	LDREG	TASK_TI_FLAGS(%r19),%r19
1776	ldi	(_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
1777	and,COND(<>)	%r19, %r26, %r0
1778	b,n	syscall_restore	/* skip past if we've nothing to do */
1779
1780syscall_do_signal:
1781	/* Save callee-save registers (for sigcontext).
1782	 * FIXME: After this point the process structure should be
1783	 * consistent with all the relevant state of the process
1784	 * before the syscall.  We need to verify this.
1785	 */
1786	mfctl	%cr30,%r1
1787	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
1788	reg_save %r26
1789
1790#ifdef CONFIG_64BIT
1791	ldo	-16(%r30),%r29			/* Reference param save area */
1792#endif
1793
1794	BL	do_notify_resume,%r2
1795	ldi	1, %r25				/* long in_syscall = 1 */
1796
1797	mfctl	%cr30,%r1
1798	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
1799	reg_restore %r20
1800
1801	b,n     syscall_check_sig
1802
1803syscall_restore:
1804	mfctl	%cr30,%r1
1805
1806	/* Are we being ptraced? */
1807	LDREG	TASK_TI_FLAGS(%r1),%r19
1808	ldi	_TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
1809	and,COND(=)	%r19,%r2,%r0
1810	b,n	syscall_restore_rfi
1811
1812	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
1813	rest_fp	%r19
1814
1815	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
1816	mtsar	%r19
1817
1818	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
1819	LDREG	TASK_PT_GR19(%r1),%r19
1820	LDREG   TASK_PT_GR20(%r1),%r20
1821	LDREG	TASK_PT_GR21(%r1),%r21
1822	LDREG	TASK_PT_GR22(%r1),%r22
1823	LDREG	TASK_PT_GR23(%r1),%r23
1824	LDREG	TASK_PT_GR24(%r1),%r24
1825	LDREG	TASK_PT_GR25(%r1),%r25
1826	LDREG	TASK_PT_GR26(%r1),%r26
1827	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
1828	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
1829	LDREG	TASK_PT_GR29(%r1),%r29
1830	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */
1831
1832	/* NOTE: We use rsm/ssm pair to make this operation atomic */
1833	LDREG   TASK_PT_GR30(%r1),%r1              /* Get user sp */
1834	rsm     PSW_SM_I, %r0
1835	copy    %r1,%r30                           /* Restore user sp */
1836	mfsp    %sr3,%r1                           /* Get user space id */
1837	mtsp    %r1,%sr7                           /* Restore sr7 */
1838	ssm     PSW_SM_I, %r0
1839
1840	/* Set sr2 to zero for userspace syscalls to work. */
1841	mtsp	%r0,%sr2
1842	mtsp	%r1,%sr4			   /* Restore sr4 */
1843	mtsp	%r1,%sr5			   /* Restore sr5 */
1844	mtsp	%r1,%sr6			   /* Restore sr6 */
1845
1846	depi	PRIV_USER,31,2,%r31	/* ensure return to user mode. */
1847
1848#ifdef CONFIG_64BIT
1849	/* decide whether to reset the wide mode bit
1850	 *
1851	 * For a syscall, the W bit is stored in the lowest bit
1852	 * of sp.  Extract it and reset W if it is zero */
1853	extrd,u,*<>	%r30,63,1,%r1
1854	rsm	PSW_SM_W, %r0
1855	/* now reset the lowest bit of sp if it was set */
1856	xor	%r30,%r1,%r30
1857#endif
1858	be,n    0(%sr3,%r31)                       /* return to user space */
1859
1860	/* We have to return via an RFI, so that PSW T and R bits can be set
1861	 * appropriately.
1862	 * This sets up pt_regs so we can return via intr_restore, which is not
1863	 * the most efficient way of doing things, but it works.
1864	 */
1865syscall_restore_rfi:
1866	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
1867	mtctl	%r2,%cr0			   /*   for immediate trap */
1868	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
1869	ldi	0x0b,%r20			   /* Create new PSW */
1870	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */
1871
1872	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1873	 * set in thread_info.h and converted to PA bitmap
1874	 * numbers in asm-offsets.c */
1875
1876	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1877	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1878	depi	-1,27,1,%r20			   /* R bit */
1879
1880	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1881	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1882	depi	-1,7,1,%r20			   /* T bit */
1883
1884	STREG	%r20,TASK_PT_PSW(%r1)
1885
1886	/* Always store space registers, since sr3 can be changed (e.g. fork) */
1887
1888	mfsp    %sr3,%r25
1889	STREG   %r25,TASK_PT_SR3(%r1)
1890	STREG   %r25,TASK_PT_SR4(%r1)
1891	STREG   %r25,TASK_PT_SR5(%r1)
1892	STREG   %r25,TASK_PT_SR6(%r1)
1893	STREG   %r25,TASK_PT_SR7(%r1)
1894	STREG   %r25,TASK_PT_IASQ0(%r1)
1895	STREG   %r25,TASK_PT_IASQ1(%r1)
1896
1897	/* XXX W bit??? */
1898	/* Now if old D bit is clear, it means we didn't save all registers
1899	 * on syscall entry, so do that now.  This only happens on TRACEME
1900	 * calls, or if someone attached to us while we were on a syscall.
1901	 * We could make this more efficient by not saving r3-r18, but
1902	 * then we wouldn't be able to use the common intr_restore path.
1903	 * It is only for traced processes anyway, so performance is not
1904	 * an issue.
1905	 */
1906	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
1907	ldo	TASK_REGS(%r1),%r25
1908	reg_save %r25				   /* Save r3 to r18 */
1909
1910	/* Save the current sr */
1911	mfsp	%sr0,%r2
1912	STREG	%r2,TASK_PT_SR0(%r1)
1913
1914	/* Save the scratch sr */
1915	mfsp	%sr1,%r2
1916	STREG	%r2,TASK_PT_SR1(%r1)
1917
1918	/* sr2 should be set to zero for userspace syscalls */
1919	STREG	%r0,TASK_PT_SR2(%r1)
1920
1921	LDREG	TASK_PT_GR31(%r1),%r2
1922	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
1923	STREG   %r2,TASK_PT_IAOQ0(%r1)
1924	ldo	4(%r2),%r2
1925	STREG	%r2,TASK_PT_IAOQ1(%r1)
1926	b	intr_restore
1927	copy	%r25,%r16
1928
1929pt_regs_ok:
1930	LDREG	TASK_PT_IAOQ0(%r1),%r2
1931	depi	PRIV_USER,31,2,%r2	/* ensure return to user mode. */
1932	STREG	%r2,TASK_PT_IAOQ0(%r1)
1933	LDREG	TASK_PT_IAOQ1(%r1),%r2
1934	depi	PRIV_USER,31,2,%r2
1935	STREG	%r2,TASK_PT_IAOQ1(%r1)
1936	b	intr_restore
1937	copy	%r25,%r16
1938
1939syscall_do_resched:
1940	load32	syscall_check_resched,%r2 /* if resched, we start over again */
1941	load32	schedule,%r19
1942	bv	%r0(%r19)		/* jumps to schedule() */
1943#ifdef CONFIG_64BIT
1944	ldo	-16(%r30),%r29		/* Reference param save area */
1945#else
1946	nop
1947#endif
1948END(syscall_exit)
1949
1950
1951#ifdef CONFIG_FUNCTION_TRACER
1952
1953	.import ftrace_function_trampoline,code
1954	.align L1_CACHE_BYTES
1955ENTRY_CFI(mcount, caller)
1956_mcount:
1957	.export _mcount,data
1958	/*
1959	 * The 64bit mcount() function pointer needs 4 dwords, of which the
1960	 * first two are free.  We optimize it here and put 2 instructions for
1961	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
1962	 * have all on one L1 cacheline.
1963	 */
1964	ldi	0, %arg3
1965	b	ftrace_function_trampoline
1966	copy	%r3, %arg2	/* caller original %sp */
1967ftrace_stub:
1968	.globl ftrace_stub
1969        .type  ftrace_stub, @function
1970#ifdef CONFIG_64BIT
1971	bve	(%rp)
1972#else
1973	bv	%r0(%rp)
1974#endif
1975	nop
1976#ifdef CONFIG_64BIT
1977	.dword mcount
1978	.dword 0 /* code in head.S puts value of global gp here */
1979#endif
1980ENDPROC_CFI(mcount)
1981
1982#ifdef CONFIG_DYNAMIC_FTRACE
1983
1984#ifdef CONFIG_64BIT
1985#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
1986#else
1987#define FTRACE_FRAME_SIZE FRAME_SIZE
1988#endif
1989ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
1990ftrace_caller:
1991	.global ftrace_caller
1992
1993	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
1994	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
1995	STREG	%rp, -RP_OFFSET(%r3)
1996
1997	/* Offset 0 is already allocated for %r1 */
1998	STREG	%r23, 2*REG_SZ(%r3)
1999	STREG	%r24, 3*REG_SZ(%r3)
2000	STREG	%r25, 4*REG_SZ(%r3)
2001	STREG	%r26, 5*REG_SZ(%r3)
2002	STREG	%r28, 6*REG_SZ(%r3)
2003	STREG	%r29, 7*REG_SZ(%r3)
2004#ifdef CONFIG_64BIT
2005	STREG	%r19, 8*REG_SZ(%r3)
2006	STREG	%r20, 9*REG_SZ(%r3)
2007	STREG	%r21, 10*REG_SZ(%r3)
2008	STREG	%r22, 11*REG_SZ(%r3)
2009	STREG	%r27, 12*REG_SZ(%r3)
2010	STREG	%r31, 13*REG_SZ(%r3)
2011	loadgp
2012	ldo	-16(%sp),%r29
2013#endif
2014	LDREG	0(%r3), %r25
2015	copy	%rp, %r26
2016	ldo	-8(%r25), %r25
2017	ldi	0, %r23		/* no pt_regs */
2018	b,l	ftrace_function_trampoline, %rp
2019	copy	%r3, %r24
2020
2021	LDREG	-RP_OFFSET(%r3), %rp
2022	LDREG	2*REG_SZ(%r3), %r23
2023	LDREG	3*REG_SZ(%r3), %r24
2024	LDREG	4*REG_SZ(%r3), %r25
2025	LDREG	5*REG_SZ(%r3), %r26
2026	LDREG	6*REG_SZ(%r3), %r28
2027	LDREG	7*REG_SZ(%r3), %r29
2028#ifdef CONFIG_64BIT
2029	LDREG	8*REG_SZ(%r3), %r19
2030	LDREG	9*REG_SZ(%r3), %r20
2031	LDREG	10*REG_SZ(%r3), %r21
2032	LDREG	11*REG_SZ(%r3), %r22
2033	LDREG	12*REG_SZ(%r3), %r27
2034	LDREG	13*REG_SZ(%r3), %r31
2035#endif
2036	LDREG	1*REG_SZ(%r3), %r3
2037
2038	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
2039	/* Adjust return point to jump back to beginning of traced function */
2040	ldo	-4(%r1), %r1
2041	bv,n	(%r1)
2042
2043ENDPROC_CFI(ftrace_caller)
2044
2045#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
2046ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
2047	CALLS,SAVE_RP,SAVE_SP)
2048ftrace_regs_caller:
2049	.global ftrace_regs_caller
2050
2051	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
2052	STREG	%rp, -RP_OFFSET(%r1)
2053
2054	copy	%sp, %r1
2055	ldo	PT_SZ_ALGN(%sp), %sp
2056
2057	STREG	%rp, PT_GR2(%r1)
2058	STREG	%r3, PT_GR3(%r1)
2059	STREG	%r4, PT_GR4(%r1)
2060	STREG	%r5, PT_GR5(%r1)
2061	STREG	%r6, PT_GR6(%r1)
2062	STREG	%r7, PT_GR7(%r1)
2063	STREG	%r8, PT_GR8(%r1)
2064	STREG	%r9, PT_GR9(%r1)
2065	STREG   %r10, PT_GR10(%r1)
2066	STREG   %r11, PT_GR11(%r1)
2067	STREG   %r12, PT_GR12(%r1)
2068	STREG   %r13, PT_GR13(%r1)
2069	STREG   %r14, PT_GR14(%r1)
2070	STREG   %r15, PT_GR15(%r1)
2071	STREG   %r16, PT_GR16(%r1)
2072	STREG   %r17, PT_GR17(%r1)
2073	STREG   %r18, PT_GR18(%r1)
2074	STREG	%r19, PT_GR19(%r1)
2075	STREG	%r20, PT_GR20(%r1)
2076	STREG	%r21, PT_GR21(%r1)
2077	STREG	%r22, PT_GR22(%r1)
2078	STREG	%r23, PT_GR23(%r1)
2079	STREG	%r24, PT_GR24(%r1)
2080	STREG	%r25, PT_GR25(%r1)
2081	STREG	%r26, PT_GR26(%r1)
2082	STREG	%r27, PT_GR27(%r1)
2083	STREG	%r28, PT_GR28(%r1)
2084	STREG	%r29, PT_GR29(%r1)
2085	STREG	%r30, PT_GR30(%r1)
2086	STREG	%r31, PT_GR31(%r1)
2087	mfctl	%cr11, %r26
2088	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1
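	/* Recompute the pt_regs base: %r1 is clobbered by the call.
	 * Restoring every register from pt_regs lets a SAVE_REGS
	 * tracer's modifications take effect. */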

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG   PT_GR10(%r1),%r10
	LDREG   PT_GR11(%r1),%r11
	LDREG   PT_GR12(%r1),%r12
	LDREG   PT_GR13(%r1),%r13
	LDREG   PT_GR14(%r1),%r14
	LDREG   PT_GR15(%r1),%r15
	LDREG   PT_GR16(%r1),%r16
	LDREG   PT_GR17(%r1),%r17
	LDREG   PT_GR18(%r1),%r18
	LDREG   PT_GR19(%r1),%r19
	LDREG   PT_GR20(%r1),%r20
	LDREG   PT_GR21(%r1),%r21
	LDREG   PT_GR22(%r1),%r22
	LDREG   PT_GR23(%r1),%r23
	LDREG   PT_GR24(%r1),%r24
	LDREG   PT_GR25(%r1),%r25
	LDREG   PT_GR26(%r1),%r26
	LDREG   PT_GR27(%r1),%r27
	LDREG   PT_GR28(%r1),%r28
	LDREG   PT_GR29(%r1),%r29
	LDREG   PT_GR30(%r1),%r30
	LDREG   PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
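/*
 * The function graph tracer redirects a traced function's return
 * address to parisc_return_to_handler, so the function "returns"
 * here first.  Save the real return values, ask
 * ftrace_return_to_handler() for the original return address, then
 * complete the return on the traced function's behalf.
 */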
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
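	/* Build the return link by hand: %r2 is pointed at .Lftrace_ret
	 * because the plain bv/bve used below does not record one.  The
	 * handler returns the original return address in %ret0. */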
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif	/* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
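/* Switch %sp to new_stack, call func(param1) on it, then switch back.
 * Used to run interrupt handlers on their own IRQ stack. */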
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non-access TLB miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
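	/*
	 * blr computes its target as (%r8 << 3) bytes past the first
	 * table entry below, so each general register gets one
	 * two-instruction (8-byte) slot: copy the value, then branch
	 * back through %r25.
	 */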
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 */
	copy    %r0,%r1
	bv      %r0(%r25)    /* r1 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r2,%r1
	bv      %r0(%r25)    /* r3 */
	copy    %r3,%r1
	bv      %r0(%r25)    /* r4 */
	copy    %r4,%r1
	bv      %r0(%r25)    /* r5 */
	copy    %r5,%r1
	bv      %r0(%r25)    /* r6 */
	copy    %r6,%r1
	bv      %r0(%r25)    /* r7 */
	copy    %r7,%r1
	bv      %r0(%r25)    /* r8 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r9 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r10 */
	copy    %r10,%r1
	bv      %r0(%r25)    /* r11 */
	copy    %r11,%r1
	bv      %r0(%r25)    /* r12 */
	copy    %r12,%r1
	bv      %r0(%r25)    /* r13 */
	copy    %r13,%r1
	bv      %r0(%r25)    /* r14 */
	copy    %r14,%r1
	bv      %r0(%r25)    /* r15 */
	copy    %r15,%r1
	bv      %r0(%r25)    /* r16 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r17 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r18 */
	copy    %r18,%r1
	bv      %r0(%r25)    /* r19 */
	copy    %r19,%r1
	bv      %r0(%r25)    /* r20 */
	copy    %r20,%r1
	bv      %r0(%r25)    /* r21 */
	copy    %r21,%r1
	bv      %r0(%r25)    /* r22 */
	copy    %r22,%r1
	bv      %r0(%r25)    /* r23 */
	copy    %r23,%r1
	bv      %r0(%r25)    /* r24 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r25 - shadowed */
	ldi     -1,%r1
	bv      %r0(%r25)    /* r26 */
	copy    %r26,%r1
	bv      %r0(%r25)    /* r27 */
	copy    %r27,%r1
	bv      %r0(%r25)    /* r28 */
	copy    %r28,%r1
	bv      %r0(%r25)    /* r29 */
	copy    %r29,%r1
	bv      %r0(%r25)    /* r30 */
	copy    %r30,%r1
	bv      %r0(%r25)    /* r31 */
	copy    %r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non-access TLB miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
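	/* Dispatched via blr into a table of two-instruction slots,
	 * just like get_register above. */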
	blr     %r8,%r0
	nop
	bv      %r0(%r25)    /* r0 (silly, but it is a placeholder) */
	copy    %r1,%r0
	bv      %r0(%r25)    /* r1 */
	copy    %r1,%r1
	bv      %r0(%r25)    /* r2 */
	copy    %r1,%r2
	bv      %r0(%r25)    /* r3 */
	copy    %r1,%r3
	bv      %r0(%r25)    /* r4 */
	copy    %r1,%r4
	bv      %r0(%r25)    /* r5 */
	copy    %r1,%r5
	bv      %r0(%r25)    /* r6 */
	copy    %r1,%r6
	bv      %r0(%r25)    /* r7 */
	copy    %r1,%r7
	bv      %r0(%r25)    /* r8 */
	copy    %r1,%r8
	bv      %r0(%r25)    /* r9 */
	copy    %r1,%r9
	bv      %r0(%r25)    /* r10 */
	copy    %r1,%r10
	bv      %r0(%r25)    /* r11 */
	copy    %r1,%r11
	bv      %r0(%r25)    /* r12 */
	copy    %r1,%r12
	bv      %r0(%r25)    /* r13 */
	copy    %r1,%r13
	bv      %r0(%r25)    /* r14 */
	copy    %r1,%r14
	bv      %r0(%r25)    /* r15 */
	copy    %r1,%r15
	bv      %r0(%r25)    /* r16 */
	copy    %r1,%r16
	bv      %r0(%r25)    /* r17 */
	copy    %r1,%r17
	bv      %r0(%r25)    /* r18 */
	copy    %r1,%r18
	bv      %r0(%r25)    /* r19 */
	copy    %r1,%r19
	bv      %r0(%r25)    /* r20 */
	copy    %r1,%r20
	bv      %r0(%r25)    /* r21 */
	copy    %r1,%r21
	bv      %r0(%r25)    /* r22 */
	copy    %r1,%r22
	bv      %r0(%r25)    /* r23 */
	copy    %r1,%r23
	bv      %r0(%r25)    /* r24 */
	copy    %r1,%r24
	bv      %r0(%r25)    /* r25 */
	copy    %r1,%r25
	bv      %r0(%r25)    /* r26 */
	copy    %r1,%r26
	bv      %r0(%r25)    /* r27 */
	copy    %r1,%r27
	bv      %r0(%r25)    /* r28 */
	copy    %r1,%r28
	bv      %r0(%r25)    /* r29 */
	copy    %r1,%r29
	bv      %r0(%r25)    /* r30 */
	copy    %r1,%r30
	bv      %r0(%r25)    /* r31 */
	copy    %r1,%r31
ENDPROC_CFI(set_register)

