/*
 *  PARISC TLB and cache flushing support
 *  Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 *  Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 *  Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *       should only use index and base registers that are not shadowed,
 *       so that the fast path emulation in the non-access miss handler
 *       can be used.
 */
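/*
 * For reference: PA-RISC shadows general registers %r1, %r8, %r9,
 * %r16, %r17, %r24 and %r25 across interruptions, which is why the
 * fdc/fic/pdc loops below stick to index and base registers such as
 * %r23, %r26, %r28 and %r31.
 */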

#ifdef CONFIG_64BIT
	.level	2.0w
#else
	.level	2.0
#endif

#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <linux/linkage.h>

	.text
	.align	128

ENTRY_CFI(flush_tlb_all_local)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */

	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm		PSW_SM_I, %r19		/* save I-bit state */
	load32		PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

1:      load32		PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG		ITLB_SID_BASE(%r1), %r20
	LDREG		ITLB_SID_STRIDE(%r1), %r21
	LDREG		ITLB_SID_COUNT(%r1), %r22
	LDREG		ITLB_OFF_BASE(%r1), %arg0
	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
	LDREG		ITLB_OFF_COUNT(%r1), %arg2
	LDREG		ITLB_LOOP(%r1), %arg3

	addib,COND(=)		-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fitmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fitmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe		0(%sr1, %r28)
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)		-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fitonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)		-1, %r22, fitoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20		/* increment space */

fitdone:

	/* Flush Data Tlb */

	LDREG		DTLB_SID_BASE(%r1), %r20
	LDREG		DTLB_SID_STRIDE(%r1), %r21
	LDREG		DTLB_SID_COUNT(%r1), %r22
	LDREG		DTLB_OFF_BASE(%r1), %arg0
	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
	LDREG		DTLB_OFF_COUNT(%r1), %arg2
	LDREG		DTLB_LOOP(%r1), %arg3

	addib,COND(=)		-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fdtmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe		0(%sr1, %r28)
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)		-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fdtdone	/* Outer loop count decr */

fdtoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fdtonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)		-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */


fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1	/* I-bit to state on entry */
	mtctl		%r1, %ipsw	/* restore I-bit (entire PSW) */
	rfi
	nop

2:      bv		%r0(%r2)
	nop

	.exit
	.procend
ENDPROC_CFI(flush_tlb_all_local)

	.import cache_info,data

ENTRY_CFI(flush_instruction_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	load32		cache_info, %r1

	/* Flush Instruction Cache */

	LDREG		ICACHE_BASE(%r1), %arg0
	LDREG		ICACHE_STRIDE(%r1), %arg1
	LDREG		ICACHE_COUNT(%r1), %arg2
	LDREG		ICACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop*/
	mtsp		%r0, %sr1
	addib,COND(=)		-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice		%r0(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fisync	/* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fice instruction */
	cmpib,COND(>>=),n	15, %arg2, fioneloop2

fioneloop1:
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fioneloop1
	fice,m		%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */

fioneloop2:
	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_instruction_cache_local)


	.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
	.proc
	.callinfo NO_CALLS
	.entry

	load32		cache_info, %r1

	/* Flush Data Cache */

	LDREG		DCACHE_BASE(%r1), %arg0
	LDREG		DCACHE_STRIDE(%r1), %arg1
	LDREG		DCACHE_COUNT(%r1), %arg2
	LDREG		DCACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop*/
	mtsp		%r0, %sr1
	addib,COND(=)		-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce		%r0(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fdce instruction */
	cmpib,COND(>>=),n	15, %arg2, fdoneloop2

fdoneloop1:
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fdoneloop1
	fdce,m		%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */

fdoneloop2:
	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_data_cache_local)

	.align	16

/* Macros to serialize TLB purge operations on SMP.  */

	.macro	tlb_lock	la,flags,tmp
#ifdef CONFIG_SMP
	ldil		L%pa_tlb_lock,%r1
	ldo		R%pa_tlb_lock(%r1),\la
	rsm		PSW_SM_I,\flags
1:	LDCW		0(\la),\tmp
	cmpib,<>,n	0,\tmp,3f
2:	ldw		0(\la),\tmp
	cmpb,<>		%r0,\tmp,1b
	nop
	b,n		2b
3:
#endif
	.endm

	.macro	tlb_unlock	la,flags,tmp
#ifdef CONFIG_SMP
	ldi		1,\tmp
	stw		\tmp,0(\la)
	mtsm		\flags
#endif
	.endm
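
/*
 * Informally, tlb_lock/tlb_unlock behave like the C sketch below
 * (ldcw() standing in for the atomic load-and-clear-word primitive;
 * the helper names are illustrative only):
 *
 *	flags = disable_interrupts();
 *	while (ldcw(&pa_tlb_lock) == 0)		// 0 means lock held
 *		while (pa_tlb_lock == 0)	// spin on plain loads
 *			;
 *	... purge ...
 *	pa_tlb_lock = 1;			// plain store releases
 *	restore_interrupts(flags);
 */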

/* Clear page using kernel mapping.  */
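/* Per the PA calling convention, %r26 (arg0) holds the kernel virtual
   address of the page to clear. */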

ENTRY_CFI(clear_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT

	/* Unroll the loop.  */
	ldi		(PAGE_SIZE / 128), %r1

1:
	std		%r0, 0(%r26)
	std		%r0, 8(%r26)
	std		%r0, 16(%r26)
	std		%r0, 24(%r26)
	std		%r0, 32(%r26)
	std		%r0, 40(%r26)
	std		%r0, 48(%r26)
	std		%r0, 56(%r26)
	std		%r0, 64(%r26)
	std		%r0, 72(%r26)
	std		%r0, 80(%r26)
	std		%r0, 88(%r26)
	std		%r0, 96(%r26)
	std		%r0, 104(%r26)
	std		%r0, 112(%r26)
	std		%r0, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26

#else

	/*
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use std on a 32 bit kernel.
	 */
	ldi		(PAGE_SIZE / 64), %r1

1:
	stw		%r0, 0(%r26)
	stw		%r0, 4(%r26)
	stw		%r0, 8(%r26)
	stw		%r0, 12(%r26)
	stw		%r0, 16(%r26)
	stw		%r0, 20(%r26)
	stw		%r0, 24(%r26)
	stw		%r0, 28(%r26)
	stw		%r0, 32(%r26)
	stw		%r0, 36(%r26)
	stw		%r0, 40(%r26)
	stw		%r0, 44(%r26)
	stw		%r0, 48(%r26)
	stw		%r0, 52(%r26)
	stw		%r0, 56(%r26)
	stw		%r0, 60(%r26)

	addib,COND(>),n	-1, %r1, 1b
	ldo		64(%r26), %r26
#endif
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_page_asm)

/* Copy page using kernel mapping.  */
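/* Per the PA calling convention, %r26 (arg0) is the destination page
   and %r25 (arg1) the source page, both kernel virtual addresses. */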

ENTRY_CFI(copy_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * Prefetch doesn't improve performance on rp3440.
	 * GCC probably can do this just as well...
	 */

	ldi		(PAGE_SIZE / 128), %r1

1:	ldd		0(%r25), %r19
	ldd		8(%r25), %r20

	ldd		16(%r25), %r21
	ldd		24(%r25), %r22
	std		%r19, 0(%r26)
	std		%r20, 8(%r26)

	ldd		32(%r25), %r19
	ldd		40(%r25), %r20
	std		%r21, 16(%r26)
	std		%r22, 24(%r26)

	ldd		48(%r25), %r21
	ldd		56(%r25), %r22
	std		%r19, 32(%r26)
	std		%r20, 40(%r26)

	ldd		64(%r25), %r19
	ldd		72(%r25), %r20
	std		%r21, 48(%r26)
	std		%r22, 56(%r26)

	ldd		80(%r25), %r21
	ldd		88(%r25), %r22
	std		%r19, 64(%r26)
	std		%r20, 72(%r26)

	ldd		 96(%r25), %r19
	ldd		104(%r25), %r20
	std		%r21, 80(%r26)
	std		%r22, 88(%r26)

	ldd		112(%r25), %r21
	ldd		120(%r25), %r22
	ldo		128(%r25), %r25
	std		%r19, 96(%r26)
	std		%r20, 104(%r26)

	std		%r21, 112(%r26)
	std		%r22, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26

#else

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldw		0(%r25), %r19
	ldi		(PAGE_SIZE / 64), %r1

1:
	ldw		4(%r25), %r20
	ldw		8(%r25), %r21
	ldw		12(%r25), %r22
	stw		%r19, 0(%r26)
	stw		%r20, 4(%r26)
	stw		%r21, 8(%r26)
	stw		%r22, 12(%r26)
	ldw		16(%r25), %r19
	ldw		20(%r25), %r20
	ldw		24(%r25), %r21
	ldw		28(%r25), %r22
	stw		%r19, 16(%r26)
	stw		%r20, 20(%r26)
	stw		%r21, 24(%r26)
	stw		%r22, 28(%r26)
	ldw		32(%r25), %r19
	ldw		36(%r25), %r20
	ldw		40(%r25), %r21
	ldw		44(%r25), %r22
	stw		%r19, 32(%r26)
	stw		%r20, 36(%r26)
	stw		%r21, 40(%r26)
	stw		%r22, 44(%r26)
	ldw		48(%r25), %r19
	ldw		52(%r25), %r20
	ldw		56(%r25), %r21
	ldw		60(%r25), %r22
	stw		%r19, 48(%r26)
	stw		%r20, 52(%r26)
	ldo		64(%r25), %r25
	stw		%r21, 56(%r26)
	stw		%r22, 60(%r26)
	ldo		64(%r26), %r26
	addib,COND(>),n	-1, %r1, 1b
	ldw		0(%r25), %r19
#endif
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_page_asm)

/*
 * NOTE: Code in clear_user_page has a hard-coded dependency on the
 *       maximum alias boundary being 4 MB. We've been assured by the
 *       parisc chip designers that there will not ever be a parisc
 *       chip with a larger alias boundary (Never say never :-) ).
 *
 *       Subtle: the dtlb miss handlers support the temp alias region by
 *       "knowing" that if a dtlb miss happens within the temp alias
 *       region it must have occurred while in clear_user_page. Since
 *       this routine makes use of processor local translations, we
 *       don't want to insert them into the kernel page table. Instead,
 *       we load up some general registers (they need to be registers
 *       which aren't shadowed) with the physical page numbers (preshifted
 *       for tlb insertion) needed to insert the translations. When we
 *       miss on the translation, the dtlb miss handler inserts the
 *       translation into the tlb using these values:
 *
 *          %r26 physical page (shifted for tlb insert) of "to" translation
 *          %r23 physical page (shifted for tlb insert) of "from" translation
 */

        /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
        #define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
        .macro          convert_phys_for_tlb_insert20  phys
        extrd,u         \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
        depdi           _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
	.endm
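	/*
	 * Informally: the extrd,u above drops the low PAGE_SHIFT offset
	 * bits and leaves the physical page number positioned the way
	 * iitlbt and idtlbt expect it; when the configured base page
	 * size is larger than 4k, the depdi then seeds the default
	 * page-size encoding into the freed-up low bits.
	 */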

	/*
	 * copy_user_page_asm() performs a page copy using mappings
	 * equivalent to the user page mappings.  It can be used to
	 * implement copy_user_page() but unfortunately both the `from'
	 * and `to' pages need to be flushed through mappings equivalent
	 * to the user mappings after the copy because the kernel accesses
	 * the `from' page through the kmap kernel mapping and the `to'
	 * page needs to be flushed since code can be copied.  As a
	 * result, this implementation is less efficient than the simpler
	 * copy using the kernel mapping.  It only needs the `from' page
	 * to be flushed via the user mapping.  The kunmap routines handle
	 * the flushes needed for the kernel mapping.
	 *
	 * I'm still keeping this around because it may be possible to
	 * use it if more information is passed into copy_user_page().
	 * Have to do some measurements to see if it is worthwhile to
	 * lobby for such a change.
	 */
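	/*
	 * Arguments: %r26 = `to' (kernel virtual address), %r25 = `from'
	 * (kernel virtual address), %r24 = user-space virtual address
	 * used to pick congruent tmpalias mappings.
	 */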

ENTRY_CFI(copy_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non shadowed register.  */
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r26, %r1, %r26
	sub		%r25, %r1, %r23

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
	depd		%r24,63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif

	/* Purge any old translations */

#ifdef CONFIG_PA20
	pdtlb,l		0(%r28)
	pdtlb,l		0(%r29)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		0(%r28)
	pdtlb		0(%r29)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */

	ldd		0(%r29), %r19
	ldi		(PAGE_SIZE / 128), %r1

1:	ldd		8(%r29), %r20

	ldd		16(%r29), %r21
	ldd		24(%r29), %r22
	std		%r19, 0(%r28)
	std		%r20, 8(%r28)

	ldd		32(%r29), %r19
	ldd		40(%r29), %r20
	std		%r21, 16(%r28)
	std		%r22, 24(%r28)

	ldd		48(%r29), %r21
	ldd		56(%r29), %r22
	std		%r19, 32(%r28)
	std		%r20, 40(%r28)

	ldd		64(%r29), %r19
	ldd		72(%r29), %r20
	std		%r21, 48(%r28)
	std		%r22, 56(%r28)

	ldd		80(%r29), %r21
	ldd		88(%r29), %r22
	std		%r19, 64(%r28)
	std		%r20, 72(%r28)

	ldd		 96(%r29), %r19
	ldd		104(%r29), %r20
	std		%r21, 80(%r28)
	std		%r22, 88(%r28)

	ldd		112(%r29), %r21
	ldd		120(%r29), %r22
	std		%r19, 96(%r28)
	std		%r20, 104(%r28)

	ldo		128(%r29), %r29
	std		%r21, 112(%r28)
	std		%r22, 120(%r28)
	ldo		128(%r28), %r28

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
	ldd		0(%r29), %r19		/* start next loads */

#else
	ldi		(PAGE_SIZE / 64), %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */

1:	ldw		0(%r29), %r19
	ldw		4(%r29), %r20
	ldw		8(%r29), %r21
	ldw		12(%r29), %r22
	stw		%r19, 0(%r28)
	stw		%r20, 4(%r28)
	stw		%r21, 8(%r28)
	stw		%r22, 12(%r28)
	ldw		16(%r29), %r19
	ldw		20(%r29), %r20
	ldw		24(%r29), %r21
	ldw		28(%r29), %r22
	stw		%r19, 16(%r28)
	stw		%r20, 20(%r28)
	stw		%r21, 24(%r28)
	stw		%r22, 28(%r28)
	ldw		32(%r29), %r19
	ldw		36(%r29), %r20
	ldw		40(%r29), %r21
	ldw		44(%r29), %r22
	stw		%r19, 32(%r28)
	stw		%r20, 36(%r28)
	stw		%r21, 40(%r28)
	stw		%r22, 44(%r28)
	ldw		48(%r29), %r19
	ldw		52(%r29), %r20
	ldw		56(%r29), %r21
	ldw		60(%r29), %r22
	stw		%r19, 48(%r28)
	stw		%r20, 52(%r28)
	stw		%r21, 56(%r28)
	stw		%r22, 60(%r28)
	ldo		64(%r28), %r28

	addib,COND(>)		-1, %r1, 1b
	ldo		64(%r29), %r29
#endif

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(copy_user_page_asm)

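/* %r26 = page to clear (kernel virtual address, converted to a physical
   address via tophys_r1 below), %r25 = user-space virtual address used
   to pick a congruent tmpalias mapping. */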
ENTRY_CFI(clear_user_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1	%r26

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	ldi		(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define	PREFETCHW_OP	ldd		256(%0), %r0 */

1:	std		%r0, 0(%r28)
	std		%r0, 8(%r28)
	std		%r0, 16(%r28)
	std		%r0, 24(%r28)
	std		%r0, 32(%r28)
	std		%r0, 40(%r28)
	std		%r0, 48(%r28)
	std		%r0, 56(%r28)
	std		%r0, 64(%r28)
	std		%r0, 72(%r28)
	std		%r0, 80(%r28)
	std		%r0, 88(%r28)
	std		%r0, 96(%r28)
	std		%r0, 104(%r28)
	std		%r0, 112(%r28)
	std		%r0, 120(%r28)
	addib,COND(>)		-1, %r1, 1b
	ldo		128(%r28), %r28

#else	/* ! CONFIG_64BIT */
	ldi		(PAGE_SIZE / 64), %r1

1:	stw		%r0, 0(%r28)
	stw		%r0, 4(%r28)
	stw		%r0, 8(%r28)
	stw		%r0, 12(%r28)
	stw		%r0, 16(%r28)
	stw		%r0, 20(%r28)
	stw		%r0, 24(%r28)
	stw		%r0, 28(%r28)
	stw		%r0, 32(%r28)
	stw		%r0, 36(%r28)
	stw		%r0, 40(%r28)
	stw		%r0, 44(%r28)
	stw		%r0, 48(%r28)
	stw		%r0, 52(%r28)
	stw		%r0, 56(%r28)
	stw		%r0, 60(%r28)
	addib,COND(>)		-1, %r1, 1b
	ldo		64(%r28), %r28
#endif	/* CONFIG_64BIT */

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(clear_user_page_asm)

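/* %r26 = physical address of the page, %r25 = user-space virtual address;
   the flush runs over a congruent tmpalias mapping so that it hits the
   same cache lines as the user mapping. */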
ENTRY_CFI(flush_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25


1:      fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	cmpb,COND(<<)		%r28, %r25, 1b
	fdc,m		%r31(%r28)

	sync

#ifdef CONFIG_PA20
	pdtlb,l		0(%r25)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		0(%r25)
	tlb_unlock	%r20,%r21,%r22
#endif

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_dcache_page_asm)

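/* Same arguments as flush_dcache_page_asm: %r26 = physical address of
   the page, %r25 = user-space virtual address used to form a congruent
   tmpalias mapping. */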
ENTRY_CFI(flush_icache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pitlb,l		%r0(%sr4,%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pitlb		(%sr4,%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25


	/* fic only has the type 26 form on PA1.1, requiring an
	 * explicit space specification, so use %sr4 */
1:      fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	cmpb,COND(<<)	%r28, %r25, 1b
	fic,m		%r31(%sr4,%r28)

	sync

#ifdef CONFIG_PA20
	pitlb,l		%r0(%sr4,%r25)
#else
	tlb_lock	%r20,%r21,%r22
	pitlb		(%sr4,%r25)
	tlb_unlock	%r20,%r21,%r22
#endif

	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_icache_page_asm)

ENTRY_CFI(flush_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25


1:      fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	cmpb,COND(<<)		%r26, %r25, 1b
	fdc,m		%r23(%r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_dcache_page_asm)

ENTRY_CFI(purge_kernel_dcache_page_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25

1:      pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	cmpb,COND(<<)		%r26, %r25, 1b
	pdc,m		%r23(%r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(purge_kernel_dcache_page_asm)

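/* %r26 = start, %r25 = end of a user-space range (hence the %sr3
   accesses below); the start address is first aligned down to a
   cache-line boundary. */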
ENTRY_CFI(flush_user_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:      cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m		%r23(%sr3, %r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_user_dcache_range_asm)

ENTRY_CFI(flush_kernel_dcache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:      cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m		%r23(%r26)

	sync
	syncdma
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_dcache_range_asm)

ENTRY_CFI(flush_user_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:      cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr3, %r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_user_icache_range_asm)

ENTRY_CFI(flush_kernel_icache_page)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25


1:      fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	cmpb,COND(<<)		%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

	sync
	bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(flush_kernel_icache_page)

ENTRY_CFI(flush_kernel_icache_range_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:      cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

	sync
	bv		%r0(%r2)
	nop
	.exit
	.procend
ENDPROC_CFI(flush_kernel_icache_range_asm)

	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
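/* %r26 selects the CPU family (SRHASH_PCXST, SRHASH_PCXL or SRHASH_PA20),
   dispatched at label 1 below after the switch to real mode. */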
ENTRY_CFI(disable_sr_hashing_asm)
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * Switch to real mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

1:      cmpib,=,n	SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26, srdis_pa20
	b,n		srdis_done

srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word		0x141c1a00		/* mfdiag %dr0, %r28 */
	.word		0x141c1a00		/* must issue twice */
	depwi		0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi		0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word		0x141c1600		/* mtdiag %r28, %dr0 */
	.word		0x141c1600		/* must issue twice */
	b,n		srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word		0x141c0600		/* mfdiag %dr0, %r28 */
	depwi		0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word		0x141c0240		/* mtdiag %r28, %dr0 */
	b,n		srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word		0x144008bc		/* mfdiag %dr2, %r28 */
	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word		0x145c1840		/* mtdiag %r28, %dr2 */


srdis_done:
	/* Switch back to virtual mode */
	rsm		PSW_SM_I, %r0		/* prep to load iia queue */
	load32 	   	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

2:      bv		%r0(%r2)
	nop
	.exit

	.procend
ENDPROC_CFI(disable_sr_hashing_asm)

	.end