/* xref: /linux/arch/powerpc/mm/nohash/tlb_low_64e.S (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04) */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  Low level TLB miss handlers for Book3E
 *
 *  Copyright (C) 2008-2009
 *      Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 */
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>

/*
 * Bit positions of the page-table levels within a virtual page table
 * (VPTE) address, built up from the per-level index sizes.
 */
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
#define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
27
/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with a bolted linear mapping          *
 * No virtual page table, no nested TLB misses                        *
 *                                                                    *
 **********************************************************************/

/*
 * Note that, unlike non-bolted handlers, TLB_EXFRAME is not
 * modified by the TLB miss handlers themselves, since the TLB miss
 * handler code will not itself cause a recursive TLB miss.
 *
 * TLB_EXFRAME will be modified when crit/mc/debug exceptions are
 * entered/exited.
 */
/*
 * Save the volatile GPRs and CR into the TLB exception frame, load the
 * faulting address into r16 and the current PGD pointer into r14.
 * \intnum is the interrupt vector number (passed to DO_KVM), \addr is
 * the SPR holding the faulting address (SPRN_DEAR or SPRN_SRR0).
 */
.macro tlb_prolog_bolted intnum addr
	mtspr	SPRN_SPRG_GEN_SCRATCH,r12
	mfspr	r12,SPRN_SPRG_TLB_EXFRAME
	std	r13,EX_TLB_R13(r12)
	std	r10,EX_TLB_R10(r12)
	mfspr	r13,SPRN_SPRG_PACA

	mfcr	r10
	std	r11,EX_TLB_R11(r12)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	DO_KVM	\intnum, SPRN_SRR1
	std	r16,EX_TLB_R16(r12)
	mfspr	r16,\addr		/* get faulting address */
	std	r14,EX_TLB_R14(r12)
	ld	r14,PACAPGD(r13)
	std	r15,EX_TLB_R15(r12)
	std	r10,EX_TLB_CR(r12)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	mfspr r11, SPRN_SRR1
	andi. r10,r11,MSR_PR
	beq 1f
	BTB_FLUSH(r10)
1:
END_BTB_FLUSH_SECTION
	std	r7,EX_TLB_R7(r12)
#endif
.endm
75
/*
 * Restore all registers saved by tlb_prolog_bolted (plus CR), leaving
 * machine state exactly as it was when the TLB miss was taken.
 */
.macro tlb_epilog_bolted
	ld	r14,EX_TLB_CR(r12)
#ifdef CONFIG_PPC_FSL_BOOK3E
	ld	r7,EX_TLB_R7(r12)
#endif
	ld	r10,EX_TLB_R10(r12)
	ld	r11,EX_TLB_R11(r12)
	ld	r13,EX_TLB_R13(r12)
	mtcr	r14
	ld	r14,EX_TLB_R14(r12)
	ld	r15,EX_TLB_R15(r12)
	ld	r16,EX_TLB_R16(r12)
	mfspr	r12,SPRN_SPRG_GEN_SCRATCH
.endm
90
/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	/* We need _PAGE_PRESENT and  _PAGE_ACCESSED set */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST   is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */

	mfspr	r11,SPRN_ESR

	srdi	r15,r16,60		/* get region */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */

	rlwinm	r10,r11,32-19,27,27	/* ESR:ST -> _PAGE_BAP_SW */
	rlwimi	r10,r11,32-16,19,19	/* ESR:ST -> _PAGE_DIRTY */
	cmpwi	r15,0			/* user vs kernel check */
	ori	r10,r10,_PAGE_PRESENT
	oris	r11,r10,_PAGE_ACCESSED@h

	bne	tlb_miss_kernel_bolted

tlb_miss_user_bolted:
#ifdef CONFIG_PPC_KUAP
	mfspr	r10,SPRN_MAS1
	rlwinm.	r10,r10,0,0x3fff0000
	beq-	tlb_miss_fault_bolted /* KUAP fault */
#endif

tlb_miss_common_bolted:
/*
 * This is the guts of the TLB miss handler for bolted-linear.
 * We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3
	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ldx	r14,r14,r15		/* grab pgd entry */
	beq	tlb_miss_done_bolted	/* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
	ldx	r14,r14,r15		/* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab pmd entry */

	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */

	/* Check if required permissions are met */
	andc.	r15,r11,r14
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	bne-	tlb_miss_fault_bolted

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE need change if !base page size, not
	 *                 yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	clrldi	r15,r15,12		/* Clear crap at the top */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r11
	andi.	r11,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
	mtspr	SPRN_MAS7_MAS3,r15
	tlbwe

tlb_miss_done_bolted:
	tlb_epilog_bolted
	rfi
218
itlb_miss_kernel_bolted:
	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
tlb_miss_kernel_bolted:
	mfspr	r10,SPRN_MAS1
	ld	r14,PACA_KERNELPGD(r13)
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	tlb_miss_common_bolted

tlb_miss_fault_bolted:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_BAP_UX|_PAGE_BAP_SX
	bne	itlb_miss_fault_bolted
dtlb_miss_fault_bolted:
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_bolted:
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e
240
/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	srdi	r15,r16,60		/* get region */
	bne-	itlb_miss_fault_bolted

	li	r11,_PAGE_PRESENT|_PAGE_BAP_UX	/* Base perm */

	/* We do the user/kernel test for the PID here along with the RW test
	 */

	cmpldi	cr0,r15,0			/* Check for user region */
	oris	r11,r11,_PAGE_ACCESSED@h
	beq	tlb_miss_user_bolted
	b	itlb_miss_kernel_bolted
258
#ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
 *
 * Linear mapping is bolted: no virtual page table or nested TLB misses
 * Indirect entries in TLB1, hardware loads resulting direct entries
 *    into TLB0
 * No HES or NV hint on TLB1, so we need to do software round-robin
 * No tlbsrx. so we need a spinlock, and we have to deal
 *    with MAS-damage caused by tlbsx
 * 4K pages only
 */

	START_EXCEPTION(instruction_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	ld	r11,PACA_TCD_PTR(r13)
	srdi.	r15,r16,60		/* get region */
	ori	r16,r16,1		/* low bit set: instruction miss */

	bne	tlb_miss_kernel_e6500	/* user/kernel test */

	b	tlb_miss_common_e6500
282
283	START_EXCEPTION(data_tlb_miss_e6500)
284	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
285
286	ld	r11,PACA_TCD_PTR(r13)
287	srdi.	r15,r16,60		/* get region */
288	rldicr	r16,r16,0,62
289
290	bne	tlb_miss_kernel_e6500	/* user vs kernel check */
291
292/*
293 * This is the guts of the TLB miss handler for e6500 and derivatives.
294 * We are entered with:
295 *
296 * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
297 * r15 = crap (free to use)
298 * r14 = page table base
299 * r13 = PACA
300 * r11 = tlb_per_core ptr
301 * r10 = crap (free to use)
302 * r7  = esel_next
303 */
304tlb_miss_common_e6500:
305	crmove	cr2*4+2,cr0*4+2		/* cr2.eq != 0 if kernel address */
306
307BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
308	/*
309	 * Search if we already have an indirect entry for that virtual
310	 * address, and if we do, bail out.
311	 *
312	 * MAS6:IND should be already set based on MAS4
313	 */
314	lhz	r10,PACAPACAINDEX(r13)
315	addi	r10,r10,1
316	crclr	cr1*4+eq	/* set cr1.eq = 0 for non-recursive */
3171:	lbarx	r15,0,r11
318	cmpdi	r15,0
319	bne	2f
320	stbcx.	r10,0,r11
321	bne	1b
3223:
323	.subsection 1
3242:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
325	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
32610:	lbz	r15,0(r11)
327	cmpdi	r15,0
328	bne	10b
329	b	1b
330	.previous
331END_FTR_SECTION_IFSET(CPU_FTR_SMT)
332
333	lbz	r7,TCD_ESEL_NEXT(r11)
334
335BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
336	/*
337	 * Erratum A-008139 says that we can't use tlbwe to change
338	 * an indirect entry in any way (including replacing or
339	 * invalidating) if the other thread could be in the process
340	 * of a lookup.  The workaround is to invalidate the entry
341	 * with tlbilx before overwriting.
342	 */
343
344	rlwinm	r10,r7,16,0xff0000
345	oris	r10,r10,MAS0_TLBSEL(1)@h
346	mtspr	SPRN_MAS0,r10
347	isync
348	tlbre
349	mfspr	r15,SPRN_MAS1
350	andis.	r15,r15,MAS1_VALID@h
351	beq	5f
352
353BEGIN_FTR_SECTION_NESTED(532)
354	mfspr	r10,SPRN_MAS8
355	rlwinm	r10,r10,0,0x80000fff  /* tgs,tlpid -> sgs,slpid */
356	mtspr	SPRN_MAS5,r10
357END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)
358
359	mfspr	r10,SPRN_MAS1
360	rlwinm	r15,r10,0,0x3fff0000  /* tid -> spid */
361	rlwimi	r15,r10,20,0x00000003 /* ind,ts -> sind,sas */
362	mfspr	r10,SPRN_MAS6
363	mtspr	SPRN_MAS6,r15
364
365	mfspr	r15,SPRN_MAS2
366	isync
367	tlbilxva 0,r15
368	isync
369
370	mtspr	SPRN_MAS6,r10
371
3725:
373BEGIN_FTR_SECTION_NESTED(532)
374	li	r10,0
375	mtspr	SPRN_MAS8,r10
376	mtspr	SPRN_MAS5,r10
377END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)
378
379	tlbsx	0,r16
380	mfspr	r10,SPRN_MAS1
381	andis.	r15,r10,MAS1_VALID@h
382	bne	tlb_miss_done_e6500
383FTR_SECTION_ELSE
384	mfspr	r10,SPRN_MAS1
385ALT_FTR_SECTION_END_IFSET(CPU_FTR_SMT)
386
387	oris	r10,r10,MAS1_VALID@h
388	beq	cr2,4f
389	rlwinm	r10,r10,0,16,1		/* Clear TID */
3904:	mtspr	SPRN_MAS1,r10
391
392	/* Now, we need to walk the page tables. First check if we are in
393	 * range.
394	 */
395	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
396	bne-	tlb_miss_fault_e6500
397
398	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
399	cmpldi	cr0,r14,0
400	clrrdi	r15,r15,3
401	beq-	tlb_miss_fault_e6500 /* No PGDIR, bail */
402	ldx	r14,r14,r15		/* grab pgd entry */
403
404	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
405	clrrdi	r15,r15,3
406	cmpdi	cr0,r14,0
407	bge	tlb_miss_huge_e6500	/* Bad pgd entry or hugepage; bail */
408	ldx	r14,r14,r15		/* grab pud entry */
409
410	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
411	clrrdi	r15,r15,3
412	cmpdi	cr0,r14,0
413	bge	tlb_miss_huge_e6500
414	ldx	r14,r14,r15		/* Grab pmd entry */
415
416	mfspr	r10,SPRN_MAS0
417	cmpdi	cr0,r14,0
418	bge	tlb_miss_huge_e6500
419
420	/* Now we build the MAS for a 2M indirect page:
421	 *
422	 * MAS 0   :	ESEL needs to be filled by software round-robin
423	 * MAS 1   :	Fully set up
424	 *               - PID already updated by caller if necessary
425	 *               - TSIZE for now is base ind page size always
426	 *               - TID already cleared if necessary
427	 * MAS 2   :	Default not 2M-aligned, need to be redone
428	 * MAS 3+7 :	Needs to be done
429	 */
430
431	ori	r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
432	mtspr	SPRN_MAS7_MAS3,r14
433
434	clrrdi	r15,r16,21		/* make EA 2M-aligned */
435	mtspr	SPRN_MAS2,r15
436
437tlb_miss_huge_done_e6500:
438	lbz	r16,TCD_ESEL_MAX(r11)
439	lbz	r14,TCD_ESEL_FIRST(r11)
440	rlwimi	r10,r7,16,0x00ff0000	/* insert esel_next into MAS0 */
441	addi	r7,r7,1			/* increment esel_next */
442	mtspr	SPRN_MAS0,r10
443	cmpw	r7,r16
444	iseleq	r7,r14,r7		/* if next == last use first */
445	stb	r7,TCD_ESEL_NEXT(r11)
446
447	tlbwe
448
449tlb_miss_done_e6500:
450	.macro	tlb_unlock_e6500
451BEGIN_FTR_SECTION
452	beq	cr1,1f		/* no unlock if lock was recursively grabbed */
453	li	r15,0
454	isync
455	stb	r15,0(r11)
4561:
457END_FTR_SECTION_IFSET(CPU_FTR_SMT)
458	.endm
459
460	tlb_unlock_e6500
461	tlb_epilog_bolted
462	rfi
463
tlb_miss_huge_e6500:
	beq	tlb_miss_fault_e6500
	li	r10,1
	andi.	r15,r14,HUGEPD_SHIFT_MASK@l /* r15 = psize */
	rldimi	r14,r10,63,0		/* Set PD_HUGE */
	xor	r14,r14,r15		/* Clear size bits */
	ldx	r14,0,r14

	/*
	 * Now we build the MAS for a huge page.
	 *
	 * MAS 0   :	ESEL needs to be filled by software round-robin
	 *		 - can be handled by indirect code
	 * MAS 1   :	Need to clear IND and set TSIZE
	 * MAS 2,3+7:	Needs to be redone similar to non-tablewalk handler
	 */

	subi	r15,r15,10		/* Convert psize to tsize */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,~MAS1_IND
	rlwimi	r10,r15,MAS1_TSIZE_SHIFT,MAS1_TSIZE_MASK
	mtspr	SPRN_MAS1,r10

	li	r10,-0x400
	sld	r15,r10,r15		/* Generate mask based on size */
	and	r10,r16,r15
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	rlwimi	r10,r14,32-19,27,31	/* Insert WIMGE */
	clrldi	r15,r15,PAGE_SHIFT	/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r10
	andi.	r10,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r10,MAS3_SW|MAS3_UW
	andc	r15,r15,r10
1:
	mtspr	SPRN_MAS7_MAS3,r15

	mfspr	r10,SPRN_MAS0
	b	tlb_miss_huge_done_e6500
507
tlb_miss_kernel_e6500:
	ld	r14,PACA_KERNELPGD(r13)
	cmpldi	cr1,r15,8		/* Check for vmalloc region */
	beq+	cr1,tlb_miss_common_e6500

tlb_miss_fault_e6500:
	tlb_unlock_e6500
	/* We need to check if it was an instruction miss */
	andi.	r16,r16,1
	bne	itlb_miss_fault_e6500
dtlb_miss_fault_e6500:
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_e6500:
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e
#endif /* CONFIG_PPC_FSL_BOOK3E */
525
/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with TLB reservation and HES support  *
 *                                                                    *
 **********************************************************************/
531
532
/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in normal
	 * fault case since that's the only interesting values here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* The page tables are mapped virtually linear. At this point, though,
	 * we don't know whether we are trying to fault in a first level
	 * virtual address or a virtual page table address. We can get that
	 * from bit 0x1 of the region ID which we have set for a page table
	 */
	andi.	r10,r15,0x1
	bne-	virt_page_table_tlb_miss

	std	r14,EX_TLB_ESR(r12);	/* save ESR */
	std	r16,EX_TLB_DEAR(r12);	/* save DEAR */

	 /* We need _PAGE_PRESENT and  _PAGE_ACCESSED set */
	li	r11,_PAGE_PRESENT
	oris	r11,r11,_PAGE_ACCESSED@h

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r15,0		/* Check for user region */

	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST   is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */
	rlwimi	r11,r14,32-19,27,27
	rlwimi	r11,r14,32-16,19,19
	beq	normal_tlb_miss
	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
599
/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by writing a crazy value in ESR in our exception frame
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non
	 * linear mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	li	r11,_PAGE_PRESENT|_PAGE_BAP_UX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h

	cmpldi	cr0,r15,0			/* Check for user region */
	std	r14,EX_TLB_ESR(r12)		/* write crazy -1 to frame */
	beq	normal_tlb_miss

	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
643
/*
 * This is the guts of the first-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = region ID
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
normal_tlb_miss:
	/* So we first construct the page table address. We do that by
	 * shifting the bottom of the address (not the region ID) by
	 * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
	 * or'ing the fourth high bit.
	 *
	 * NOTE: For 64K pages, we do things slightly differently in
	 * order to handle the weird page table format used by linux
	 */
	ori	r10,r15,0x1
	rldicl	r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
	sldi	r15,r10,60
	clrrdi	r14,r14,3
	or	r10,r15,r14

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ld	r14,0(r10)
	beq	normal_tlb_miss_done
MMU_FTR_SECTION_ELSE
	ld	r14,0(r10)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

finish_normal_tlb_miss:
	/* Check if required permissions are met */
	andc.	r15,r11,r14
	bne-	normal_tlb_miss_access_fault
#ifdef CONFIG_PPC_KUAP
	mfspr	r11,SPRN_MAS1
	rlwinm.	r10,r11,0,0x3fff0000
	beq-	normal_tlb_miss_access_fault /* KUAP fault */
#endif

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE need change if !base page size, not
	 *                 yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 *
	 * TODO: mix up code below for better scheduling
	 */
	clrrdi	r10,r16,12		/* Clear low crap in EA */
	rlwimi	r10,r14,32-19,27,31	/* Insert WIMGE */
	mtspr	SPRN_MAS2,r10

	/* Check page size, if not standard, update MAS1 */
	rldicl	r10,r14,64-8,64-8
	cmpldi	cr0,r10,BOOK3E_PAGESZ_4K
	beq-	1f
#ifndef CONFIG_PPC_KUAP
	mfspr	r11,SPRN_MAS1
#endif
	rlwimi	r11,r14,31,21,24
	rlwinm	r11,r11,0,21,19
	mtspr	SPRN_MAS1,r11
1:
	/* Move RPN in position */
	rldicr	r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	clrldi	r15,r11,12		/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	andi.	r11,r14,_PAGE_DIRTY
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
BEGIN_MMU_FTR_SECTION
	srdi	r16,r15,32
	mtspr	SPRN_MAS3,r15
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r15
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

normal_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_EPILOG_SUCCESS
	rfi

normal_tlb_miss_access_fault:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_BAP_UX
	bne	1f
	ld	r14,EX_TLB_DEAR(r12)
	ld	r15,EX_TLB_ESR(r12)
	mtspr	SPRN_DEAR,r14
	mtspr	SPRN_ESR,r15
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
761
762
/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = region (top 4 bits of address)
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * Note that this should only ever be called as a second level handler
 * with the current scheme when using SW load.
 * That means we can always get the original fault DEAR at
 * EX_TLB_DEAR-EX_TLB_SIZE(r12)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will restart the whole fault at level
 * 0 so we don't care too much about clobbers
 *
 * XXX That code was written back when we couldn't clobber r14. We can now,
 * so we could probably optimize things a bit
 */
virt_page_table_tlb_miss:
	/* Are we hitting a kernel page table ? */
	andi.	r10,r15,0x8

	/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
	 * and we happen to have the swapper_pg_dir at offset 8 from the user
	 * pgdir in the PACA :-).
	 */
	add	r11,r10,r13

	/* If kernel, we need to clear MAS1 TID */
	beq	1f
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
#ifdef CONFIG_PPC_KUAP
	b	2f
1:
	mfspr	r10,SPRN_MAS1
	rlwinm.	r10,r10,0,0x3fff0000
	beq-	virt_page_table_tlb_miss_fault /* KUAP fault */
2:
#else
1:
#endif
BEGIN_MMU_FTR_SECTION
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	virt_page_table_tlb_miss_done
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
	bne-	virt_page_table_tlb_miss_fault

	/* Get the PGD pointer */
	ld	r15,PACAPGD(r11)
	cmpldi	cr0,r15,0
	beq-	virt_page_table_tlb_miss_fault

	/* Get to PGD entry */
	rldicl	r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Get to PUD entry */
	rldicl	r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Get to PMD entry */
	rldicl	r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Ok, we're all right, we can now create a kernel translation for
	 * a 4K or 64K page from r16 -> r15.
	 */
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 *
	 * So we only do MAS 2 and 3 for now...
	 */
	clrldi	r11,r15,4		/* remove region ID from RPN */
	ori	r10,r11,1		/* Or-in SR */

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

BEGIN_MMU_FTR_SECTION
virt_page_table_tlb_miss_done:

	/* We have overridden MAS2:EPN but currently our primary TLB miss
	 * handler will always restore it so that should not be an issue,
	 * if we ever optimize the primary handler to not write MAS2 on
	 * some cases, we'll have to restore MAS2:EPN here based on the
	 * original fault's DEAR. If we do that we have to modify the
	 * ITLB miss handler to also store SRR0 in the exception frame
	 * as DEAR.
	 *
	 * However, one nasty thing we did is we cleared the reservation
	 * (well, potentially we did). We do a trick here thus if we
	 * are not a level 0 exception (we interrupted the TLB miss) we
	 * offset the return address by -4 in order to replay the tlbsrx
	 * instruction there
	 */
	subf	r10,r13,r12
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	1f
	ld	r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
	addi	r10,r11,-4
	std	r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
1:
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
	/* Return to caller, normal case */
	TLB_MISS_EPILOG_SUCCESS
	rfi

virt_page_table_tlb_miss_fault:
	/* If we fault here, things are a little bit tricky. We need to call
	 * either data or instruction store fault, and we need to retrieve
	 * the original fault address and ESR (for data).
	 *
	 * The thing is, we know that in normal circumstances, this is
	 * always called as a second level tlb miss for SW load or as a first
	 * level TLB miss for HW load, so we should be able to peek at the
	 * relevant information in the first exception frame in the PACA.
	 *
	 * However, we do need to double check that, because we may just hit
	 * a stray kernel pointer or a userland attack trying to hit those
	 * areas. If that is the case, we do a data fault. (We can't get here
	 * from an instruction tlb miss anyway).
	 *
	 * Note also that when going to a fault, we must unwind the previous
	 * level as well. Since we are doing that, we don't need to clear or
	 * restore the TLB reservation neither.
	 */
	subf	r10,r13,r12
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	virt_page_table_tlb_miss_whacko_fault

	/* We dig the original DEAR and ESR from slot 0 */
	ld	r15,EX_TLB_DEAR+PACA_EXTLB(r13)
	ld	r16,EX_TLB_ESR+PACA_EXTLB(r13)

	/* We check for the "special" ESR value for instruction faults */
	cmpdi	cr0,r16,-1
	beq	1f
	mtspr	SPRN_DEAR,r15
	mtspr	SPRN_ESR,r16
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

virt_page_table_tlb_miss_whacko_fault:
	/* The linear fault will restart everything so ESR and DEAR will
	 * not have been clobbered, let's just fault with what we have
	 */
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
952
953
/**************************************************************
 *                                                            *
 * TLB miss handling for Book3E with hw page table support    *
 *                                                            *
 **************************************************************/
959
960
/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in normal
	 * fault case since that's the only interesting values here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0		/* Check for user region */
	ld	r15,PACAPGD(r13)	/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
995
/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by keeping a crazy value for ESR in r14
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non
	 * linear mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0			/* Check for user region */
	ld	r15,PACAPGD(r13)		/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)		/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
1035
1036
1037/*
1038 * This is the guts of the second-level TLB miss handler for direct
1039 * misses. We are entered with:
1040 *
1041 * r16 = virtual page table faulting address
1042 * r15 = PGD pointer
1043 * r14 = ESR
1044 * r13 = PACA
1045 * r12 = TLB exception frame in PACA
1046 * r11 = crap (free to use)
1047 * r10 = crap (free to use)
1048 *
1049 * It can be re-entered by the linear mapping miss handler. However, to
1050 * avoid too much complication, it will save/restore things for us
1051 */
1052htw_tlb_miss:
1053#ifdef CONFIG_PPC_KUAP
1054	mfspr	r10,SPRN_MAS1
1055	rlwinm.	r10,r10,0,0x3fff0000
1056	beq-	htw_tlb_miss_fault /* KUAP fault */
1057#endif
1058	/* Search if we already have a TLB entry for that virtual address, and
1059	 * if we do, bail out.
1060	 *
1061	 * MAS1:IND should be already set based on MAS4
1062	 */
1063	PPC_TLBSRX_DOT(0,R16)
1064	beq	htw_tlb_miss_done
1065
1066	/* Now, we need to walk the page tables. First check if we are in
1067	 * range.
1068	 */
1069	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
1070	bne-	htw_tlb_miss_fault
1071
1072	/* Get the PGD pointer */
1073	cmpldi	cr0,r15,0
1074	beq-	htw_tlb_miss_fault
1075
1076	/* Get to PGD entry */
1077	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
1078	clrrdi	r10,r11,3
1079	ldx	r15,r10,r15
1080	cmpdi	cr0,r15,0
1081	bge	htw_tlb_miss_fault
1082
1083	/* Get to PUD entry */
1084	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
1085	clrrdi	r10,r11,3
1086	ldx	r15,r10,r15
1087	cmpdi	cr0,r15,0
1088	bge	htw_tlb_miss_fault
1089
1090	/* Get to PMD entry */
1091	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
1092	clrrdi	r10,r11,3
1093	ldx	r15,r10,r15
1094	cmpdi	cr0,r15,0
1095	bge	htw_tlb_miss_fault
1096
1097	/* Ok, we're all right, we can now create an indirect entry for
1098	 * a 1M or 256M page.
1099	 *
1100	 * The last trick is now that because we use "half" pages for
1101	 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
1102	 * for an added LSB bit to the RPN. For 64K pages, there is no
1103	 * problem as we already use 32K arrays (half PTE pages), but for
1104	 * 4K page we need to extract a bit from the virtual address and
1105	 * insert it into the "PA52" bit of the RPN.
1106	 */
1107	rlwimi	r15,r16,32-9,20,20
1108	/* Now we build the MAS:
1109	 *
1110	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
1111	 * MAS 1   :	Almost fully setup
1112	 *               - PID already updated by caller if necessary
1113	 *               - TSIZE for now is base ind page size always
1114	 * MAS 2   :	Use defaults
1115	 * MAS 3+7 :	Needs to be done
1116	 */
1117	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
1118
1119BEGIN_MMU_FTR_SECTION
1120	srdi	r16,r10,32
1121	mtspr	SPRN_MAS3,r10
1122	mtspr	SPRN_MAS7,r16
1123MMU_FTR_SECTION_ELSE
1124	mtspr	SPRN_MAS7_MAS3,r10
1125ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
1126
1127	tlbwe
1128
1129htw_tlb_miss_done:
1130	/* We don't bother with restoring DEAR or ESR since we know we are
1131	 * level 0 and just going back to userland. They are only needed
1132	 * if you are going to take an access fault
1133	 */
1134	TLB_MISS_EPILOG_SUCCESS
1135	rfi
1136
1137htw_tlb_miss_fault:
1138	/* We need to check if it was an instruction miss. We know this
1139	 * though because r14 would contain -1
1140	 */
1141	cmpdi	cr0,r14,-1
1142	beq	1f
1143	mtspr	SPRN_DEAR,r16
1144	mtspr	SPRN_ESR,r14
1145	TLB_MISS_EPILOG_ERROR
1146	b	exc_data_storage_book3e
11471:	TLB_MISS_EPILOG_ERROR
1148	b	exc_instruction_storage_book3e
1149
1150/*
1151 * This is the guts of "any" level TLB miss handler for kernel linear
1152 * mapping misses. We are entered with:
1153 *
1154 *
1155 * r16 = faulting address
1156 * r15 = crap (free to use)
1157 * r14 = ESR (data) or -1 (instruction)
1158 * r13 = PACA
1159 * r12 = TLB exception frame in PACA
1160 * r11 = crap (free to use)
1161 * r10 = crap (free to use)
1162 *
1163 * In addition we know that we will not re-enter, so in theory, we could
1164 * use a simpler epilog not restoring SRR0/1 etc.. but we'll do that later.
1165 *
1166 * We also need to be careful about MAS registers here & TLB reservation,
1167 * as we know we'll have clobbered them if we interrupt the main TLB miss
1168 * handlers in which case we probably want to do a full restart at level
1169 * 0 rather than saving / restoring the MAS.
1170 *
1171 * Note: If we care about performance of that core, we can easily shuffle
1172 *       a few things around
1173 */
1174tlb_load_linear:
1175	/* For now, we assume the linear mapping is contiguous and stops at
1176	 * linear_map_top. We also assume the size is a multiple of 1G, thus
1177	 * we only use 1G pages for now. That might have to be changed in a
1178	 * final implementation, especially when dealing with hypervisors
1179	 */
1180	ld	r11,PACATOC(r13)
1181	ld	r11,linear_map_top@got(r11)
1182	ld	r10,0(r11)
1183	tovirt(10,10)
1184	cmpld	cr0,r16,r10
1185	bge	tlb_load_linear_fault
1186
1187	/* MAS1 need whole new setup. */
1188	li	r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
1189	oris	r15,r15,MAS1_VALID@h	/* MAS1 needs V and TSIZE */
1190	mtspr	SPRN_MAS1,r15
1191
1192	/* Already somebody there ? */
1193	PPC_TLBSRX_DOT(0,R16)
1194	beq	tlb_load_linear_done
1195
1196	/* Now we build the remaining MAS. MAS0 and 2 should be fine
1197	 * with their defaults, which leaves us with MAS 3 and 7. The
1198	 * mapping is linear, so we just take the address, clear the
1199	 * region bits, and or in the permission bits which are currently
1200	 * hard wired
1201	 */
1202	clrrdi	r10,r16,30		/* 1G page index */
1203	clrldi	r10,r10,4		/* clear region bits */
1204	ori	r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
1205
1206BEGIN_MMU_FTR_SECTION
1207	srdi	r16,r10,32
1208	mtspr	SPRN_MAS3,r10
1209	mtspr	SPRN_MAS7,r16
1210MMU_FTR_SECTION_ELSE
1211	mtspr	SPRN_MAS7_MAS3,r10
1212ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
1213
1214	tlbwe
1215
1216tlb_load_linear_done:
1217	/* We use the "error" epilog for success as we do want to
1218	 * restore to the initial faulting context, whatever it was.
1219	 * We do that because we can't resume a fault within a TLB
1220	 * miss handler, due to MAS and TLB reservation being clobbered.
1221	 */
1222	TLB_MISS_EPILOG_ERROR
1223	rfi
1224
1225tlb_load_linear_fault:
1226	/* We keep the DEAR and ESR around, this shouldn't have happened */
1227	cmpdi	cr0,r14,-1
1228	beq	1f
1229	TLB_MISS_EPILOG_ERROR_SPECIAL
1230	b	exc_data_storage_book3e
12311:	TLB_MISS_EPILOG_ERROR_SPECIAL
1232	b	exc_instruction_storage_book3e
1233