/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#include "head_32.h"

#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
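/*
 * As the offsets above show, each 16-byte entry of the BATS array that
 * MMU_init fills in is laid out as IBATnU, IBATnL, DBATnU, DBATnL.  The
 * upper registers (which carry the valid bits on 6xx) are zeroed first
 * so a BAT is never valid while only half written.
 */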

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_book3s_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons: the kernel used to
	 * need to look like a COFF function entry for the pmac,
	 * but we're always started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal MMU environment set up so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification, performs the
 * necessary low-level setup, and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
	bl	reloc_offset
	bl	early_hash_table
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	bl	reloc_offset
	bl	init_idle_6xx


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * we now have the 1st 256M of ram mapped with the bats (see initial_bats).
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	rfi				/* enables MMU */
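/*
 * rfi reloads the PC from SRR0 and the MSR from SRR1, so the pairs of
 * mtspr above make this a single atomic "set MSR_IR/MSR_DR and branch
 * to start_here" operation: translation comes on with the jump itself.
 */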

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
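/*
 * Handshake used by __secondary_hold above: a held cpu announces
 * itself by storing its value of r3 in __secondary_hold_acknowledge,
 * then spins reading physical address 0 until the master stores that
 * cpu's number there to release it.
 */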

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done.  */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	DO_KVM  0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH2,r1
	mfspr	r1, SPRN_SPRG_THREAD
	lwz	r1, RTAS_SP(r1)
	cmpwi	cr1, r1, 0
	bne	cr1, 7f
	mfspr	r1, SPRN_SPRG_SCRATCH2
#else
	mfspr	r11, SPRN_SPRG_THREAD
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mfspr	r4, SPRN_SPRG_THREAD
	tovirt(r4, r4)
	lwz	r4, RTAS_SP(r4)
	cmpwi	cr1, r4, 0
#endif
	beq	cr1, machine_check_tramp
	twi	31, 0, 0
#else
	b	machine_check_tramp
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM  0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
BEGIN_MMU_FTR_SECTION
	mtspr	SPRN_SPRG_SCRATCH2,r10
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH2
MMU_FTR_SECTION_ELSE
	b	1f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
1:	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
MMU_FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */

/* Instruction access exception. */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
#ifdef CONFIG_PPC_FPU
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
#else
	b	ProgramCheck
#endif

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#ifdef CONFIG_MODULES
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SDR1
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
	rlwinm	r2, r2, 28, 0xfffff000
#ifdef CONFIG_MODULES
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess
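/*
 * The 603 TLB miss handlers run with MSR[TGPR] set: that is the
 * "alternate set of r0 - r3" mentioned at 0x1000, i.e. accesses to
 * r0-r3 hit shadow copies.  Flipping MSR_TGPR off with the xoris/mtmsr
 * sequence above brings back the interrupted context's real r0-r3
 * before we branch into the normal page fault path.
 */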

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SDR1
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
	rlwinm	r2, r2, 28, 0xfffff000
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SDR1
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
	rlwinm	r2, r2, 28, 0xfffff000
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
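/*
 * The MMU_FTR_NEED_DTLB_SW_LRU blocks above implement a software LRU
 * for the two-way 603 DTLB: SPRN_SPRG_603_LRU keeps one "last used
 * way" bit per TLB set, indexed by EA bits 15:19, and the computed bit
 * is copied into SRR1 bit 14, which tlbld presumably consults as the
 * way-select on the affected 603 variants.
 */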

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)

#ifdef CONFIG_VMAP_STACK
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH2
	rfi

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	rfi

stack_overflow:
	vmap_stack_overflow_exception
#endif

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)


/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
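/*
 * The outer loop above moves one L1 cache line per iteration: the
 * inner bdnz loop copies L1_CACHE_BYTES/4 words, then dcbst pushes the
 * destination line out to memory and icbi discards any stale icache
 * copy, so the relocated kernel is safe to execute once the final
 * sync/isync completes.
 */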

#ifdef CONFIG_SMP
	.globl	__secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	blr
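/*
 * On the 32-bit hash MMUs, SDR1 holds the physical base of the hash
 * table in its upper bits and HTABMASK in its low bits.  The 3 or'ed
 * in above presumably encodes the mask for the 256kB early_hash table
 * (4 x 64kB pieces), which must be aligned to its size.
 */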

load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
BEGIN_MMU_FTR_SECTION
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
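/*
 * Each of the 16 segment registers covers one 256MB slice of the 4GB
 * effective address space, which is why r4 advances by 0x10000000
 * (addis of 0x1000) per iteration.  The 0x111 VSID step matches the
 * per-segment skew applied in switch_mmu_context() below.
 */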

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lwz	r4, MM_PGD(r4)
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
BEGIN_MMU_FTR_SECTION
#ifndef CONFIG_BDI_SWITCH
	lwz	r4, MM_PGD(r4)
#endif
	tophys(r4, r4)
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)
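/*
 * So the VSID for segment s of context c works out to
 * ((c * 897) << 4) + s * 0x111, truncated to the VSID field.  The odd
 * multipliers are there, presumably, to scatter consecutive contexts
 * and segments across the hash table instead of letting their PTEs
 * pile into the same buckets.
 */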

/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)
	mfmsr	r6
	mflr	r7
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	rfi
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r6
	rfi

flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr
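/*
 * Brute-force flush: r10 starts at 0x400000 and steps down one 4kB
 * page at a time, issuing tlbie for every EA in the first 4MB.  On the
 * 6xx-class TLBs this file supports, that should walk every congruence
 * class often enough to invalidate the entire TLB.
 */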

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfi

/* We use one BAT to map up to 256M of RAM at PAGE_OFFSET */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
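/*
 * BAT encoding used here: the lower word holds the physical base plus
 * WIMG/PP bits (2 = read/write; 0x12 also sets M for SMP coherency),
 * while the upper word holds the effective base, the block length
 * (BL_256M) and 0x2 for the Vs (supervisor mode valid) bit.
 */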

#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8, 0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8